Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-11 23:32:53 +00:00)

Compare commits: 223 commits (dependabot ... main)
(The commit table that followed listed the 223 compared commits by SHA only; the author, message, and date columns were not captured in this view.)
.github/workflows/e2e-test-kind.yaml (vendored, 8 changed lines)

@@ -21,7 +21,7 @@ jobs:
    minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Set up Go version
      uses: actions/setup-go@v6

@@ -112,7 +112,7 @@ jobs:
    fail-fast: false
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Set up Go version
      uses: actions/setup-go@v6

@@ -185,7 +185,7 @@ jobs:
    timeout-minutes: 30
    - name: Upload debug bundle
      if: ${{ failure() }}
-     uses: actions/upload-artifact@v4
+     uses: actions/upload-artifact@v5
      with:
-       name: DebugBundle
+       name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
        path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
.github/workflows/get-go-version.yaml (vendored, 2 changed lines)

@@ -17,7 +17,7 @@ jobs:
    version: ${{ steps.pick-version.outputs.version }}
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - id: pick-version
      run: |
.github/workflows/nightly-trivy-scan.yml (vendored, 2 changed lines)

@@ -19,7 +19,7 @@ jobs:

    steps:
    - name: Checkout code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Run Trivy vulnerability scanner
      uses: aquasecurity/trivy-action@master
.github/workflows/pr-changelog-check.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
    steps:

    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Changelog check
      if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}
.github/workflows/pr-ci-check.yml (vendored, 2 changed lines)

@@ -14,7 +14,7 @@ jobs:
    fail-fast: false
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Set up Go version
      uses: actions/setup-go@v6
.github/workflows/pr-codespell.yml (vendored, 2 changed lines)

@@ -8,7 +8,7 @@ jobs:
    steps:

    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Codespell
      uses: codespell-project/actions-codespell@master
.github/workflows/pr-containers.yml (vendored, 2 changed lines)

@@ -13,7 +13,7 @@ jobs:
    name: Build
    runs-on: ubuntu-latest
    steps:
-   - uses: actions/checkout@v5
+   - uses: actions/checkout@v6
      name: Checkout

    - name: Set up QEMU
.github/workflows/pr-goreleaser.yml (vendored, 2 changed lines)

@@ -14,7 +14,7 @@ jobs:
    name: Build
    runs-on: ubuntu-latest
    steps:
-   - uses: actions/checkout@v5
+   - uses: actions/checkout@v6
      name: Checkout

    - name: Verify .goreleaser.yml and try a dryrun release.
.github/workflows/pr-linter-check.yml (vendored, 6 changed lines)

@@ -18,7 +18,7 @@ jobs:
    needs: get-go-version
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Set up Go version
      uses: actions/setup-go@v6

@@ -26,7 +26,7 @@ jobs:
    go-version: ${{ needs.get-go-version.outputs.version }}

    - name: Linter check
-     uses: golangci/golangci-lint-action@v8
+     uses: golangci/golangci-lint-action@v9
      with:
-       version: v2.1.1
+       version: v2.5.0
        args: --verbose
.github/workflows/push-builder.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:

-   - uses: actions/checkout@v5
+   - uses: actions/checkout@v6
      with:
        # The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
        # there are at least two commits: the first one is the merge commit and the second one is the real commit
.github/workflows/push.yml (vendored, 4 changed lines)

@@ -12,7 +12,7 @@ jobs:
    get-go-version:
      uses: ./.github/workflows/get-go-version.yaml
      with:
-       ref: ${{ github.ref }}
+       ref: ${{ github.ref_name }}

    build:
      name: Build

@@ -20,7 +20,7 @@ jobs:
    needs: get-go-version
    steps:
    - name: Check out the code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6

    - name: Set up Go version
      uses: actions/setup-go@v6
.github/workflows/rebase.yml (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout the latest code
-     uses: actions/checkout@v5
+     uses: actions/checkout@v6
      with:
        fetch-depth: 0
    - name: Automatic Rebase
.github/workflows/stale-issues.yml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ jobs:
    stale:
      runs-on: ubuntu-latest
      steps:
-     - uses: actions/stale@v10.0.0
+     - uses: actions/stale@v10.1.1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
@@ -13,7 +13,7 @@
    # limitations under the License.

    # Velero binary build section
-   FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
+   FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder

    ARG GOPROXY
    ARG BIN

@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
    go clean -modcache -cache

    # Restic binary build section
-   FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
+   FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder

    ARG GOPROXY
    ARG BIN

@@ -15,7 +15,7 @@
    ARG OS_VERSION=1809

    # Velero binary build section
-   FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
+   FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder

    ARG GOPROXY
    ARG BIN
Tiltfile (2 changed lines)

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

    tilt_helper_dockerfile_header = """
    # Tilt image
-   FROM golang:1.24 as tilt-helper
+   FROM golang:1.25 as tilt-helper

    # Support live reloading with Tilt
    RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
New changelog files under `changelogs/unreleased/` (one line each unless noted):

- `9132-mjnagel`: Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs
- `9141-kaovilai`: feat: Enhance BackupStorageLocation with Secret-based CA certificate support
- `9148-Lyndon-Li`: Fix issue #7725, add design for backup repo cache configuration
- `9166-claude`: Add VolumePolicy support for PVC Phase conditions to allow skipping Pending PVCs
- `9206-Joeavaikath`: Remove labels associated with previous backups
- `9255-Joeavaikath` (10 lines): Implement wildcard namespace pattern expansion for backup namespace includes/excludes (see the sketch after this list). This change adds support for wildcard patterns (`*`, `?`, `[abc]`, `{a,b,c}`) in namespace includes and excludes during backup operations. When wildcard patterns are detected, they are expanded against the list of active namespaces in the cluster before the backup proceeds. Key features:
  - Wildcard patterns in namespace includes/excludes are automatically detected and expanded
  - Pattern validation ensures unsupported patterns (regex, consecutive asterisks) are rejected
  - Empty wildcard results (e.g., "invalid*" matching no namespaces) correctly result in empty backups
  - Exact namespace names and "*" continue to work as before (no expansion needed)
- `9269-Lyndon-Li`: Fix issue #7904, remove the code and doc for PVC node selection
- `9291-Lyndon-Li`: Fix issue #9193, don't connect repo in repo controller
- `9307-sseago`: Concurrent backup processing
- `9321-shubham-pampattiwar`: Sanitize Azure HTTP responses in BSL status messages
- `9333-Lyndon-Li`: Fix issue #9332, add bytesDone for cache files
- `9342-Lyndon-Li`: Add cache configuration to VGDP
- `9350-blackpiglet`: Fix the Job build error when the BackupRepository name is longer than 63 characters.
- `9353-Lyndon-Li`: Add cache dir configuration for udmrepo
- `9354-Lyndon-Li`: Add snapshotSize for DataDownload, PodVolumeRestore
- `9357-sseago`: Add incrementalSize to DU/PVB for reporting new/changed size
- `9362-Lyndon-Li`: Support cache volume for generic restore exposer and pod volume exposer
- `9366-blackpiglet`: Use hookIndex for recording multiple restore exec hooks.
- `9367-shubham-pampattiwar`: Fix managed fields patch for resources using GenerateName
- `9368-shubham-pampattiwar`: Track actual resource names for GenerateName in restore status
- `9370-Lyndon-Li`: Add cache volume configuration
- `9375-Lyndon-Li`: Fix issue #9365, prevent fake completion notification due to multiple updates of a single PVR
- `9379-Lyndon-Li`: Refactor repo provider interface for static configuration
- `9389-sseago`: don't copy securitycontext from first container if configmap found
- `9391-Lyndon-Li`: Cache volume support for DataDownload
- `9397-Lyndon-Li`: Cache volume for PVR
- `9407-Lyndon-Li`: Fix issue #9400, connect repo first time after creation so that init params could be written
- `9414-shubham-pampattiwar`: Add Prometheus metrics for maintenance jobs
- `9418-Lyndon-Li`: Fix issue #9276, add doc for cache volume support
- `9419-shubham-pampattiwar`: Apply volume policies to VolumeGroupSnapshot PVC filtering
- `9420-Lyndon-Li`: Fix issue #9194, add doc for GOMAXPROCS behavior change
- `9431-blackpiglet`: Remove VolumeSnapshotClass from CSI B/R process.
- `9441-shubham-pampattiwar`: Add PVC-to-Pod cache to improve volume policy performance
- `9445-mpryc`: Fix plugin init container names exceeding DNS-1123 limit
- `9452-blackpiglet`: Add maintenance job and data mover pod's labels and annotations setting.
- `9474-blackpiglet`: Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence.
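To illustrate the expansion step described in the `9255-Joeavaikath` entry above, here is a simplified, hypothetical Go sketch that filters a cluster's active namespaces against a pattern using the standard library's `path.Match`. It covers `*`, `?`, and `[abc]`; the `{a,b,c}` alternates mentioned in the changelog would need additional handling, and this is not the actual Velero implementation.

```go
package main

import (
	"fmt"
	"path"
)

// expandNamespacePattern returns the active namespaces matching the given
// wildcard pattern. A plain name or "*" is returned unchanged, mirroring the
// "no expansion needed" behavior described in the changelog entry.
// Sketch only: names and behavior are assumptions for illustration.
func expandNamespacePattern(pattern string, activeNamespaces []string) ([]string, error) {
	if pattern == "*" || !containsWildcard(pattern) {
		return []string{pattern}, nil
	}

	matched := []string{}
	for _, ns := range activeNamespaces {
		ok, err := path.Match(pattern, ns)
		if err != nil {
			return nil, fmt.Errorf("invalid namespace pattern %q: %w", pattern, err)
		}
		if ok {
			matched = append(matched, ns)
		}
	}
	// An empty result (e.g. "invalid*") is returned as-is, which would lead to
	// an empty backup, matching the behavior described above.
	return matched, nil
}

func containsWildcard(s string) bool {
	for _, r := range s {
		if r == '*' || r == '?' || r == '[' {
			return true
		}
	}
	return false
}
```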
@@ -594,6 +594,8 @@ spec:
    description: Phase is the current state of the Backup.
    enum:
    - New
+   - Queued
+   - ReadyToStart
    - FailedValidation
    - InProgress
    - WaitingForPluginOperations

@@ -625,6 +627,11 @@ spec:
    filters that happen as items are processed.
    type: integer
    type: object
+   queuePosition:
+     description: |-
+       QueuePosition is the position of the backup in the queue.
+       Only relevant when Phase is "Queued"
+     type: integer
    startTimestamp:
      description: |-
        StartTimestamp records the time a backup was started.
@@ -113,10 +113,38 @@ spec:
    description: Bucket is the bucket to use for object storage.
    type: string
    caCert:
-     description: CACert defines a CA bundle to use when verifying
-       TLS connections to the provider.
+     description: |-
+       CACert defines a CA bundle to use when verifying TLS connections to the provider.
+       Deprecated: Use CACertRef instead.
      format: byte
      type: string
+   caCertRef:
+     description: |-
+       CACertRef is a reference to a Secret containing the CA certificate bundle to use
+       when verifying TLS connections to the provider. The Secret must be in the same
+       namespace as the BackupStorageLocation.
+     properties:
+       key:
+         description: The key of the secret to select from. Must be
+           a valid secret key.
+         type: string
+       name:
+         default: ""
+         description: |-
+           Name of the referent.
+           This field is effectively required, but due to backwards compatibility is
+           allowed to be empty. Instances of this type with an empty value here are
+           almost certainly wrong.
+           More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+         type: string
+       optional:
+         description: Specify whether the Secret or its key must be
+           defined
+         type: boolean
+     required:
+     - key
+     type: object
+     x-kubernetes-map-type: atomic
    prefix:
      description: Prefix is the path inside a bucket to use for Velero
        storage. Optional.
@@ -33,6 +33,12 @@ spec:
    jsonPath: .status.progress.totalBytes
    name: Total Bytes
    type: integer
+   - description: Incremental bytes
+     format: int64
+     jsonPath: .status.incrementalBytes
+     name: Incremental Bytes
+     priority: 10
+     type: integer
    - description: Name of the Backup Storage Location where this backup should be
        stored
      jsonPath: .spec.backupStorageLocation

@@ -189,6 +195,11 @@ spec:
    format: date-time
    nullable: true
    type: string
+   incrementalBytes:
+     description: IncrementalBytes holds the number of bytes new or changed
+       since the last backup
+     format: int64
+     type: integer
    message:
      description: Message is a message about the pod volume backup's status.
      type: string
@@ -133,6 +133,10 @@ spec:
    snapshotID:
      description: SnapshotID is the ID of the volume snapshot to be restored.
      type: string
+   snapshotSize:
+     description: SnapshotSize is the logical size in Bytes of the snapshot.
+     format: int64
+     type: integer
    sourceNamespace:
      description: SourceNamespace is the original namespace for namespace
        mapping.
File diff suppressed because one or more lines are too long
@@ -108,6 +108,10 @@ spec:
    description: SnapshotID is the ID of the Velero backup snapshot to
      be restored from.
    type: string
+   snapshotSize:
+     description: SnapshotSize is the logical size in Bytes of the snapshot.
+     format: int64
+     type: integer
    sourceNamespace:
      description: |-
        SourceNamespace is the original namespace where the volume is backed up from.
@@ -33,6 +33,12 @@ spec:
    jsonPath: .status.progress.totalBytes
    name: Total Bytes
    type: integer
+   - description: Incremental bytes
+     format: int64
+     jsonPath: .status.incrementalBytes
+     name: Incremental Bytes
+     priority: 10
+     type: integer
    - description: Name of the Backup Storage Location where this backup should be
        stored
      jsonPath: .spec.backupStorageLocation

@@ -173,6 +179,11 @@ spec:
    as a result of the DataUpload.
    nullable: true
    type: object
+   incrementalBytes:
+     description: IncrementalBytes holds the number of bytes new or changed
+       since the last backup
+     format: int64
+     type: integer
    message:
      description: Message is a message about the DataUpload's status.
      type: string
File diff suppressed because one or more lines are too long
design/Implemented/apply-flag.md (new file, 70 lines)

@@ -0,0 +1,70 @@
# Apply flag for install command

## Abstract
Add an `--apply` flag to the install command that enables applying existing resources rather than creating them. This can be useful as part of the upgrade process for existing installations.

## Background
The current Velero install command creates resources but doesn't provide a direct way to apply updates to an existing installation.
Users attempting to run the install command on an existing installation receive "already exists" messages.
Upgrade steps for existing installs typically involve a three (or more) step process to apply updated CRDs (using `--dry-run` and piping to `kubectl apply`) and then updating/setting images on the Velero deployment and node-agent.

## Goals
- Provide a simple flag to enable applying resources on an existing Velero installation.
- Use server-side apply to update existing resources rather than attempting to create them.
- Maintain consistency with the regular install flow.

## Non Goals
- Implement special logic for specific version-to-version upgrades (i.e. resource deletion, etc).
- Add complex upgrade validation or pre/post-upgrade hooks.
- Provide rollback capabilities.

## High-Level Design
The `--apply` flag will be added to the Velero install command.
When this flag is set, the installation process will use server-side apply to update existing resources instead of using create on new resources.
This flag can be used as _part_ of the upgrade process, but will not always fully handle an upgrade.

## Detailed Design
The implementation adds a new boolean flag `--apply` to the install command.
This flag will be passed through to the underlying install functions where the resource creation logic resides.

When the flag is set to true:
- The `createOrApplyResource` function will use server-side apply with field manager "velero-cli" and `force=true` to update resources.
- Resources will be applied in the same order as they would be created during installation.
- Custom Resource Definitions will still be processed first, and the system will wait for them to be established before continuing.

The server-side apply approach with `force=true` ensures that resources are updated even if there are conflicts with the last applied state.
This provides a best-effort mechanism to apply resources that follows the same flow as installation but updates resources instead of creating them.

No special handling is added for specific versions or resource structures, making this a general-purpose mechanism for applying resources.

## Alternatives Considered
1. Creating a separate `upgrade` command that would duplicate much of the install command logic.
   - Rejected due to code duplication and maintenance overhead.

2. Implementing version-specific upgrade logic to handle breaking changes between versions.
   - Rejected as overly complex and difficult to maintain across multiple version paths.
   - This could be considered again in the future, but is not in the scope of the current design.

3. Adding automatic detection of existing resources and switching to apply mode.
   - Rejected as it could lead to unexpected behavior and confusion if users unintentionally apply changes to existing resources.

## Security Considerations
The apply flag maintains the same security profile as the install command.
No additional permissions are required beyond what is needed for resource creation.
The use of `force=true` with server-side apply could potentially override manual changes made to resources, but this is a necessary trade-off to ensure apply is successful.

## Compatibility
This enhancement is compatible with all existing Velero installations as it is a new opt-in flag.
It does not change any resource formats or API contracts.
The apply process is best-effort and does not guarantee compatibility between arbitrary versions of Velero.
Users should still consult release notes for any breaking changes that may require manual intervention.
This flag could be adopted by the helm chart, specifically for CRD updates, to simplify the CRD update job.

## Implementation
The implementation involves:
1. Adding support for `Apply` to the existing Kubernetes client code.
1. Adding the `--apply` flag to the install command options.
1. Changing `createResource` to `createOrApplyResource` and updating it to use server-side apply when the `apply` boolean is set.

The implementation is straightforward and follows existing code patterns.
No migration of state or special handling of specific resources is required.
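As a rough illustration of the server-side apply call described in the design above (not the actual Velero code), a controller-runtime based sketch could look like this; the `kbClient` and `resource` names are placeholders:

```go
package install

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyResource applies a resource with server-side apply, forcing ownership of
// conflicting fields under the "velero-cli" field manager mentioned in the design.
// Sketch only: the real createOrApplyResource implementation may differ.
func applyResource(ctx context.Context, kbClient client.Client, resource *unstructured.Unstructured) error {
	return kbClient.Patch(
		ctx,
		resource,
		client.Apply,
		client.FieldOwner("velero-cli"),
		client.ForceOwnership,
	)
}
```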
design/backup-repo-cache-volume.md (new file, 231 lines)

@@ -0,0 +1,231 @@
# Backup Repository Cache Volume Design

## Glossary & Abbreviation

**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR-related features, as introduced in [Unified Repository design][1].
**Velero Generic Data Path (VGDP)**: VGDP is the collection of modules introduced in [Unified Repository design][1]. Velero uses these modules to finish data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include uploaders and the backup repository.
**Data Mover Pods**: Intermediate pods which hold VGDP and complete the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3] for details.
**Repository Maintenance Pods**: Pods for [Repository Maintenance Jobs][4], which hold VGDP to run repository maintenance.

## Background

According to the [Unified Repository design][1], Velero uses selectable backup repositories for various backup/restore methods, i.e., fs-backup, volume snapshot data movement, etc. Some backup repositories may need to cache data on the client side for various repository operations, so as to accelerate the execution.
In the existing [Backup Repository Configuration][5], we allow users to configure the cache data size (`cacheLimitMB`). However, the cache data is still stored in the root file system of data mover pods/repository maintenance pods, and therefore in the root file system of the node. This is not good enough, for the following reasons:
- In many distributions, the node's system disk size is predefined, non-configurable, and limited, e.g., the system disk size may be 20G or less
- Velero supports concurrent data movements in each node. The cache in each of the concurrent data mover pods could quickly run out of the system disk and cause problems like pod eviction, failure of pod creation, degradation of Kubernetes QoS, etc.

We need to allow users to prepare a dedicated location, e.g., a dedicated volume, for the cache.
Not all backup repositories or backup repository operations require a cache, so we need to define the details of when and how the cache is used.

## Goals

- Create a mechanism for users to configure cache volumes for various pods running VGDP
- Design the workflow to assign the cache volume pod path to backup repositories
- Describe when and how the cache volume is used

## Non-Goals

- The solution is based on [Unified Repository design][1], [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3]; legacy data paths are not supported. E.g., when a pod volume restore (PVR) runs with the legacy Restic path, any cached data still resides in the root file system.

## Solution

### Cache Data

Depending on the backup repository, cache data may include payload data or repository metadata, e.g., indexes to the payload data chunks.

Payload data is highly related to the backup data, and normally takes up the majority of the repository data as well as the cache data.

Repository metadata is related to the backup repository's chunking algorithm, data chunk mapping method, etc., so its size is not proportional to the backup data size.
On the other hand, for some backup repositories, in extreme cases the repository metadata may be significantly large. E.g., Kopia's indexes are per chunk; if there is a huge number of small files in the repository, Kopia's index data may be on the same level as, or even larger than, the payload data.
However, in the cases where repository metadata becomes the majority, other bottlenecks may emerge and the concurrency of data movers may be significantly constrained, so the requirement for cache volumes may go away.

Therefore, for now we only consider the cache volume requirement for payload data, and leave the consideration for metadata as a future enhancement.

### Scenarios

The backup repository cache varies with the backup repository and the backup repository operation during VGDP runs. Below are the scenarios in which VGDP runs:
- Data Upload for Backup: this is the process to upload/write the backup data into the backup repository, e.g., DataUpload or PodVolumeBackup. The pieces of data are almost directly written to the repository, sometimes with a small group staying briefly in the local place. That is to say, there should not be large-scale data cached for this scenario, so we don't prepare a dedicated cache for it.
- Repository Maintenance: Repository maintenance most often visits the backup repository's metadata, and sometimes it needs to visit the file system directories from the backed up data. On the other hand, it is not practical to run concurrent maintenance jobs in one node. So the cache data is neither large nor affects the root file system too much. Therefore, we don't need to prepare a dedicated cache for this scenario.
- Data Download for Restore: this is the process to download/read the backup data from the backup repository during restore, e.g., DataDownload or PodVolumeRestore. For backup repositories whose data is stored in remote backup storages (e.g., a Kopia repository stores data in remote object stores), large amounts of data are cached locally to accelerate the restore. Therefore, we need dedicated cache volumes for this scenario.
- Backup Deletion: During this scenario, the backup repository is connected and metadata is enumerated to find the repository snapshot representing the backup data. That is to say, only metadata is cached, if any. Therefore, dedicated cache volumes are not required in this scenario.

The above analyses are based on the common behavior of backup repositories, and they do not consider the case where backup repository metadata takes a majority or significant proportion of the cache data.
As a conclusion of the analyses, we will create dedicated cache volumes for restore scenarios.
Other scenarios can be added with regard to future changes/requirements. The mechanism to expose and connect the cache volumes should work for all scenarios. E.g., if we need to consider the backup repository metadata case, we may need cache volumes for backup and repository maintenance as well; then we can just reuse the same cache volume provisioning and connection mechanism for the backup and repository maintenance scenarios.

### Cache Data and Lifecycle

If available, one cache volume is exclusively assigned to one data mover pod. That is, the cached data is destroyed when the data mover pod completes. Then the backup repository instance also closes.
Cache data is fully managed by the specific backup repository, so the backup repository may also have its own way to GC the cache data.
That is to say, cache data GC may be launched by the backup repository instance during the running of the data mover pod; the remaining data is then automatically destroyed when the data mover pod and the cache PVC are destroyed (the cache PVC's `reclaimPolicy` is always `Delete`, so once the cache PVC is destroyed, the volume will also be destroyed). So no special logic is needed for cache data GC.

### Data Size

Cache volumes take storage space and cluster resources (PVC, PV); therefore, cache volumes should be created only when necessary, and the volumes should be of a reasonable size based on the cache data size:
- It is not a good bargain to have cache volumes for small backups; small backups will use the resident cache location (the cache location in the root file system)
- The cache data size has a limit; the existing `cacheLimitMB` is used for this purpose. E.g., it could be set to 1024 for a 1TB backup, which means 1GB of data is cached and old cache data exceeding this size will be cleared. Therefore, it is meaningless to set the cache volume size much larger than `cacheLimitMB`

### Cache Volume Size

The cache volume size is calculated from the factors below (for Restore scenarios):
- **Limit**: The limit of the cache data, represented by `cacheLimitMB`; the default value is 5GB
- **backupSize**: The size of the backup, used as a reference to evaluate whether to create a cache volume. It doesn't mean the backup data really decides the cache data all the time; it is just a reference to evaluate the scale of the backup, since small-scale backups may need little cache data. Sometimes backupSize is irrelevant to the size of the cache data; in this case, ResidentThreshold should not be set and Limit will be used directly. It is unlikely that backupSize is unavailable, but once that happens, ResidentThreshold is ignored and Limit will be used directly.
- **ResidentThreshold**: The minimum backup size for which a cache volume is created
- **InflationPercentage**: Considering the overhead of the file system and the possible delay of the cache cleanup, there should be an inflation of the final volume size vs. the logical size; otherwise, the cache volume may be overrun. This inflation percentage is hardcoded, e.g., 20%.

A formula is as below:
```
cacheVolumeSize = ((backupSize != 0 ? (backupSize > residentThreshold ? limit : 0) : limit) * (100 + inflationPercentage)) / 100
```
Finally, the `cacheVolumeSize` will be rounded up to GiB for UX, storage, and management friendliness.
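To make the formula concrete, here is a small illustrative Go sketch of the calculation; the function and parameter names, and the GiB rounding helper, are assumptions for this sketch rather than the actual implementation:

```go
package repocache

const gib = int64(1) << 30

// cacheVolumeSize computes the cache volume size from the formula above.
// backupSize, residentThreshold and limit are in bytes; inflationPercentage
// is a whole percentage (e.g. 20 for 20%). Sketch only.
func cacheVolumeSize(backupSize, residentThreshold, limit, inflationPercentage int64) int64 {
	size := limit
	if backupSize != 0 && backupSize <= residentThreshold {
		// Small backup: no dedicated cache volume, the resident location is used.
		size = 0
	}

	// Inflate to leave room for file system overhead and delayed cache cleanup.
	size = size * (100 + inflationPercentage) / 100
	if size == 0 {
		return 0
	}

	// Round up to a whole GiB.
	return (size + gib - 1) / gib * gib
}
```

For example, a backup larger than the resident threshold with the default 5GB limit and 20% inflation would get a 6GiB cache volume.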
### PVC/PV

The PVC for a cache volume is created in the Velero namespace, and a storage class is required for the cache PVC. The PVC's accessMode is `ReadWriteOnce` and volumeMode is `FileSystem`, so the storage class provided should support this specification. Otherwise, if the storage class doesn't support either of the specifications, the data mover pod may hang in the `Pending` state until a timeout configured for the data movement (e.g. `prepareTimeout`) is reached, and the data movement will finally fail.
It is not expected that the cache volume is retained after the data mover pod is deleted, so the `reclaimPolicy` for the storage class must be `Delete`.

To detect problems in the storage class and fail earlier, a validation is applied to the storage class; once the validation fails, the cache configuration will be ignored, so the data mover pod will be created without a cache volume.

### Cache Volume Configurations

The configurations below are introduced:
- **residentThresholdMB**: the minimum data size (in MB) to be processed (if available) for which a cache volume is created
- **cacheStorageClass**: the name of the storage class used to provision the cache PVC

Unlike `cacheLimitMB`, which is set on and affects the backup repository, the above two configurations are actually data mover configurations describing how to create cache volumes for data mover pods; and the two configurations don't need to be per backup repository. So we add them to the node-agent configuration.

### Sample

Below are some examples of the node-agent configMap with the configurations:

Sample-1:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1",
        "residentThresholdMB": 1024
    }
}
```

Sample-2:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1"
    }
}
```

Sample-3:
```json
{
    "cacheVolume": {
        "residentThresholdMB": 1024
    }
}
```

**sample-1**: This is a valid configuration. Restores with a backup data size larger than 1G will be assigned a cache volume using storage class `sc-1`.
**sample-2**: This is a valid configuration. Data mover pods are always assigned a cache volume using storage class `sc-1`.
**sample-3**: This is not a valid configuration because the storage class is absent. Velero gives up creating a cache volume.

To create the configMap, users need to save something like the above sample to a json file and then run the command below:
```
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
```

The cache volume configurations are read by the node-agent server, so users also need to specify `--node-agent-configmap` in the `velero node-agent` parameters.

## Detailed Design

### Backup and Restore

The restore needs to know the backup size so as to calculate the cache volume size, so some new fields are added to the DataDownload and PodVolumeRestore CRDs.

A `snapshotSize` field is added to DataDownload and PodVolumeRestore's `spec`:
```yaml
spec:
  snapshotID:
    description: SnapshotID is the ID of the Velero backup snapshot to
      be restored from.
    type: string
  snapshotSize:
    description: SnapshotSize is the logical size of the snapshot.
    format: int64
    type: integer
```

`snapshotSize` represents the total size of the backup; during restore, the value is transferred from DataUpload/PodVolumeBackup's `Status.Progress.TotalBytes` to DataDownload/PodVolumeRestore.

It is unlikely that `Status.Progress.TotalBytes` from DataUpload/PodVolumeBackup is unavailable, but if that happens, according to the above formula, `residentThresholdMB` is ignored and the cache volume size is calculated directly from the cache limit for the corresponding backup repository.

### Exposer

Cache volume configurations are retrieved by node-agent and passed through DataDownload/PodVolumeRestore to the GenericRestore exposer/PodVolume exposer.
The exposers are responsible for calculating the cache volume size, creating cache PVCs, and mounting them to the restorePods.
If the calculated cache volume size is 0, or any of the critical parameters is missing (e.g., the cache volume storage class), the exposers ignore the cache volume configuration and continue with creating restorePods without cache volumes, so there is no impact on the result of the restore.

Exposers mount the cache volume to a predefined directory and pass the directory to the data mover pods through the `cache-volume-path` parameter.

The data structures below are added to the exposers' expose parameters:

```go
type GenericRestoreExposeParam struct {
    // RestoreSize specifies the data size for the volume to be restored
    RestoreSize int64

    // CacheVolume specifies the info for cache volumes
    CacheVolume *CacheVolumeInfo
}

type PodVolumeExposeParam struct {
    // RestoreSize specifies the data size for the volume to be restored
    RestoreSize int64

    // CacheVolume specifies the info for cache volumes
    CacheVolume *repocache.CacheConfigs
}

type CacheConfigs struct {
    // StorageClass specifies the storage class for cache volumes
    StorageClass string

    // Limit specifies the maximum size of the cache data
    Limit int64

    // ResidentThreshold specifies the minimum size of the cache data to create a cache volume
    ResidentThreshold int64
}
```

### Data Mover Pods

Data mover pods retrieve the cache volume directory from the `cache-volume-path` parameter and pass it to the Unified Repository.
If the directory is empty, the Unified Repository uses the resident location for the data cache, that is, the root file system.

### Kopia Repository

The Kopia repository supports cache directory configuration for both metadata and data. The existing `SetupConnectOptions` is modified to customize the `CacheDirectory`:

```go
func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions {
    ...

    return repo.ConnectOptions{
        CachingOptions: content.CachingOptions{
            CacheDirectory: cacheDir,
            ...
        },
        ...
    }
}
```


[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
[2]: Implemented/vgdp-micro-service/vgdp-micro-service.md
[3]: Implemented/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md
[4]: Implemented/repo_maintenance_job_config.md
[5]: Implemented/backup-repo-config.md
design/bsl-certificate-support_design.md (new file, 417 lines)

@@ -0,0 +1,417 @@
# Design for BSL Certificate Support Enhancement

## Abstract

This design document describes the enhancement of BackupStorageLocation (BSL) certificate management in Velero, introducing a Secret-based certificate reference mechanism (`caCertRef`) alongside the existing inline certificate field (`caCert`). This enhancement provides a more secure, Kubernetes-native approach to certificate management while enabling future CLI improvements for automatic certificate discovery.

## Background

Currently, Velero supports TLS certificate verification for object storage providers through an inline `caCert` field in the BSL specification. While functional, this approach has several limitations:

- **Security**: Certificates are stored directly in the BSL YAML, potentially exposing sensitive data
- **Management**: Certificate rotation requires updating the BSL resource itself
- **CLI Usability**: Users must manually specify certificates when using CLI commands
- **Size Limitations**: Large certificate bundles can make BSL resources unwieldy

Issue #9097 and PR #8557 highlight the need for improved certificate management that addresses these concerns while maintaining backward compatibility.

## Goals

- Provide a secure, Secret-based certificate storage mechanism
- Maintain full backward compatibility with existing BSL configurations
- Enable future CLI enhancements for automatic certificate discovery
- Simplify certificate rotation and management
- Provide a clear migration path for existing users

## Non-Goals

- Removing support for inline certificates immediately
- Changing the behavior of existing BSL configurations
- Implementing client-side certificate validation
- Supporting certificates from ConfigMaps or other resource types

## High-Level Design

### API Changes

#### New Field: CACertRef

```go
type ObjectStorageLocation struct {
    // Existing field (now deprecated)
    // +optional
    // +kubebuilder:deprecatedversion:warning="caCert is deprecated, use caCertRef instead"
    CACert []byte `json:"caCert,omitempty"`

    // New field for Secret reference
    // +optional
    CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
}
```

The `SecretKeySelector` follows standard Kubernetes patterns:
```go
type SecretKeySelector struct {
    // Name of the Secret
    Name string `json:"name"`
    // Key within the Secret
    Key string `json:"key"`
}
```

### Certificate Resolution Logic

The system follows a priority-based resolution:

1. If `caCertRef` is specified, retrieve the certificate from the referenced Secret
2. If `caCert` is specified (and `caCertRef` is not), use the inline certificate
3. If neither is specified, no custom CA certificate is used

### Validation

BSL validation ensures mutual exclusivity:
```go
func (bsl *BackupStorageLocation) Validate() error {
    if bsl.Spec.ObjectStorage != nil &&
        bsl.Spec.ObjectStorage.CACert != nil &&
        bsl.Spec.ObjectStorage.CACertRef != nil {
        return errors.New("cannot specify both caCert and caCertRef in objectStorage")
    }
    return nil
}
```

## Detailed Design

### BSL Controller Changes

The BSL controller incorporates validation during reconciliation:

```go
func (r *backupStorageLocationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    // ... existing code ...

    // Validate BSL configuration
    if err := location.Validate(); err != nil {
        r.logger.WithError(err).Error("BSL validation failed")
        return ctrl.Result{}, err
    }

    // ... continue reconciliation ...
}
```

### Repository Provider Integration

All repository providers implement consistent certificate handling:

```go
func configureCACert(bsl *velerov1api.BackupStorageLocation, credGetter *credentials.CredentialGetter) ([]byte, error) {
    if bsl.Spec.ObjectStorage == nil {
        return nil, nil
    }

    // Prefer caCertRef (new method)
    if bsl.Spec.ObjectStorage.CACertRef != nil {
        certString, err := credGetter.FromSecret.Get(bsl.Spec.ObjectStorage.CACertRef)
        if err != nil {
            return nil, errors.Wrap(err, "error getting CA certificate from secret")
        }
        return []byte(certString), nil
    }

    // Fall back to caCert (deprecated)
    if bsl.Spec.ObjectStorage.CACert != nil {
        return bsl.Spec.ObjectStorage.CACert, nil
    }

    return nil, nil
}
```

### CLI Certificate Discovery Integration

#### Background: PR #8557 Implementation
PR #8557 ("CLI automatically discovers and uses cacert from BSL") was merged in August 2025, introducing automatic CA certificate discovery from the BackupStorageLocation for Velero CLI download operations. This eliminated the need for users to manually specify the `--cacert` flag when performing operations like `backup describe`, `backup download`, `backup logs`, and `restore logs`.

#### Current Implementation (Post PR #8557)
The CLI now automatically discovers certificates from the BSL through the `pkg/cmd/util/cacert/bsl_cacert.go` module:

```go
// Current implementation only supports inline caCert
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
    // ... fetch BSL ...
    if bsl.Spec.ObjectStorage != nil && len(bsl.Spec.ObjectStorage.CACert) > 0 {
        return string(bsl.Spec.ObjectStorage.CACert), nil
    }
    return "", nil
}
```

#### Enhancement with caCertRef Support
This design extends the existing CLI certificate discovery to support the new `caCertRef` field:

```go
// Enhanced implementation supporting both caCert and caCertRef
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
    // ... fetch BSL ...

    // Prefer caCertRef over inline caCert
    if bsl.Spec.ObjectStorage.CACertRef != nil {
        secret := &corev1api.Secret{}
        key := types.NamespacedName{
            Name:      bsl.Spec.ObjectStorage.CACertRef.Name,
            Namespace: namespace,
        }
        if err := client.Get(ctx, key, secret); err != nil {
            return "", errors.Wrap(err, "error getting certificate secret")
        }

        certData, ok := secret.Data[bsl.Spec.ObjectStorage.CACertRef.Key]
        if !ok {
            return "", errors.Errorf("key %s not found in secret",
                bsl.Spec.ObjectStorage.CACertRef.Key)
        }
        return string(certData), nil
    }

    // Fall back to inline caCert (deprecated)
    if bsl.Spec.ObjectStorage.CACert != nil {
        return string(bsl.Spec.ObjectStorage.CACert), nil
    }

    return "", nil
}
```

#### Certificate Resolution Priority

The CLI follows this priority order for certificate resolution:

1. **`--cacert` flag** - Manual override, highest priority
2. **`caCertRef`** - Secret-based certificate (recommended)
3. **`caCert`** - Inline certificate (deprecated)
4. **System certificate pool** - Default fallback

#### User Experience Improvements

With both PR #8557 and this enhancement:

```bash
# Automatic discovery - works with both caCert and caCertRef
velero backup describe my-backup
velero backup download my-backup
velero backup logs my-backup
velero restore logs my-restore

# Manual override still available
velero backup describe my-backup --cacert /custom/ca.crt

# Debug output shows certificate source
velero backup download my-backup --log-level=debug
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert' key 'ca-bundle.crt'
```

#### RBAC Considerations for CLI

CLI users need read access to Secrets when using `caCertRef`:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: velero-cli-user
  namespace: velero
rules:
- apiGroups: ["velero.io"]
  resources: ["backups", "restores", "backupstoragelocations"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
  # Limited to secrets referenced by BSLs
```

### Migration Strategy

#### Phase 1: Introduction (Current)
- Add `caCertRef` field
- Mark `caCert` as deprecated
- Both fields supported, mutual exclusivity enforced

#### Phase 2: Migration Period
- Documentation and tools to help users migrate
- Warning messages for `caCert` usage
- CLI enhancements to leverage `caCertRef`

#### Phase 3: Future Removal
- Remove `caCert` field in major version update
- Provide migration tool for automatic conversion (see the sketch below)
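As a rough illustration of what such an automatic conversion could look like (not an implemented Velero tool; the client wiring, Secret name, and Secret key are assumptions, and `CACertRef` is the field proposed by this design), one could copy the inline certificate into a Secret and point `caCertRef` at it:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// migrateBSLCACert moves an inline caCert into a Secret and references it via
// the proposed caCertRef field. Sketch only; names are illustrative.
func migrateBSLCACert(ctx context.Context, c client.Client, bsl *velerov1.BackupStorageLocation) error {
	if bsl.Spec.ObjectStorage == nil || len(bsl.Spec.ObjectStorage.CACert) == 0 {
		return nil // nothing to migrate
	}

	// Store the inline certificate in a Secret in the BSL's namespace.
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-ca-cert", bsl.Name),
			Namespace: bsl.Namespace,
		},
		Data: map[string][]byte{"ca-bundle.crt": bsl.Spec.ObjectStorage.CACert},
	}
	if err := c.Create(ctx, secret); err != nil {
		return err
	}

	// Point the BSL at the Secret and drop the inline certificate.
	bsl.Spec.ObjectStorage.CACertRef = &corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{Name: secret.Name},
		Key:                  "ca-bundle.crt",
	}
	bsl.Spec.ObjectStorage.CACert = nil
	return c.Update(ctx, bsl)
}
```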

## User Experience

### Creating a BSL with Certificate Reference

1. Create a Secret containing the CA certificate:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: storage-ca-cert
  namespace: velero
type: Opaque
data:
  ca-bundle.crt: <base64-encoded-certificate>
```

2. Reference the Secret in the BSL:
```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: my-bucket
    caCertRef:
      name: storage-ca-cert
      key: ca-bundle.crt
```

### Certificate Rotation

With Secret-based certificates:
```bash
# Update the Secret with new certificate
kubectl create secret generic storage-ca-cert \
  --from-file=ca-bundle.crt=new-ca.crt \
  --dry-run=client -o yaml | kubectl apply -f -

# No BSL update required - changes take effect on next use
```

### CLI Usage Examples

#### Immediate Benefits
- No change required for existing workflows
- Certificate validation errors include helpful context

#### Future CLI Enhancements
```bash
# Automatic certificate discovery
velero backup download my-backup

# Manual override still available
velero backup download my-backup --cacert /custom/ca.crt

# Debug certificate resolution
velero backup download my-backup --log-level=debug
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert'
```

## Security Considerations

### Advantages of Secret-based Storage

1. **Encryption at Rest**: Secrets can be encrypted at rest in etcd
2. **RBAC Control**: Fine-grained access control via Kubernetes RBAC
3. **Audit Trail**: Secret access is auditable
4. **Separation of Concerns**: Certificates are kept separate from configuration

### Required Permissions

The Velero server requires additional RBAC permissions:
```yaml
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
  # Scoped to secrets referenced by BSLs
```

## Compatibility

### Backward Compatibility

- Existing BSLs with `caCert` continue to function unchanged
- No breaking changes to API
- Gradual migration path

### Forward Compatibility

- Design allows for future enhancements:
  - Multiple certificate support
  - Certificate chain validation
  - Automatic certificate discovery from cloud providers

## Implementation Phases

### Phase 1: Core Implementation ✓ (Current PR)
- API changes with new `caCertRef` field
- Controller validation
- Repository provider updates
- Basic testing

### Phase 2: CLI Enhancement (Future)
- Automatic certificate discovery in CLI
- Enhanced error messages
- Debug logging for certificate resolution

### Phase 3: Migration Tools (Future)
- Automated migration scripts
- Validation tools
- Documentation updates

## Testing

### Unit Tests
- BSL validation logic (see the sketch below)
- Certificate resolution in providers
- Controller behavior
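
A minimal sketch of a unit test for the mutual-exclusivity rule, assuming the hypothetical `validateCACertConfig` helper sketched earlier and assuming `caCertRef` is represented by a `corev1.SecretKeySelector` with `Name` and `Key` as used in the snippets above:

```go
func TestValidateCACertConfig(t *testing.T) {
	// newBSL builds a BSL with the given inline caCert and/or caCertRef
	// (CACertRef is the field proposed by this design).
	newBSL := func(caCert []byte, ref *corev1api.SecretKeySelector) *velerov1api.BackupStorageLocation {
		return &velerov1api.BackupStorageLocation{
			Spec: velerov1api.BackupStorageLocationSpec{
				StorageType: velerov1api.StorageType{
					ObjectStorage: &velerov1api.ObjectStorageLocation{
						CACert:    caCert,
						CACertRef: ref,
					},
				},
			},
		}
	}
	ref := &corev1api.SecretKeySelector{
		LocalObjectReference: corev1api.LocalObjectReference{Name: "storage-ca-cert"},
		Key:                  "ca-bundle.crt",
	}

	assert.NoError(t, validateCACertConfig(newBSL(nil, nil), logrus.New()))
	assert.NoError(t, validateCACertConfig(newBSL([]byte("PEM"), nil), logrus.New()))
	assert.NoError(t, validateCACertConfig(newBSL(nil, ref), logrus.New()))
	assert.Error(t, validateCACertConfig(newBSL([]byte("PEM"), ref), logrus.New()))
}
```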

### Integration Tests
- End-to-end backup/restore with `caCertRef`
- Certificate rotation scenarios
- Migration from `caCert` to `caCertRef`

### Manual Testing Scenarios
1. Create BSL with `caCertRef`
2. Perform backup/restore operations
3. Rotate certificate in Secret
4. Verify continued operation

## Documentation

### User Documentation
- Migration guide from `caCert` to `caCertRef`
- Examples for common cloud providers
- Troubleshooting guide

### API Documentation
- Updated API reference
- Deprecation notices
- Field descriptions

## Alternatives Considered

### ConfigMap-based Storage
- Pros: Similar to Secrets, simpler API
- Cons: Not designed for sensitive data, no encryption at rest
- Decision: Secrets are the Kubernetes standard for sensitive data

### External Certificate Management
- Pros: Integration with cert-manager and similar tooling
- Cons: Additional complexity and dependencies
- Decision: Keep it simple; allow users to manage certificates as needed

### Immediate Removal of Inline Certificates
- Pros: Cleaner API, forces best practices
- Cons: Breaking change, migration burden
- Decision: Gradual deprecation respects existing users

## Conclusion

This design provides a secure, Kubernetes-native approach to certificate management in Velero while maintaining backward compatibility. It establishes the foundation for enhanced CLI functionality and an improved user experience, addressing the concerns raised in issue #9097 and enabling the features proposed in PR #8557.

The phased approach ensures a smooth migration for existing users while delivering immediate security benefits for new deployments.

115
design/wildcard-namespace-support-design.md
Normal file
@@ -0,0 +1,115 @@

# Wildcard Namespace Support

## Abstract

Velero currently treats namespace patterns with glob characters as literal strings. This design adds wildcard expansion to support flexible namespace selection using patterns like `app-*` or `test-{dev,staging}`.

## Background

Requested in [#1874](https://github.com/vmware-tanzu/velero/issues/1874) for more flexible namespace selection.

## Goals

- Support glob pattern expansion in namespace includes/excludes
- Maintain backward compatibility with existing `*` behavior

## Non-Goals

- Complex regex patterns beyond basic globs

## High-Level Design

Wildcard expansion occurs early in both backup and restore flows, converting patterns to literal namespace lists before normal processing.

### Backup Flow

Expansion happens in `getResourceItems()` before namespace collection:
1. Check if wildcards exist using `ShouldExpandWildcards()`
2. Expand patterns against active cluster namespaces
3. Replace includes/excludes with expanded literal namespaces
4. Continue with normal backup processing

### Restore Flow

Expansion occurs in `execute()` after parsing backup contents:
1. Extract available namespaces from the backup tar
2. Expand patterns against backup namespaces (not cluster namespaces)
3. Update restore context with expanded namespaces
4. Continue with normal restore processing

This ensures restore wildcards match actual backup contents, not current cluster state.

## Detailed Design

### Status Fields

Add wildcard expansion tracking to backup and restore CRDs:

```go
type WildcardNamespaceStatus struct {
	// IncludeWildcardMatches records namespaces that matched include patterns
	// +optional
	IncludeWildcardMatches []string `json:"includeWildcardMatches,omitempty"`

	// ExcludeWildcardMatches records namespaces that matched exclude patterns
	// +optional
	ExcludeWildcardMatches []string `json:"excludeWildcardMatches,omitempty"`

	// WildcardResult records final namespaces after wildcard processing
	// +optional
	WildcardResult []string `json:"wildcardResult,omitempty"`
}

// Added to both BackupStatus and RestoreStatus
type BackupStatus struct {
	// WildcardNamespaces contains wildcard expansion results
	// +optional
	WildcardNamespaces *WildcardNamespaceStatus `json:"wildcardNamespaces,omitempty"`
}
```

### Wildcard Expansion Package

A new `pkg/util/wildcard/expand.go` package (sketched below) provides:

- `ShouldExpandWildcards()` - Skip expansion for the simple "*" case
- `ExpandWildcards()` - Main expansion function using `github.com/gobwas/glob`
- Pattern validation rejecting unsupported regex symbols

**Supported patterns**: `*`, `?`, `[abc]`, `{a,b,c}`
**Unsupported**: `|()`, `**`
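
A minimal sketch of what the package could look like using `github.com/gobwas/glob`; the signatures match those used in the integration snippets below, but the bodies are illustrative and the validation of unsupported symbols is omitted:

```go
package wildcard

import (
	"strings"

	"github.com/gobwas/glob"
	"github.com/pkg/errors"
)

// ShouldExpandWildcards reports whether any include/exclude entry contains a
// glob pattern; the plain "*" include keeps its existing meaning and is skipped.
func ShouldExpandWildcards(includes, excludes []string) bool {
	if len(includes) == 1 && includes[0] == "*" && len(excludes) == 0 {
		return false
	}
	for _, ns := range append(append([]string{}, includes...), excludes...) {
		if strings.ContainsAny(ns, "*?[{") {
			return true
		}
	}
	return false
}

// ExpandWildcards matches the patterns against the available namespaces and
// returns literal include and exclude namespace lists.
func ExpandWildcards(available, includes, excludes []string) ([]string, []string, error) {
	expand := func(patterns []string) ([]string, error) {
		var out []string
		for _, p := range patterns {
			g, err := glob.Compile(p)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid namespace pattern %q", p)
			}
			for _, ns := range available {
				if g.Match(ns) {
					out = append(out, ns)
				}
			}
		}
		return out, nil
	}

	expandedIncludes, err := expand(includes)
	if err != nil {
		return nil, nil, err
	}
	expandedExcludes, err := expand(excludes)
	if err != nil {
		return nil, nil, err
	}
	return expandedIncludes, expandedExcludes, nil
}
```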

### Implementation Details

#### Backup Integration (`pkg/backup/item_collector.go`)

Expansion in `getResourceItems()` (a sketch follows this list):
- Call `wildcard.ExpandWildcards()` with cluster namespaces
- Update `NamespaceIncludesExcludes` with expanded results
- Populate status fields with expansion results
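
A sketch of the corresponding hook in `getResourceItems()`, mirroring the restore snippet below; the `clusterNamespaces` variable, the `collections` helper, and the status wiring are illustrative rather than final:

```go
includes := backupRequest.Spec.IncludedNamespaces
excludes := backupRequest.Spec.ExcludedNamespaces

if wildcard.ShouldExpandWildcards(includes, excludes) {
	// clusterNamespaces: namespace names listed from the live cluster
	expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
		clusterNamespaces, includes, excludes)
	if err != nil {
		return nil, errors.Wrap(err, "error expanding namespace wildcards")
	}

	// Replace the includes/excludes used for collection with literal namespaces.
	backupRequest.NamespaceIncludesExcludes = collections.NewIncludesExcludes().
		Includes(expandedIncludes...).Excludes(expandedExcludes...)

	// Record the expansion in the backup status.
	backupRequest.Status.WildcardNamespaces = &velerov1api.WildcardNamespaceStatus{
		IncludeWildcardMatches: expandedIncludes,
		ExcludeWildcardMatches: expandedExcludes,
	}
}
```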

#### Restore Integration (`pkg/restore/restore.go`)

Expansion in `execute()`:
```go
if wildcard.ShouldExpandWildcards(includes, excludes) {
	availableNamespaces := extractNamespacesFromBackup(backupResources)
	expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
		availableNamespaces, includes, excludes)
	// Update context and status
}
```

## Alternatives Considered

1. **Client-side expansion**: Rejected because it wouldn't work for scheduled backups
2. **Expansion in `collectNamespaces`**: Rejected because these functions expect literal namespaces

## Compatibility

Maintains full backward compatibility - existing `*` behavior unchanged.

## Implementation

Target: Velero 1.18

122
go.mod
@@ -1,14 +1,14 @@
|
||||
module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.24.0
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.55.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
|
||||
cloud.google.com/go/storage v1.57.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.14
|
||||
@@ -31,23 +31,22 @@ require (
|
||||
github.com/onsi/gomega v1.36.1
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/mod v0.26.0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/api v0.241.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
go.uber.org/zap v1.27.1
|
||||
golang.org/x/mod v0.30.0
|
||||
golang.org/x/oauth2 v0.33.0
|
||||
golang.org/x/text v0.31.0
|
||||
google.golang.org/api v0.256.0
|
||||
google.golang.org/grpc v1.77.0
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.33.3
|
||||
k8s.io/apiextensions-apiserver v0.33.3
|
||||
@@ -64,19 +63,19 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.23.0 // indirect
|
||||
cloud.google.com/go v0.121.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.6 // indirect
|
||||
cloud.google.com/go/auth v0.17.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
|
||||
@@ -94,18 +93,18 @@ require (
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.2.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
@@ -113,36 +112,36 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gofrs/flock v0.13.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.2 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.3 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/klauspost/crc32 v1.3.0 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.4 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.6 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/crc64nvme v1.0.1 // indirect
|
||||
github.com/minio/crc64nvme v1.1.0 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.94 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.97 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
@@ -154,43 +153,44 @@ require (
|
||||
github.com/natefinch/atomic v1.0.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/oklog/run v1.0.0 // indirect
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
|
||||
github.com/philhofer/fwd v1.2.0 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tinylib/msgp v1.3.0 // indirect
|
||||
github.com/vladimirvivien/gexe v0.1.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.4 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
@@ -198,4 +198,4 @@ require (
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197
|
||||
|
||||
270
go.sum
@@ -1,7 +1,7 @@
|
||||
al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
|
||||
al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
|
||||
cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss=
|
||||
cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
@@ -24,10 +24,10 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw=
|
||||
cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
|
||||
cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
|
||||
cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
|
||||
cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
|
||||
cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
|
||||
cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
|
||||
cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
@@ -36,8 +36,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
|
||||
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
@@ -45,8 +45,8 @@ cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E=
|
||||
cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -59,19 +59,19 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0=
|
||||
cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=
|
||||
cloud.google.com/go/storage v1.57.2 h1:sVlym3cHGYhrp6XZKkKb+92I1V42ks2qKKpB0CF5Mb4=
|
||||
cloud.google.com/go/storage v1.57.2/go.mod h1:n5ijg4yiRXXpCu0sJTD6k+eMf7GRrJmPyr9YxLXGHOk=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
|
||||
@@ -80,10 +80,10 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsI
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
@@ -95,20 +95,20 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI=
|
||||
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5/go.mod h1:exZ0C/1emQJAw5tHOaUDyY1ycttqBAPcxuzf7QbY6ec=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@@ -189,8 +189,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
@@ -211,8 +211,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
@@ -229,10 +227,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
@@ -266,8 +264,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
@@ -301,21 +299,19 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
|
||||
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -403,12 +399,12 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
|
||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
@@ -424,12 +420,12 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hanwen/go-fuse/v2 v2.8.0 h1:wV8rG7rmCz8XHSOwBZhG5YcVqcYjkzivjmbaMafPlAs=
|
||||
github.com/hanwen/go-fuse/v2 v2.8.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
|
||||
github.com/hanwen/go-fuse/v2 v2.9.0 h1:0AOGUkHtbOVeyGLr0tXupiid1Vg7QB7M6YUcdmVdC58=
|
||||
github.com/hanwen/go-fuse/v2 v2.9.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
|
||||
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
||||
@@ -486,18 +482,20 @@ github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXw
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
|
||||
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA=
|
||||
github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU=
|
||||
github.com/klauspost/reedsolomon v1.12.6 h1:8pqE9aECQG/ZFitiUD1xK/E83zwosBAZtE3UbuZM8TQ=
|
||||
github.com/klauspost/reedsolomon v1.12.6/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557 h1:je1C/xnmKxnaJsIgj45me5qA51TgtK9uMwTxgDw+9H0=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d h1:U3VB/cDMsPW4zB4JRFbVRDzIpPytt889rJUKAG40NPA=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -535,12 +533,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
|
||||
github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM=
|
||||
github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
|
||||
github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ=
|
||||
github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
@@ -599,8 +597,8 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
|
||||
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
@@ -617,12 +615,12 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777 h1:T7t+u+mnF33qFTDq7bIMSMB51BEA8zkD7aU6tFQNZ6E=
|
||||
github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777/go.mod h1:qlSnPHrsV8eEeU4l4zqEw8mJ5CUeXr7PDiJNI4r4Bus=
|
||||
github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197 h1:iGkfuELGvFCqW+zcrhf2GsOwNH1nWYBsC69IOc57KJk=
|
||||
github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197/go.mod h1:RL4KehCNKEIDNltN7oruSa3ldwBNVPmQbwmN3Schbjc=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -630,22 +628,20 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
|
||||
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -683,8 +679,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -702,8 +698,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tg123/go-htpasswd v1.2.4 h1:HgH8KKCjdmo7jjXWN9k1nefPBd7Be3tFCTjc2jPraPU=
|
||||
github.com/tg123/go-htpasswd v1.2.4/go.mod h1:EKThQok9xHkun6NBMynNv6Jmu24A33XdZzzl4Q7H1+0=
|
||||
@@ -731,8 +727,6 @@ github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
|
||||
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
||||
github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
|
||||
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
@@ -746,26 +740,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
go.starlark.net v0.0.0-20201006213952-227f4aabceb5/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
|
||||
@@ -780,8 +774,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
|
||||
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -794,8 +790,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -833,8 +829,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -880,8 +876,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -895,8 +891,8 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
||||
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -908,8 +904,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -973,14 +969,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -990,14 +986,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1051,14 +1047,16 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@@ -1081,8 +1079,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE=
|
||||
google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
|
||||
google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
|
||||
google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1134,12 +1132,12 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@@ -1161,8 +1159,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1176,8 +1174,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
FROM --platform=$TARGETPLATFORM golang:1.25-bookworm

ARG GOPROXY

@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
chmod +x /usr/bin/goreleaser

# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0

# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl

@@ -103,6 +103,14 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(

snapCont.ResourceVersion = ""

if snapCont.Spec.VolumeSnapshotClassName != nil {
// Delete VolumeSnapshotClass from the VolumeSnapshotContent.
// This is necessary to make the deletion independent of the VolumeSnapshotClass.
snapCont.Spec.VolumeSnapshotClassName = nil
p.log.Debugf("Deleted VolumeSnapshotClassName from VolumeSnapshotContent %s to make deletion independent of VolumeSnapshotClass",
snapCont.Name)
}

if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
}

@@ -70,7 +70,7 @@ func TestVSCExecute(t *testing.T) {
},
{
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).VolumeSnapshotClassName("volumesnapshotclass").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: false,
function: func(
@@ -82,7 +82,7 @@ func TestVSCExecute(t *testing.T) {
},
},
{
name: "Normal case, VolumeSnapshot should be deleted",
name: "Error case, deletion fails",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: true,

@@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
hookLog.Error(err)
errors = append(errors, err)

errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
if errTracker != nil {
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
}
@@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
hookFailed = true
}

errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, hookFailed, hookErr)
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, hookFailed, hookErr)
if errTracker != nil {
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
}
@@ -239,7 +239,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
// containers to become ready.
// Each unexecuted hook is logged as an error and this error will be returned from this function.
for _, hooks := range byContainer {
for i, hook := range hooks {
for _, hook := range hooks {
if hook.executed {
continue
}
@@ -252,7 +252,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
},
)

errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
if errTracker != nil {
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
}

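The hunks above replace the per-container slice index i with hook.hookIndex when recording into the hook tracker: the hooks grouped under one container can keep non-sequential original indices (the new test below uses 0, 2, and 4), so keying the tracker by slice position can update the wrong entry or miss one entirely. A minimal sketch of the intended call pattern, only meaningful inside HandleHooks where byContainer, multiHookTracker, restoreName, pod, and hookLog are in scope; the container name is illustrative:

for _, hook := range byContainer["container1"] {
// hookIndex is the position the hook had when the hook list was first built,
// so Record resolves to the entry created for this exact hook.
if errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, false, nil); errTracker != nil {
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
}
}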
@@ -706,6 +706,130 @@ func TestWaitExecHandleHooks(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple hooks with non-sequential indices (bug #9359)",
|
||||
initialPod: builder.ForPod("default", "my-pod").
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
groupResource: "pods",
|
||||
byContainer: map[string][]PodExecRestoreHook{
|
||||
"container1": {
|
||||
{
|
||||
HookName: "first-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/foo"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 0,
|
||||
},
|
||||
{
|
||||
HookName: "second-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/bar"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 2,
|
||||
},
|
||||
{
|
||||
HookName: "third-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/third"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedExecutions: []expectedExecution{
|
||||
{
|
||||
name: "first-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/foo"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "second-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/bar"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "third-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/third"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
},
|
||||
expectedErrors: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -146,6 +146,9 @@ func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
if len(con.PVCLabels) > 0 {
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
}
if len(con.PVCPhase) > 0 {
volP.conditions = append(volP.conditions, &pvcPhaseCondition{phases: con.PVCPhase})
}
p.volumePolicies = append(p.volumePolicies, volP)
}

@@ -191,6 +194,9 @@ func (p *Policies) GetMatchAction(res any) (*Action, error) {
if data.PVC != nil {
volume.parsePVC(data.PVC)
}
case data.PVC != nil:
// Handle PVC-only scenarios (e.g., unbound PVCs)
volume.parsePVC(data.PVC)
default:
return nil, errors.New("failed to convert object")
}

@@ -983,6 +983,69 @@ volumePolicies:
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Pending phase should skip",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-pending",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Bound phase should not skip",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-bound",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimBound,
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Multiple phases (Pending, Lost)",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending", "Lost"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-lost",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimLost,
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@@ -1059,32 +1122,53 @@ func TestParsePVC(t *testing.T) {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
expectedLabels map[string]string
|
||||
expectedPhase string
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid PVC with labels",
|
||||
name: "valid PVC with labels and Pending phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"env": "prod"},
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
expectedLabels: map[string]string{"env": "prod"},
|
||||
expectedPhase: "Pending",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid PVC with empty labels",
|
||||
name: "valid PVC with Bound phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimBound,
|
||||
},
|
||||
},
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "Bound",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid PVC with Lost phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimLost,
|
||||
},
|
||||
},
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "Lost",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "nil PVC pointer",
|
||||
pvc: (*corev1api.PersistentVolumeClaim)(nil),
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "",
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
@@ -1095,6 +1179,66 @@ func TestParsePVC(t *testing.T) {
|
||||
s.parsePVC(tc.pvc)
|
||||
|
||||
assert.Equal(t, tc.expectedLabels, s.pvcLabels)
|
||||
assert.Equal(t, tc.expectedPhase, s.pvcPhase)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPVCPhaseMatch(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
condition *pvcPhaseCondition
|
||||
volume *structuredVolume
|
||||
expectedMatch bool
|
||||
}{
|
||||
{
|
||||
name: "match Pending phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match multiple phases - Pending matches",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending", "Bound"}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match multiple phases - Bound matches",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending", "Bound"}},
|
||||
volume: &structuredVolume{pvcPhase: "Bound"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "no match for different phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: "Bound"},
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "no match for empty phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: ""},
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "match with empty phases list (always match)",
|
||||
condition: &pvcPhaseCondition{phases: []string{}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match with nil phases list (always match)",
|
||||
condition: &pvcPhaseCondition{phases: nil},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.condition.match(tc.volume)
|
||||
assert.Equal(t, tc.expectedMatch, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ type structuredVolume struct {
csi *csiVolumeSource
volumeType SupportedVolume
pvcLabels map[string]string
pvcPhase string
}

func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
@@ -70,8 +71,11 @@ func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
}

func (s *structuredVolume) parsePVC(pvc *corev1api.PersistentVolumeClaim) {
if pvc != nil && len(pvc.GetLabels()) > 0 {
s.pvcLabels = pvc.Labels
if pvc != nil {
if len(pvc.GetLabels()) > 0 {
s.pvcLabels = pvc.Labels
}
s.pvcPhase = string(pvc.Status.Phase)
}
}

@@ -110,6 +114,31 @@ func (c *pvcLabelsCondition) validate() error {
return nil
}

// pvcPhaseCondition defines a condition that matches if the PVC's phase matches any of the provided phases.
type pvcPhaseCondition struct {
phases []string
}

func (c *pvcPhaseCondition) match(v *structuredVolume) bool {
// No phases specified: always match.
if len(c.phases) == 0 {
return true
}
if v.pvcPhase == "" {
return false
}
for _, phase := range c.phases {
if v.pvcPhase == phase {
return true
}
}
return false
}

func (c *pvcPhaseCondition) validate() error {
return nil
}

type capacityCondition struct {
capacity capacity
}

@@ -46,6 +46,7 @@ type volumeConditions struct {
CSI *csiVolumeSource `yaml:"csi,omitempty"`
VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"`
PVCLabels map[string]string `yaml:"pvcLabels,omitempty"`
PVCPhase []string `yaml:"pvcPhase,omitempty"`
}

func (c *capacityCondition) validate() error {

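Taken together, the new pvcPhase handling works in three steps: parsePVC records the claim's Status.Phase on the structuredVolume, volumeConditions gains a pvcPhase list, and pvcPhaseCondition.match reports whether the recorded phase is in that list. A minimal sketch of that flow, using only the unexported types above (so it compiles only inside the resourcepolicies package); the literal values are illustrative:

pvc := &corev1api.PersistentVolumeClaim{
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimPending},
}
vol := &structuredVolume{}
vol.parsePVC(pvc) // stores pvcPhase = "Pending" (and labels, if any)
cond := &pvcPhaseCondition{phases: []string{"Pending", "Lost"}}
matched := cond.match(vol) // true: "Pending" is one of the configured phases
_ = matched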
@@ -170,6 +170,9 @@ type SnapshotDataMovementInfo struct {
// Moved snapshot data size.
Size int64 `json:"size"`

// Moved snapshot incremental size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`

// The DataUpload's Status.Phase value
Phase velerov2alpha1.DataUploadPhase
}
@@ -217,6 +220,9 @@ type PodVolumeInfo struct {
// The snapshot corresponding volume size.
Size int64 `json:"size,omitempty"`

// The incremental snapshot size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`

// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
UploaderType string `json:"uploaderType"`

@@ -240,14 +246,15 @@ type PodVolumeInfo struct {

func newPodVolumeInfoFromPVB(pvb *velerov1api.PodVolumeBackup) *PodVolumeInfo {
return &PodVolumeInfo{
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
IncrementalSize: pvb.Status.IncrementalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
}
}


@@ -1,9 +1,11 @@
|
||||
package volumehelper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -11,6 +13,7 @@ import (
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
@@ -33,8 +36,16 @@ type volumeHelperImpl struct {
|
||||
// to the volume policy check, but fs-backup is based on the pod resource,
|
||||
// the resource filter on PVC and PV doesn't work on this scenario.
|
||||
backupExcludePVC bool
|
||||
// pvcPodCache provides cached PVC to Pod mappings for improved performance.
|
||||
// When there are many PVCs and pods, using this cache avoids O(N*M) lookups.
|
||||
pvcPodCache *podvolumeutil.PVCPodCache
|
||||
}
|
||||
|
||||
// NewVolumeHelperImpl creates a VolumeHelper without PVC-to-Pod caching.
|
||||
//
|
||||
// Deprecated: Use NewVolumeHelperImplWithNamespaces or NewVolumeHelperImplWithCache instead
|
||||
// for better performance. These functions provide PVC-to-Pod caching which avoids O(N*M)
|
||||
// complexity when there are many PVCs and pods. See issue #9179 for details.
|
||||
func NewVolumeHelperImpl(
|
||||
volumePolicy *resourcepolicies.Policies,
|
||||
snapshotVolumes *bool,
|
||||
@@ -43,6 +54,43 @@ func NewVolumeHelperImpl(
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
) VolumeHelper {
|
||||
// Pass nil namespaces - no cache will be built, so this never fails.
|
||||
// This is used by plugins that don't need the cache optimization.
|
||||
vh, _ := NewVolumeHelperImplWithNamespaces(
|
||||
volumePolicy,
|
||||
snapshotVolumes,
|
||||
logger,
|
||||
client,
|
||||
defaultVolumesToFSBackup,
|
||||
backupExcludePVC,
|
||||
nil,
|
||||
)
|
||||
return vh
|
||||
}
|
||||
|
||||
// NewVolumeHelperImplWithNamespaces creates a VolumeHelper with a PVC-to-Pod cache for improved performance.
|
||||
// The cache is built internally from the provided namespaces list.
|
||||
// This avoids O(N*M) complexity when there are many PVCs and pods.
|
||||
// See issue #9179 for details.
|
||||
// Returns an error if cache building fails - callers should not proceed with backup in this case.
|
||||
func NewVolumeHelperImplWithNamespaces(
|
||||
volumePolicy *resourcepolicies.Policies,
|
||||
snapshotVolumes *bool,
|
||||
logger logrus.FieldLogger,
|
||||
client crclient.Client,
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
namespaces []string,
|
||||
) (VolumeHelper, error) {
|
||||
var pvcPodCache *podvolumeutil.PVCPodCache
|
||||
if len(namespaces) > 0 {
|
||||
pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
if err := pvcPodCache.BuildCacheForNamespaces(context.Background(), namespaces, client); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logger.Infof("Built PVC-to-Pod cache for %d namespaces", len(namespaces))
|
||||
}
|
||||
|
||||
return &volumeHelperImpl{
|
||||
volumePolicy: volumePolicy,
|
||||
snapshotVolumes: snapshotVolumes,
|
||||
@@ -50,7 +98,33 @@ func NewVolumeHelperImpl(
|
||||
client: client,
|
||||
defaultVolumesToFSBackup: defaultVolumesToFSBackup,
|
||||
backupExcludePVC: backupExcludePVC,
|
||||
pvcPodCache: pvcPodCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewVolumeHelperImplWithCache creates a VolumeHelper using an externally managed PVC-to-Pod cache.
|
||||
// This is used by plugins that build the cache lazily per-namespace (following the pattern from PR #9226).
|
||||
// The cache can be nil, in which case PVC-to-Pod lookups will fall back to direct API calls.
|
||||
func NewVolumeHelperImplWithCache(
|
||||
backup velerov1api.Backup,
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
pvcPodCache *podvolumeutil.PVCPodCache,
|
||||
) (VolumeHelper, error) {
|
||||
resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(backup, client, logger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get volume policies from backup")
|
||||
}
|
||||
|
||||
return &volumeHelperImpl{
|
||||
volumePolicy: resourcePolicies,
|
||||
snapshotVolumes: backup.Spec.SnapshotVolumes,
|
||||
logger: logger,
|
||||
client: client,
|
||||
defaultVolumesToFSBackup: boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
|
||||
backupExcludePVC: boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData),
|
||||
pvcPodCache: pvcPodCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
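The three constructors above differ mainly in who builds the PVC-to-Pod cache: NewVolumeHelperImpl skips it, NewVolumeHelperImplWithNamespaces builds it up front from a namespace list, and NewVolumeHelperImplWithCache accepts one that the caller manages. A minimal sketch of the up-front variant as a backup-time caller might use it; policies, logger, kubeClient, and the namespace list are illustrative placeholders for values the caller already has:

namespaces := []string{"ns-1", "ns-2"} // namespaces included in the backup
vh, err := NewVolumeHelperImplWithNamespaces(
policies, // *resourcepolicies.Policies, may be nil
backup.Spec.SnapshotVolumes,
logger,
kubeClient, // crclient.Client
boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData),
namespaces, // non-empty, so BuildCacheForNamespaces runs once here
)
if err != nil {
return err // cache construction failed; don't continue the backup with partial data
}
_ = vh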
func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) {
|
||||
@@ -105,10 +179,12 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
// If this PV is claimed, see if we've already taken a (pod volume backup)
|
||||
// snapshot of the contents of this PV. If so, don't take a snapshot.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
pods, err := podvolumeutil.GetPodsUsingPVC(
|
||||
// Use cached lookup if available for better performance with many PVCs/pods
|
||||
pods, err := podvolumeutil.GetPodsUsingPVCWithCache(
|
||||
pv.Spec.ClaimRef.Namespace,
|
||||
pv.Spec.ClaimRef.Name,
|
||||
v.client,
|
||||
v.pvcPodCache,
|
||||
)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get pod for PV %s", pv.Name)
|
||||
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
|
||||
)
|
||||
|
||||
func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
@@ -738,3 +739,498 @@ func TestGetVolumeFromResource(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "resource is not a PersistentVolume or Volume")
|
||||
})
|
||||
}
|
||||
|
||||
func TestVolumeHelperImplWithCache_ShouldPerformSnapshot(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
inputObj runtime.Object
|
||||
groupResource schema.GroupResource
|
||||
pod *corev1api.Pod
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
snapshotVolumesFlag *bool
|
||||
defaultVolumesToFSBackup bool
|
||||
buildCache bool
|
||||
shouldSnapshot bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumePolicy match with cache, returns true",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
buildCache: true,
|
||||
shouldSnapshot: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumePolicy not match, fs-backup via opt-out with cache, skips snapshot",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp3-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
pod: builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
defaultVolumesToFSBackup: true,
|
||||
buildCache: true,
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Cache not built, falls back to direct lookup",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
buildCache: false,
|
||||
shouldSnapshot: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "No volume policy, defaultVolumesToFSBackup with cache, skips snapshot",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
pod: builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result(),
|
||||
resourcePolicies: nil,
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
defaultVolumesToFSBackup: true,
|
||||
buildCache: true,
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
objs := []runtime.Object{
|
||||
&corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
if tc.pod != nil {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var namespaces []string
|
||||
if tc.buildCache {
|
||||
namespaces = []string{"ns"}
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithNamespaces(
|
||||
p,
|
||||
tc.snapshotVolumesFlag,
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
tc.defaultVolumesToFSBackup,
|
||||
false,
|
||||
namespaces,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputObj)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, tc.groupResource)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError)
|
||||
return
|
||||
}
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumeHelperImplWithCache_ShouldPerformFSBackup(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
resources []runtime.Object
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
snapshotVolumesFlag *bool
|
||||
defaultVolumesToFSBackup bool
|
||||
buildCache bool
|
||||
shouldFSBackup bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumePolicy match with cache, return true",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.FSBackup,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
buildCache: true,
|
||||
shouldFSBackup: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumePolicy match with cache, action is snapshot, return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
buildCache: true,
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Cache not built, falls back to direct lookup, opt-in annotation",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToBackupAnnotation, "vol-1")).
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
buildCache: false,
|
||||
defaultVolumesToFSBackup: false,
|
||||
shouldFSBackup: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.resources...)
|
||||
if tc.pod != nil {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var namespaces []string
|
||||
if tc.buildCache {
|
||||
namespaces = []string{"ns"}
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithNamespaces(
|
||||
p,
|
||||
tc.snapshotVolumesFlag,
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
tc.defaultVolumesToFSBackup,
|
||||
false,
|
||||
namespaces,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError)
|
||||
return
|
||||
}
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewVolumeHelperImplWithCache tests the NewVolumeHelperImplWithCache constructor
|
||||
// which is used by plugins that build the cache lazily per-namespace.
|
||||
func TestNewVolumeHelperImplWithCache(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
backup velerov1api.Backup
|
||||
resourcePolicyConfigMap *corev1api.ConfigMap
|
||||
pvcPodCache bool // whether to pass a cache
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "creates VolumeHelper with nil cache",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(false),
|
||||
},
|
||||
},
|
||||
pvcPodCache: false,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "creates VolumeHelper with non-nil cache",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(true),
|
||||
SnapshotMoveData: ptr.To(true),
|
||||
},
|
||||
},
|
||||
pvcPodCache: true,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "creates VolumeHelper with resource policies",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "resource-policy",
|
||||
},
|
||||
},
|
||||
},
|
||||
resourcePolicyConfigMap: &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "resource-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"policy": `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
storageClass:
|
||||
- gp2-csi
|
||||
action:
|
||||
type: snapshot`,
|
||||
},
|
||||
},
|
||||
pvcPodCache: true,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "fails when resource policy ConfigMap not found",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "non-existent-policy",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvcPodCache: false,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var objs []runtime.Object
|
||||
if tc.resourcePolicyConfigMap != nil {
|
||||
objs = append(objs, tc.resourcePolicyConfigMap)
|
||||
}
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
var cache *podvolumeutil.PVCPodCache
|
||||
if tc.pvcPodCache {
|
||||
cache = podvolumeutil.NewPVCPodCache()
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithCache(
|
||||
tc.backup,
|
||||
fakeClient,
|
||||
logrus.StandardLogger(),
|
||||
cache,
|
||||
)
|
||||
|
||||
if tc.expectError {
|
||||
require.Error(t, err)
|
||||
require.Nil(t, vh)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewVolumeHelperImplWithCache_UsesCache verifies that the VolumeHelper created
|
||||
// via NewVolumeHelperImplWithCache actually uses the provided cache for lookups.
|
||||
func TestNewVolumeHelperImplWithCache_UsesCache(t *testing.T) {
|
||||
// Create a pod that uses a PVC via opt-out (defaultVolumesToFsBackup=true)
|
||||
pod := builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result()
|
||||
|
||||
pvc := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
}
|
||||
|
||||
pv := builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result()
|
||||
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, pvc, pv, pod)
|
||||
|
||||
// Build cache for the namespace
|
||||
cache := podvolumeutil.NewPVCPodCache()
|
||||
err := cache.BuildCacheForNamespace(t.Context(), "ns", fakeClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
backup := velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(true), // opt-out mode
|
||||
},
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithCache(backup, fakeClient, logrus.StandardLogger(), cache)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert PV to unstructured
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pv)
|
||||
require.NoError(t, err)
|
||||
|
||||
// ShouldPerformSnapshot should return false because the volume is selected for fs-backup
|
||||
// This relies on the cache to find the pod using the PVC
|
||||
shouldSnapshot, err := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumes)
|
||||
require.NoError(t, err)
|
||||
require.False(t, shouldSnapshot, "Expected snapshot to be skipped due to fs-backup selection via cache")
|
||||
}
|
||||
|
||||
@@ -288,7 +288,7 @@ const (
|
||||
|
||||
// BackupPhase is a string representation of the lifecycle phase
|
||||
// of a Velero backup.
|
||||
// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
|
||||
// +kubebuilder:validation:Enum=New;Queued;ReadyToStart;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
|
||||
type BackupPhase string
|
||||
|
||||
const (
|
||||
@@ -296,6 +296,12 @@ const (
|
||||
// yet processed by the BackupController.
|
||||
BackupPhaseNew BackupPhase = "New"
|
||||
|
||||
// BackupPhaseQueued means the backup has been added to the backup queue and is waiting to be picked up for processing.
|
||||
BackupPhaseQueued BackupPhase = "Queued"
|
||||
|
||||
// BackupPhaseReadyToStart means the backup has been pulled from the queue and is ready to start.
|
||||
BackupPhaseReadyToStart BackupPhase = "ReadyToStart"
|
||||
|
||||
// BackupPhaseFailedValidation means the backup has failed
|
||||
// the controller's validations and therefore will not run.
|
||||
BackupPhaseFailedValidation BackupPhase = "FailedValidation"
|
||||
@@ -371,6 +377,11 @@ type BackupStatus struct {
|
||||
// +optional
|
||||
Phase BackupPhase `json:"phase,omitempty"`
|
||||
|
||||
// QueuePosition is the position of the backup in the queue.
// It is only relevant while Phase is "Queued".
|
||||
// +optional
|
||||
QueuePosition int `json:"queuePosition,omitempty"`
|
||||
|
||||
// ValidationErrors is a slice of all validation errors (if
|
||||
// applicable).
|
||||
// +optional
|
||||
|
||||
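The hunks above only introduce the new Queued and ReadyToStart phases and the QueuePosition status field; the enqueue/dequeue logic itself is not part of this change. A minimal sketch of how a backup controller might surface these fields (the helper functions and the position value are illustrative assumptions, not code from this diff):

// Sketch only: illustrative helpers, not part of this change.
func markQueued(backup *velerov1api.Backup, position int) {
	backup.Status.Phase = velerov1api.BackupPhaseQueued
	// QueuePosition is only meaningful while the backup stays in the Queued phase.
	backup.Status.QueuePosition = position
}

func markReadyToStart(backup *velerov1api.Backup) {
	backup.Status.Phase = velerov1api.BackupPhaseReadyToStart
	// Clear the position once the backup has been pulled from the queue.
	backup.Status.QueuePosition = 0
}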
@@ -17,6 +17,8 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -146,8 +148,15 @@ type ObjectStorageLocation struct {
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
|
||||
// CACert defines a CA bundle to use when verifying TLS connections to the provider.
|
||||
// Deprecated: Use CACertRef instead.
|
||||
// +optional
|
||||
CACert []byte `json:"caCert,omitempty"`
|
||||
|
||||
// CACertRef is a reference to a Secret containing the CA certificate bundle to use
|
||||
// when verifying TLS connections to the provider. The Secret must be in the same
|
||||
// namespace as the BackupStorageLocation.
|
||||
// +optional
|
||||
CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
|
||||
}
|
||||
|
||||
// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.
|
||||
@@ -177,3 +186,13 @@ const (
|
||||
|
||||
// TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus.
|
||||
// TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus.
|
||||
|
||||
// Validate validates the BackupStorageLocation to ensure that only one of CACert or CACertRef is set.
|
||||
func (bsl *BackupStorageLocation) Validate() error {
|
||||
if bsl.Spec.ObjectStorage != nil &&
|
||||
bsl.Spec.ObjectStorage.CACert != nil &&
|
||||
bsl.Spec.ObjectStorage.CACertRef != nil {
|
||||
return errors.New("cannot specify both caCert and caCertRef in objectStorage")
|
||||
}
|
||||
return nil
|
||||
}
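A brief usage sketch of the new validation (the bucket and Secret names below are illustrative): setting both the deprecated inline CACert and the new CACertRef is rejected, while either one alone passes.

// Sketch only: illustrative values for exercising Validate.
bsl := &BackupStorageLocation{
	Spec: BackupStorageLocationSpec{
		StorageType: StorageType{
			ObjectStorage: &ObjectStorageLocation{
				Bucket: "my-bucket",
				CACert: []byte("inline-ca-bundle"), // deprecated inline bundle
				CACertRef: &corev1api.SecretKeySelector{ // new Secret reference
					LocalObjectReference: corev1api.LocalObjectReference{Name: "velero-ca"},
					Key:                  "ca.crt",
				},
			},
		},
	},
}
err := bsl.Validate() // returns "cannot specify both caCert and caCertRef in objectStorage"
_ = err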
|
||||
|
||||
pkg/apis/velero/v1/backupstoragelocation_types_test.go (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
/*
|
||||
Copyright The Velero Contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestBackupStorageLocationValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bsl *BackupStorageLocation
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid - neither CACert nor CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid - only CACert set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACert: []byte("test-cert"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid - only CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACertRef: &corev1api.SecretKeySelector{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "ca-cert-secret",
|
||||
},
|
||||
Key: "ca.crt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid - both CACert and CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACert: []byte("test-cert"),
|
||||
CACertRef: &corev1api.SecretKeySelector{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "ca-cert-secret",
|
||||
},
|
||||
Key: "ca.crt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "valid - no ObjectStorage",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := test.bsl.Validate()
|
||||
if test.expectError && err == nil {
|
||||
t.Errorf("expected error but got none")
|
||||
}
|
||||
if !test.expectError && err != nil {
|
||||
t.Errorf("expected no error but got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -118,6 +118,10 @@ type PodVolumeBackupStatus struct {
|
||||
// +optional
|
||||
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
|
||||
|
||||
// IncrementalBytes holds the number of bytes new or changed since the last backup
|
||||
// +optional
|
||||
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
|
||||
|
||||
// AcceptedTimestamp records the time the pod volume backup is to be prepared.
|
||||
// The server's time is used for AcceptedTimestamp
|
||||
// +optional
|
||||
@@ -134,6 +138,7 @@ type PodVolumeBackupStatus struct {
|
||||
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
|
||||
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
|
||||
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
|
||||
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
|
||||
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
|
||||
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"
|
||||
|
||||
@@ -58,6 +58,10 @@ type PodVolumeRestoreSpec struct {
|
||||
// Cancel indicates request to cancel the ongoing PodVolumeRestore. It can be set
|
||||
// when the PodVolumeRestore is in InProgress phase
|
||||
Cancel bool `json:"cancel,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
|
||||
|
||||
@@ -915,6 +915,11 @@ func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.CACertRef != nil {
|
||||
in, out := &in.CACertRef, &out.CACertRef
|
||||
*out = new(corev1.SecretKeySelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation.
|
||||
|
||||
@@ -58,6 +58,10 @@ type DataDownloadSpec struct {
|
||||
// NodeOS is OS of the node where the DataDownload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
// TargetVolumeSpec is the specification for a target PVC.
|
||||
|
||||
@@ -155,6 +155,10 @@ type DataUploadStatus struct {
|
||||
// +optional
|
||||
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
|
||||
|
||||
// IncrementalBytes holds the number of bytes new or changed since the last backup
|
||||
// +optional
|
||||
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
|
||||
|
||||
// Node is name of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
Node string `json:"node,omitempty"`
|
||||
@@ -185,6 +189,7 @@ type DataUploadStatus struct {
|
||||
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started"
|
||||
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
|
||||
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
|
||||
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
|
||||
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
|
||||
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"
|
||||
@@ -244,4 +249,8 @@ type DataUploadResult struct {
|
||||
// NodeOS is OS of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
@@ -76,14 +76,8 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
|
||||
pvc.Spec.Selector = nil
|
||||
}
|
||||
|
||||
// remove label selectors with "velero.io/" prefixing in the key which is left by Velero restore
|
||||
if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
|
||||
for k := range pvc.Spec.Selector.MatchLabels {
|
||||
if strings.HasPrefix(k, "velero.io/") {
|
||||
delete(pvc.Spec.Selector.MatchLabels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Clean stale Velero labels from PVC metadata and selector
|
||||
a.cleanupStaleVeleroLabels(pvc, backup)
|
||||
|
||||
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
|
||||
if err != nil {
|
||||
@@ -92,3 +86,50 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
|
||||
|
||||
return &unstructured.Unstructured{Object: pvcMap}, actionhelpers.RelatedItemsForPVC(pvc, a.log), nil
|
||||
}
|
||||
|
||||
// cleanupStaleVeleroLabels removes stale Velero labels from both the PVC metadata
|
||||
// and the selector's match labels to ensure clean backups
|
||||
func (a *PVCAction) cleanupStaleVeleroLabels(pvc *corev1api.PersistentVolumeClaim, backup *v1.Backup) {
|
||||
// Clean stale Velero labels from selector match labels
|
||||
if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
|
||||
for k := range pvc.Spec.Selector.MatchLabels {
|
||||
if strings.HasPrefix(k, "velero.io/") {
|
||||
a.log.Infof("Deleting stale Velero label %s from PVC %s selector", k, pvc.Name)
|
||||
delete(pvc.Spec.Selector.MatchLabels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean stale Velero labels from main metadata
|
||||
if pvc.Labels != nil {
|
||||
for k, v := range pvc.Labels {
|
||||
// Only remove labels that are clearly stale from previous operations
|
||||
shouldRemove := false
|
||||
|
||||
// Always remove restore-name labels as these are from previous restores
|
||||
if k == v1.RestoreNameLabel {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if k == v1.MustIncludeAdditionalItemAnnotation {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
// Remove backup-name labels that don't match current backup
|
||||
if k == v1.BackupNameLabel && v != backup.Name {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
// Remove volume-snapshot-name labels from previous CSI backups
|
||||
// Note: If this backup creates new CSI snapshots, the CSI action will add them back
|
||||
if k == v1.VolumeSnapshotLabel {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
a.log.Infof("Deleting stale Velero label %s=%s from PVC %s", k, v, pvc.Name)
|
||||
delete(pvc.Labels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,3 +149,176 @@ func TestBackupPVAction(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, additional)
|
||||
}
|
||||
|
||||
func TestCleanupStaleVeleroLabels(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputPVC *corev1api.PersistentVolumeClaim
|
||||
backup *v1.Backup
|
||||
expectedLabels map[string]string
|
||||
expectedSelector *metav1.LabelSelector
|
||||
}{
|
||||
{
|
||||
name: "removes restore-name labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes backup-name labels that don't match current backup",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "keeps backup-name labels that match current backup",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/backup-name": "current-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"velero.io/backup-name": "current-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes volume-snapshot-name labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/volume-snapshot-name": "old-snapshot",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes velero labels from selector match labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: nil,
|
||||
expectedSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles PVC with no labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: nil,
|
||||
},
|
||||
{
|
||||
name: "handles PVC with no selector",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
expectedSelector: nil,
|
||||
},
|
||||
{
|
||||
name: "removes multiple stale velero labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"velero.io/volume-snapshot-name": "old-snapshot",
|
||||
"app": "myapp",
|
||||
"env": "prod",
|
||||
},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
"env": "prod",
|
||||
},
|
||||
expectedSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
action := NewPVCAction(velerotest.NewLogger())
|
||||
|
||||
// Create a copy of the input PVC to avoid modifying the test case
|
||||
pvcCopy := tc.inputPVC.DeepCopy()
|
||||
|
||||
action.cleanupStaleVeleroLabels(pvcCopy, tc.backup)
|
||||
|
||||
assert.Equal(t, tc.expectedLabels, pvcCopy.Labels, "Labels should match expected values")
|
||||
assert.Equal(t, tc.expectedSelector, pvcCopy.Spec.Selector, "Selector should match expected values")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
internalvolumehelper "github.com/vmware-tanzu/velero/internal/volumehelper"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
|
||||
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
|
||||
@@ -57,6 +58,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
|
||||
)
|
||||
|
||||
// TODO: Replace hardcoded VolumeSnapshot finalizer strings with constants from
|
||||
@@ -72,6 +74,14 @@ const (
|
||||
type pvcBackupItemAction struct {
|
||||
log logrus.FieldLogger
|
||||
crClient crclient.Client
|
||||
|
||||
// pvcPodCache provides lazy per-namespace caching of PVC-to-Pod mappings.
|
||||
// Since plugin instances are unique per backup (created via newPluginManager and
|
||||
// cleaned up via CleanupClients at backup completion), we can safely cache this
|
||||
// without mutex or backup UID tracking.
|
||||
// This avoids the O(N*M) performance issue when there are many PVCs and pods.
|
||||
// See issue #9179 and PR #9226 for details.
|
||||
pvcPodCache *podvolumeutil.PVCPodCache
|
||||
}
|
||||
|
||||
// AppliesTo returns information indicating that the PVCBackupItemAction
|
||||
@@ -97,6 +107,59 @@ func (p *pvcBackupItemAction) validateBackup(backup velerov1api.Backup) (valid b
|
||||
return true
|
||||
}
|
||||
|
||||
// ensurePVCPodCacheForNamespace ensures the PVC-to-Pod cache is built for the given namespace.
|
||||
// This uses lazy per-namespace caching following the pattern from PR #9226.
|
||||
// Since plugin instances are unique per backup, we can safely cache without mutex or backup UID tracking.
|
||||
func (p *pvcBackupItemAction) ensurePVCPodCacheForNamespace(ctx context.Context, namespace string) error {
|
||||
// Initialize cache if needed
|
||||
if p.pvcPodCache == nil {
|
||||
p.pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
}
|
||||
|
||||
// Build cache for namespace if not already done
|
||||
if !p.pvcPodCache.IsNamespaceBuilt(namespace) {
|
||||
p.log.Debugf("Building PVC-to-Pod cache for namespace %s", namespace)
|
||||
if err := p.pvcPodCache.BuildCacheForNamespace(ctx, namespace, p.crClient); err != nil {
|
||||
return errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", namespace)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVolumeHelperWithCache creates a VolumeHelper using the pre-built PVC-to-Pod cache.
|
||||
// The cache should be ensured for the relevant namespace(s) before calling this.
|
||||
func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
|
||||
// Create VolumeHelper with our lazy-built cache
|
||||
vh, err := internalvolumehelper.NewVolumeHelperImplWithCache(
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
p.pvcPodCache,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create VolumeHelper")
|
||||
}
|
||||
return vh, nil
|
||||
}
|
||||
|
||||
// getOrCreateVolumeHelper returns a VolumeHelper with lazy per-namespace caching.
|
||||
// The VolumeHelper uses the pvcPodCache which is populated lazily as namespaces are encountered.
|
||||
// Callers should use ensurePVCPodCacheForNamespace before calling methods that need
|
||||
// PVC-to-Pod lookups for a specific namespace.
|
||||
// Since plugin instances are unique per backup (created via newPluginManager and
|
||||
// cleaned up via CleanupClients at backup completion), we can safely cache this.
|
||||
// See issue #9179 and PR #9226 for details.
|
||||
func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
|
||||
// Initialize the PVC-to-Pod cache if needed
|
||||
if p.pvcPodCache == nil {
|
||||
p.pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
}
|
||||
|
||||
// Return the VolumeHelper with our lazily-built cache
|
||||
// The cache will be populated incrementally as namespaces are encountered
|
||||
return p.getVolumeHelperWithCache(backup)
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) validatePVCandPV(
|
||||
pvc corev1api.PersistentVolumeClaim,
|
||||
item runtime.Unstructured,
|
||||
@@ -248,12 +311,24 @@ func (p *pvcBackupItemAction) Execute(
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
|
||||
// Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
|
||||
if err := p.ensurePVCPodCacheForNamespace(context.TODO(), pvc.Namespace); err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
}
|
||||
|
||||
// Get or create the cached VolumeHelper for this backup
|
||||
vh, err := p.getOrCreateVolumeHelper(backup)
|
||||
if err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
}
|
||||
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
|
||||
item,
|
||||
kuberesource.PersistentVolumeClaims,
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
vh,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
@@ -621,8 +696,41 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
|
||||
return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
|
||||
}
|
||||
|
||||
// Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
|
||||
if err := p.ensurePVCPodCacheForNamespace(ctx, pvc.Namespace); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", pvc.Namespace)
|
||||
}
|
||||
|
||||
// Get the cached VolumeHelper for filtering PVCs by volume policy
|
||||
vh, err := p.getOrCreateVolumeHelper(backup)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get VolumeHelper for filtering PVCs in group %q", group)
|
||||
}
|
||||
|
||||
// Filter PVCs by volume policy
|
||||
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup, vh)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
|
||||
}
|
||||
|
||||
// Warn if any PVCs were filtered out
|
||||
if len(filteredPVCs) < len(groupedPVCs) {
|
||||
for _, originalPVC := range groupedPVCs {
|
||||
found := false
|
||||
for _, filteredPVC := range filteredPVCs {
|
||||
if originalPVC.Name == filteredPVC.Name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the CSI driver for the grouped PVCs
|
||||
driver, err := p.determineCSIDriver(groupedPVCs)
|
||||
driver, err := p.determineCSIDriver(filteredPVCs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
|
||||
}
|
||||
@@ -643,7 +751,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
|
||||
}
|
||||
|
||||
// Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
|
||||
vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
|
||||
vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
|
||||
}
|
||||
@@ -734,6 +842,43 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
|
||||
return pvcList.Items, nil
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
|
||||
pvcs []corev1api.PersistentVolumeClaim,
|
||||
backup *velerov1api.Backup,
|
||||
vh internalvolumehelper.VolumeHelper,
|
||||
) ([]corev1api.PersistentVolumeClaim, error) {
|
||||
var filteredPVCs []corev1api.PersistentVolumeClaim
|
||||
|
||||
for _, pvc := range pvcs {
|
||||
// Convert PVC to unstructured for ShouldPerformSnapshotWithVolumeHelper
|
||||
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
|
||||
}
|
||||
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
|
||||
|
||||
// Check if this PVC should be snapshotted according to volume policies
|
||||
// Uses the cached VolumeHelper for better performance with many PVCs/pods
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
|
||||
unstructuredPVC,
|
||||
kuberesource.PersistentVolumeClaims,
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
vh,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
}
|
||||
|
||||
if shouldSnapshot {
|
||||
filteredPVCs = append(filteredPVCs, pvc)
|
||||
}
|
||||
}
|
||||
|
||||
return filteredPVCs, nil
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) determineCSIDriver(
|
||||
pvcs []corev1api.PersistentVolumeClaim,
|
||||
) (string, error) {
|
||||
|
||||
@@ -586,6 +586,387 @@ func TestListGroupedPVCs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterPVCsByVolumePolicy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvcs []corev1api.PersistentVolumeClaim
|
||||
pvs []corev1api.PersistentVolume
|
||||
volumePolicyStr string
|
||||
expectCount int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "All PVCs should be included when no volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-1",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-2",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCount: 2,
|
||||
},
|
||||
{
|
||||
name: "Filter out NFS PVC by volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-csi",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 1,
|
||||
},
|
||||
{
|
||||
name: "All PVCs filtered out by volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs-1",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs-2",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export/1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export/2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 0,
|
||||
},
|
||||
{
|
||||
name: "Filter out non-CSI PVCs from mixed driver group",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-linstor",
|
||||
Namespace: "ns-1",
|
||||
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-linstor",
|
||||
StorageClassName: pointer.String("sc-linstor"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-nfs",
|
||||
Namespace: "ns-1",
|
||||
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
objs := []runtime.Object{}
|
||||
for i := range tt.pvs {
|
||||
objs = append(objs, &tt.pvs[i])
|
||||
}
|
||||
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
backup := &velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{},
|
||||
}
|
||||
|
||||
// Add volume policy ConfigMap if specified
|
||||
if tt.volumePolicyStr != "" {
|
||||
cm := &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "volume-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"volume-policy": tt.volumePolicyStr,
|
||||
},
|
||||
}
|
||||
require.NoError(t, client.Create(t.Context(), cm))
|
||||
|
||||
backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "volume-policy",
|
||||
}
|
||||
}
|
||||
|
||||
action := &pvcBackupItemAction{
|
||||
log: velerotest.NewLogger(),
|
||||
crClient: client,
|
||||
}
|
||||
|
||||
// Pass nil for VolumeHelper in tests - it will fall back to creating a new one per call
|
||||
// This is the expected behavior for testing and third-party plugins
|
||||
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup, nil)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result, tt.expectCount)
|
||||
|
||||
// For mixed driver scenarios, verify filtered result can determine single CSI driver
|
||||
if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 {
|
||||
driver, err := action.determineCSIDriver(result)
|
||||
require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error")
|
||||
require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterPVCsByVolumePolicyWithVolumeHelper tests filterPVCsByVolumePolicy when a
|
||||
// pre-created VolumeHelper is passed (non-nil). This exercises the cached path used
|
||||
// by the CSI PVC BIA plugin for better performance.
|
||||
func TestFilterPVCsByVolumePolicyWithVolumeHelper(t *testing.T) {
|
||||
// Create test PVCs and PVs
|
||||
pvcs := []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-csi",
|
||||
StorageClassName: pointer.String("sc-csi"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
}
|
||||
|
||||
pvs := []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fake client with PVs
|
||||
objs := []runtime.Object{}
|
||||
for i := range pvs {
|
||||
objs = append(objs, &pvs[i])
|
||||
}
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
// Create backup with volume policy that skips NFS volumes
|
||||
volumePolicyStr := `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`
|
||||
cm := &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "volume-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"volume-policy": volumePolicyStr,
|
||||
},
|
||||
}
|
||||
require.NoError(t, client.Create(t.Context(), cm))
|
||||
|
||||
backup := &velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "volume-policy",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
action := &pvcBackupItemAction{
|
||||
log: velerotest.NewLogger(),
|
||||
crClient: client,
|
||||
}
|
||||
|
||||
// Create a VolumeHelper using the same method the plugin would use
|
||||
vh, err := action.getOrCreateVolumeHelper(backup)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh)
|
||||
|
||||
// Test with the pre-created VolumeHelper (non-nil path)
|
||||
result, err := action.filterPVCsByVolumePolicy(pvcs, backup, vh)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should filter out the NFS PVC, leaving only the CSI PVC
|
||||
require.Len(t, result, 1)
|
||||
require.Equal(t, "pvc-csi", result[0].Name)
|
||||
}
|
||||
|
||||
func TestDetermineCSIDriver(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1685,3 +2066,42 @@ func TestPVCRequestSize(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetOrCreateVolumeHelper tests the VolumeHelper and PVC-to-Pod cache behavior.
|
||||
// Since plugin instances are unique per backup (created via newPluginManager and
|
||||
// cleaned up via CleanupClients at backup completion), we verify that the pvcPodCache
|
||||
// is properly initialized and reused across calls.
|
||||
func TestGetOrCreateVolumeHelper(t *testing.T) {
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
action := &pvcBackupItemAction{
|
||||
log: velerotest.NewLogger(),
|
||||
crClient: client,
|
||||
}
|
||||
backup := &velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
UID: types.UID("test-uid-1"),
|
||||
},
|
||||
}
|
||||
|
||||
// Initially, pvcPodCache should be nil
|
||||
require.Nil(t, action.pvcPodCache, "pvcPodCache should be nil initially")
|
||||
|
||||
// Get VolumeHelper first time - should create new cache and VolumeHelper
|
||||
vh1, err := action.getOrCreateVolumeHelper(backup)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh1)
|
||||
|
||||
// pvcPodCache should now be initialized
|
||||
require.NotNil(t, action.pvcPodCache, "pvcPodCache should be initialized after first call")
|
||||
cache1 := action.pvcPodCache
|
||||
|
||||
// Get VolumeHelper second time - should reuse the same cache
|
||||
vh2, err := action.getOrCreateVolumeHelper(backup)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh2)
|
||||
|
||||
// The pvcPodCache should be the same instance
|
||||
require.Same(t, cache1, action.pvcPodCache, "Expected same pvcPodCache instance on repeated calls")
|
||||
}
|
||||
|
||||
@@ -84,17 +84,6 @@ func (p *volumeSnapshotBackupItemAction) Execute(
|
||||
return nil, nil, "", nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
additionalItems := make([]velero.ResourceIdentifier, 0)
|
||||
if vs.Spec.VolumeSnapshotClassName != nil {
|
||||
additionalItems = append(
|
||||
additionalItems,
|
||||
velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.VolumeSnapshotClasses,
|
||||
Name: *vs.Spec.VolumeSnapshotClassName,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
|
||||
backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
|
||||
p.log.
|
||||
@@ -105,6 +94,24 @@ func (p *volumeSnapshotBackupItemAction) Execute(
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
additionalItems := make([]velero.ResourceIdentifier, 0)
|
||||
|
||||
if vs.Spec.VolumeSnapshotClassName != nil {
|
||||
// The VolumeSnapshotClass still needs to be added to the backup here,
// because the secret referenced by the VolumeSnapshotClass remains relevant to the backup.
|
||||
additionalItems = append(
|
||||
additionalItems,
|
||||
velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.VolumeSnapshotClasses,
|
||||
Name: *vs.Spec.VolumeSnapshotClassName,
|
||||
},
|
||||
)
|
||||
|
||||
// The async operation updates the VolumeSnapshot during the finalizing phase, so the
// VolumeSnapshotClass cannot be removed from the backup here in any case; deleting it
// during restore and delete operations is sufficient, so skip it at this point.
|
||||
}
|
||||
|
||||
p.log.Infof("Getting VolumesnapshotContent for Volumesnapshot %s/%s",
|
||||
vs.Namespace, vs.Name)
|
||||
|
||||
|
||||
@@ -97,6 +97,10 @@ func (p *volumeSnapshotContentBackupItemAction) Execute(
|
||||
})
|
||||
}
|
||||
|
||||
// The async operation updates the VolumeSnapshotContent during the finalizing phase, so the
// VolumeSnapshotClass cannot be removed here in any case; deleting it during restore and
// delete operations is sufficient, so skip it at this point.
|
||||
|
||||
snapContMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&snapCont)
|
||||
if err != nil {
|
||||
return nil, nil, "", nil, errors.WithStack(err)
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestVSCExecute(t *testing.T) {
|
||||
expectedItems []velero.ResourceIdentifier
|
||||
}{
|
||||
{
|
||||
name: "Invalid VolumeSnapshotClass",
|
||||
name: "Invalid VolumeSnapshotContent",
|
||||
item: velerotest.UnstructuredOrDie(
|
||||
`
|
||||
{
|
||||
|
||||
@@ -117,7 +117,6 @@ type kubernetesBackupper struct {
	podCommandExecutor        podexec.PodCommandExecutor
	podVolumeBackupperFactory podvolume.BackupperFactory
	podVolumeTimeout          time.Duration
	podVolumeContext          context.Context
	defaultVolumesToFsBackup  bool
	clientPageSize            int
	uploaderType              string
@@ -168,10 +167,39 @@ func NewKubernetesBackupper(
	}, nil
	}

	// getNamespaceIncludesExcludes returns an IncludesExcludes list containing which namespaces to
	// include and exclude from the backup.
	func getNamespaceIncludesExcludes(backup *velerov1api.Backup) *collections.IncludesExcludes {
	return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
	// getNamespaceIncludesExcludesAndArgoCDNamespaces returns an IncludesExcludes list containing which namespaces to
	// include and exclude from the backup and a list of namespaces managed by ArgoCD.
	func getNamespaceIncludesExcludesAndArgoCDNamespaces(backup *velerov1api.Backup, kbClient kbclient.Client) (*collections.NamespaceIncludesExcludes, []string, error) {
	nsList := corev1api.NamespaceList{}
	activeNamespaces := []string{}
	nsManagedByArgoCD := []string{}
	if err := kbClient.List(context.Background(), &nsList); err != nil {
	return nil, nsManagedByArgoCD, err
	}
	for _, ns := range nsList.Items {
	activeNamespaces = append(activeNamespaces, ns.Name)
	}

	// Set ActiveNamespaces first, then set includes/excludes
	includesExcludes := collections.NewNamespaceIncludesExcludes().
	ActiveNamespaces(activeNamespaces).
	Includes(backup.Spec.IncludedNamespaces...).
	Excludes(backup.Spec.ExcludedNamespaces...)

	// Expand wildcards if needed
	if err := includesExcludes.ExpandIncludesExcludes(); err != nil {
	return nil, []string{}, err
	}

	// Check for ArgoCD managed namespaces in the namespaces that will be included
	for _, ns := range nsList.Items {
	nsLabels := ns.GetLabels()
	if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 && includesExcludes.ShouldInclude(ns.Name) {
	nsManagedByArgoCD = append(nsManagedByArgoCD, ns.Name)
	}
	}

	return includesExcludes, nsManagedByArgoCD, nil
	}

	func getResourceHooks(hookSpecs []velerov1api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]hook.ResourceHook, error) {

@@ -245,8 +273,35 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	if err := kb.writeBackupVersion(tw); err != nil {
	return errors.WithStack(err)
	}
	var err error
	var nsManagedByArgoCD []string
	backupRequest.NamespaceIncludesExcludes, nsManagedByArgoCD, err = getNamespaceIncludesExcludesAndArgoCDNamespaces(backupRequest.Backup, kb.kbClient)
	if err != nil {
	log.WithError(err).Errorf("error getting namespace includes/excludes")
	return err
	}

	if backupRequest.NamespaceIncludesExcludes.IsWildcardExpanded() {
	expandedIncludes := backupRequest.NamespaceIncludesExcludes.GetIncludes()
	expandedExcludes := backupRequest.NamespaceIncludesExcludes.GetExcludes()

	// Get the final namespace list after wildcard expansion
	wildcardResult, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
	if err != nil {
	log.WithError(err).Errorf("error resolving namespace list")
	return err
	}

	log.WithFields(logrus.Fields{
	"expandedIncludes": expandedIncludes,
	"expandedExcludes": expandedExcludes,
	"wildcardResult":   wildcardResult,
	"includedCount":    len(expandedIncludes),
	"excludedCount":    len(expandedExcludes),
	"resultCount":      len(wildcardResult),
	}).Info("Successfully expanded wildcard patterns")
	}

	backupRequest.NamespaceIncludesExcludes = getNamespaceIncludesExcludes(backupRequest.Backup)
	log.Infof("Including namespaces: %s", backupRequest.NamespaceIncludesExcludes.IncludesString())
	log.Infof("Excluding namespaces: %s", backupRequest.NamespaceIncludesExcludes.ExcludesString())

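The hunk above resolves wildcard include patterns against the namespaces that actually exist in the cluster before the backup proceeds. As a concept, the expansion can be illustrated with the self-contained sketch below; it matches include patterns against active namespaces and drops excluded ones, and is only an illustration, not the collections.NamespaceIncludesExcludes API itself.

package main

import (
	"fmt"
	"path"
)

// resolveNamespaces returns the active namespaces that match an include
// pattern and are not explicitly excluded.
func resolveNamespaces(active, includes, excludes []string) []string {
	excluded := map[string]bool{}
	for _, e := range excludes {
		excluded[e] = true
	}
	var result []string
	for _, ns := range active {
		if excluded[ns] {
			continue
		}
		for _, pattern := range includes {
			if ok, _ := path.Match(pattern, ns); ok {
				result = append(result, ns)
				break
			}
		}
	}
	return result
}

func main() {
	active := []string{"app-1", "app-2", "velero", "kube-system"}
	fmt.Println(resolveNamespaces(active, []string{"app-*"}, []string{"app-2"}))
	// Output: [app-1]
}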
@@ -254,12 +309,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	// We will check for the existence of a ArgoCD label in the includedNamespaces and add a warning
	// so that users are at least aware about the existence of argoCD managed ns in their backup
	// Related Issue: https://github.com/vmware-tanzu/velero/issues/7905
	if len(backupRequest.Spec.IncludedNamespaces) > 0 {
	nsManagedByArgoCD := getNamespacesManagedByArgoCD(kb.kbClient, backupRequest.Spec.IncludedNamespaces, log)

	if len(nsManagedByArgoCD) > 0 {
	log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
	}
	if len(nsManagedByArgoCD) > 0 {
	log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
	}

	if collections.UseOldResourceFilters(backupRequest.Spec) {
@@ -284,7 +335,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(

	log.Infof("Backing up all volumes using pod volume backup: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToFsBackup))

	var err error
	backupRequest.ResourceHooks, err = getResourceHooks(backupRequest.Spec.Hooks.Resources, kb.discoveryHelper)
	if err != nil {
	log.WithError(errors.WithStack(err)).Debugf("Error from getResourceHooks")
@@ -314,12 +364,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	}

	var podVolumeCancelFunc context.CancelFunc
	kb.podVolumeContext, podVolumeCancelFunc = context.WithTimeout(context.Background(), podVolumeTimeout)
	podVolumeContext, podVolumeCancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
	defer podVolumeCancelFunc()

	var podVolumeBackupper podvolume.Backupper
	if kb.podVolumeBackupperFactory != nil {
	podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(kb.podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
	podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
	if err != nil {
	log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
	return errors.WithStack(err)
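The hunk above replaces the podVolumeContext struct field with a local variable that is passed to the components that need it, so concurrent backups no longer share one mutable context on the long-lived backupper. A small self-contained sketch of that pattern, with stand-in names, is shown here; it is an illustration of the design choice, not Velero's code.

package main

import (
	"context"
	"fmt"
	"time"
)

// runBackup creates the timeout context per call and hands it to the worker,
// instead of storing it on a long-lived struct.
func runBackup(podVolumeTimeout time.Duration) {
	podVolumeContext, cancel := context.WithTimeout(context.Background(), podVolumeTimeout)
	defer cancel() // scoped to this backup only

	backupPodVolumes(podVolumeContext)
}

func backupPodVolumes(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("timed out:", ctx.Err())
	case <-time.After(10 * time.Millisecond):
		fmt.Println("pod volumes backed up")
	}
}

func main() {
	runBackup(50 * time.Millisecond)
}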
@@ -358,6 +408,28 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	}
	backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}

	// Resolve namespaces for PVC-to-Pod cache building in volumehelper.
	// See issue #9179 for details.
	namespaces, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
	if err != nil {
	log.WithError(err).Error("Failed to resolve namespace list for PVC-to-Pod cache")
	return err
	}

	volumeHelperImpl, err := volumehelper.NewVolumeHelperImplWithNamespaces(
	backupRequest.ResPolicies,
	backupRequest.Spec.SnapshotVolumes,
	log,
	kb.kbClient,
	boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
	!backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
	namespaces,
	)
	if err != nil {
	log.WithError(err).Error("Failed to build PVC-to-Pod cache for volume policy lookups")
	return err
	}

	itemBackupper := &itemBackupper{
	backupRequest:            backupRequest,
	tarWriter:                tw,
@@ -365,20 +437,14 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	kbClient:                 kb.kbClient,
	discoveryHelper:          kb.discoveryHelper,
	podVolumeBackupper:       podVolumeBackupper,
	podVolumeContext:         podVolumeContext,
	podVolumeSnapshotTracker: podvolume.NewTracker(),
	volumeSnapshotterCache:   NewVolumeSnapshotterCache(volumeSnapshotterGetter),
	itemHookHandler: &hook.DefaultItemHookHandler{
	PodCommandExecutor: kb.podCommandExecutor,
	},
	hookTracker: hook.NewHookTracker(),
	volumeHelperImpl: volumehelper.NewVolumeHelperImpl(
	backupRequest.ResPolicies,
	backupRequest.Spec.SnapshotVolumes,
	log,
	kb.kbClient,
	boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
	!backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
	),
	hookTracker:         hook.NewHookTracker(),
	volumeHelperImpl:    volumeHelperImpl,
	kubernetesBackupper: kb,
	}

@@ -546,7 +612,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
	log.Infof("Backing Up Item Block including %s %s/%s (%v items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items))

	wg.Add(1)
	backupRequest.ItemBlockChannel <- ItemBlockInput{
	backupRequest.WorkerPool.GetInputChannel() <- ItemBlockInput{
	itemBlock:  itemBlock,
	returnChan: itemBlockReturn,
	}
@@ -797,7 +863,7 @@ func (kb *kubernetesBackupper) handleItemBlockPostHooks(itemBlock *BackupItemBlo
	log := itemBlock.Log

	// the post hooks will not execute until all PVBs of the item block pods are processed
	if err := kb.waitUntilPVBsProcessed(kb.podVolumeContext, log, itemBlock, hookPods); err != nil {
	if err := kb.waitUntilPVBsProcessed(itemBlock.itemBackupper.podVolumeContext, log, itemBlock, hookPods); err != nil {
	log.WithError(err).Error("failed to wait PVBs processed for the ItemBlock")
	return
	}
@@ -1198,6 +1264,7 @@ func updateVolumeInfos(
	volumeInfos[index].SnapshotDataMovementInfo.SnapshotHandle = dataUpload.Status.SnapshotID
	volumeInfos[index].SnapshotDataMovementInfo.RetainedSnapshot = dataUpload.Spec.CSISnapshot.VolumeSnapshot
	volumeInfos[index].SnapshotDataMovementInfo.Size = dataUpload.Status.Progress.TotalBytes
	volumeInfos[index].SnapshotDataMovementInfo.IncrementalSize = dataUpload.Status.IncrementalBytes
	volumeInfos[index].SnapshotDataMovementInfo.Phase = dataUpload.Status.Phase

	if dataUpload.Status.Phase == velerov2alpha1.DataUploadPhaseCompleted {
@@ -1255,26 +1322,3 @@ func putVolumeInfos(

	return backupStore.PutBackupVolumeInfos(backupName, backupVolumeInfoBuf)
	}

	func getNamespacesManagedByArgoCD(kbClient kbclient.Client, includedNamespaces []string, log logrus.FieldLogger) []string {
	var nsManagedByArgoCD []string

	for _, nsName := range includedNamespaces {
	ns := corev1api.Namespace{}
	if err := kbClient.Get(context.Background(), kbclient.ObjectKey{Name: nsName}, &ns); err != nil {
	// check for only those ns that exist and are included in backup
	// here we ignore cases like "" or "*" specified under includedNamespaces
	if apierrors.IsNotFound(err) {
	continue
	}
	log.WithError(err).Errorf("error getting namespace %s", nsName)
	continue
	}

	nsLabels := ns.GetLabels()
	if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 {
	nsManagedByArgoCD = append(nsManagedByArgoCD, nsName)
	}
	}
	return nsManagedByArgoCD
	}

@@ -79,7 +79,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
	WorkerPool:       &h.itemBlockPool,
	}

	backupFile := bytes.NewBuffer([]byte{})
@@ -141,7 +141,7 @@ func TestBackupProgressIsUpdated(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
	WorkerPool:       &h.itemBlockPool,
	}
	backupFile := bytes.NewBuffer([]byte{})

@@ -881,7 +881,7 @@ func TestBackupOldResourceFiltering(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1062,7 +1062,7 @@ func TestCRDInclusion(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1161,7 +1161,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1190,7 +1190,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
	WorkerPool:       &h.itemBlockPool,
	}
	backup1File := bytes.NewBuffer([]byte{})

@@ -1206,7 +1206,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
	WorkerPool:       &h.itemBlockPool,
	}
	backup2File := bytes.NewBuffer([]byte{})

@@ -1260,7 +1260,7 @@ func TestBackupResourceOrdering(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1381,7 +1381,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
	Backup:           defaultBackup().SnapshotVolumes(false).Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	resPolicies: &resourcepolicies.ResourcePolicies{
	Version: "v1",
@@ -1428,8 +1428,8 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
	},
	includedPVs: map[string]struct{}{},
	},
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	BackedUpItems:    NewBackedUpItemsMap(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVCs(
@@ -1679,7 +1679,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1764,7 +1764,7 @@ func TestBackupWithInvalidActions(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -1918,7 +1918,7 @@ func TestBackupActionModifications(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -2178,7 +2178,7 @@ func TestBackupActionAdditionalItems(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -2439,7 +2439,7 @@ func TestItemBlockActionsRunForCorrectItems(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -2524,7 +2524,7 @@ func TestBackupWithInvalidItemBlockActions(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -2780,7 +2780,7 @@ func TestItemBlockActionRelatedItems(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -2948,7 +2948,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -2984,7 +2984,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3021,7 +3021,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3058,7 +3058,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3095,7 +3095,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3130,7 +3130,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3148,7 +3148,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3169,7 +3169,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3188,7 +3188,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3210,7 +3210,7 @@ func TestBackupWithSnapshots(t *testing.T) {
	},
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.PVs(
@@ -3344,7 +3344,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.Pods(
@@ -3376,7 +3376,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.Pods(
@@ -3408,7 +3408,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
	Backup:           defaultBackup().Result(),
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	},
	apiResources: []*test.APIResource{
	test.Pods(
@@ -3494,7 +3494,7 @@ func TestBackupWithInvalidHooks(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -3968,7 +3968,7 @@ func TestBackupWithHooks(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	podCommandExecutor = new(test.MockPodCommandExecutor)
@@ -4193,7 +4193,7 @@ func TestBackupWithPodVolume(t *testing.T) {
	SnapshotLocations: []*velerov1.VolumeSnapshotLocation{tc.vsl},
	SkippedPVTracker:  NewSkipPVTracker(),
	BackedUpItems:     NewBackedUpItemsMap(),
	ItemBlockChannel:  itemBlockPool.GetInputChannel(),
	WorkerPool:        itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -5312,7 +5312,7 @@ func TestBackupNewResourceFiltering(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -5477,7 +5477,7 @@ func TestBackupNamespaces(t *testing.T) {
	Backup:           tc.backup,
	SkippedPVTracker: NewSkipPVTracker(),
	BackedUpItems:    NewBackedUpItemsMap(),
	ItemBlockChannel: itemBlockPool.GetInputChannel(),
	WorkerPool:       itemBlockPool,
	}
	backupFile = bytes.NewBuffer([]byte{})
	)
@@ -5578,6 +5578,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
	CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
	SnapshotID("snapshot-id").
	Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
	IncrementalBytes(500).
	Phase(velerov2alpha1.DataUploadPhaseFailed).
	SourceNamespace("ns-1").
	SourcePVC("pvc-1").
@@ -5603,6 +5604,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
	RetainedSnapshot: "vs-1",
	SnapshotHandle:   "snapshot-id",
	Size:             1000,
	IncrementalSize:  500,
	Phase:            velerov2alpha1.DataUploadPhaseFailed,
	},
	},
@@ -5616,6 +5618,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
	CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
	SnapshotID("snapshot-id").
	Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
	IncrementalBytes(500).
	Phase(velerov2alpha1.DataUploadPhaseCompleted).
	SourceNamespace("ns-1").
	SourcePVC("pvc-1").
@@ -5641,6 +5644,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
	RetainedSnapshot: "vs-1",
	SnapshotHandle:   "snapshot-id",
	Size:             1000,
	IncrementalSize:  500,
	Phase:            velerov2alpha1.DataUploadPhaseCompleted,
	},
	},
@@ -5655,6 +5659,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
	CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
	SnapshotID("snapshot-id").
	Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
	IncrementalBytes(500).
	Phase(velerov2alpha1.DataUploadPhaseCompleted).
	SourceNamespace("ns-1").
	SourcePVC("pvc-1").

@@ -69,6 +69,7 @@ type itemBackupper struct {
	kbClient                 kbClient.Client
	discoveryHelper          discovery.Helper
	podVolumeBackupper       podvolume.Backupper
	podVolumeContext         context.Context
	podVolumeSnapshotTracker *podvolume.Tracker
	kubernetesBackupper      *kubernetesBackupper
	volumeSnapshotterCache   *VolumeSnapshotterCache

@@ -71,7 +71,7 @@ type itemCollector struct {
	type nsTracker struct {
	singleLabelSelector labels.Selector
	orLabelSelector     []labels.Selector
	namespaceFilter     *collections.IncludesExcludes
	namespaceFilter     *collections.NamespaceIncludesExcludes
	logger              logrus.FieldLogger

	namespaceMap map[string]bool
@@ -103,7 +103,7 @@ func (nt *nsTracker) init(
	unstructuredNSs []unstructured.Unstructured,
	singleLabelSelector labels.Selector,
	orLabelSelector []labels.Selector,
	namespaceFilter *collections.IncludesExcludes,
	namespaceFilter *collections.NamespaceIncludesExcludes,
	logger logrus.FieldLogger,
	) {
	if nt.namespaceMap == nil {
@@ -635,7 +635,7 @@ func coreGroupResourcePriority(resource string) int {
	// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
	// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
	// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
	func getNamespacesToList(ie *collections.IncludesExcludes) []string {
	func getNamespacesToList(ie *collections.NamespaceIncludesExcludes) []string {
	if ie == nil {
	return []string{""}
	}
@@ -753,21 +753,28 @@ func (r *itemCollector) collectNamespaces(
	}

	unstructuredList, err := resourceClient.List(metav1.ListOptions{})

	activeNamespacesHashSet := make(map[string]bool)
	for _, namespace := range unstructuredList.Items {
	activeNamespacesHashSet[namespace.GetName()] = true
	}

	if err != nil {
	log.WithError(errors.WithStack(err)).Error("error list namespaces")
	return nil, errors.WithStack(err)
	}

	for _, includedNSName := range r.backupRequest.Backup.Spec.IncludedNamespaces {
	// Change to look at the struct includes/excludes
	// In case wildcards are expanded, we need to look at the struct includes/excludes
	for _, includedNSName := range r.backupRequest.NamespaceIncludesExcludes.GetIncludes() {
	nsExists := false
	// Skip checking the namespace existing when it's "*".
	if includedNSName == "*" {
	continue
	}
	for _, unstructuredNS := range unstructuredList.Items {
	if unstructuredNS.GetName() == includedNSName {
	nsExists = true
	}

	if _, ok := activeNamespacesHashSet[includedNSName]; ok {
	nsExists = true
	}

	if !nsExists {
@@ -809,17 +816,18 @@ func (r *itemCollector) collectNamespaces(
	var items []*kubernetesResource

	for index := range unstructuredList.Items {
	nsName := unstructuredList.Items[index].GetName()

	path, err := r.writeToFile(&unstructuredList.Items[index])
	if err != nil {
	log.WithError(err).Errorf("Error writing item %s to file",
	unstructuredList.Items[index].GetName())
	log.WithError(err).Errorf("Error writing item %s to file", nsName)
	continue
	}

	items = append(items, &kubernetesResource{
	groupResource: gr,
	preferredGVR:  preferredGVR,
	name:          unstructuredList.Items[index].GetName(),
	name:          nsName,
	path:          path,
	kind:          resource.Kind,
	})

@@ -153,7 +153,7 @@ func TestFilterNamespaces(t *testing.T) {
	func TestItemCollectorBackupNamespaces(t *testing.T) {
	tests := []struct {
	name              string
	ie                *collections.IncludesExcludes
	ie                *collections.NamespaceIncludesExcludes
	namespaces        []*corev1api.Namespace
	backup            *velerov1api.Backup
	expectedTrackedNS []string
@@ -162,7 +162,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	{
	name:   "ns filter by namespace IE filter",
	backup: builder.ForBackup("velero", "backup").Result(),
	ie:     collections.NewIncludesExcludes().Includes("ns1"),
	ie:     collections.NewNamespaceIncludesExcludes().Includes("ns1"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -174,7 +174,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
	MatchLabels: map[string]string{"name": "ns1"},
	}).Result(),
	ie: collections.NewIncludesExcludes().Includes("*"),
	ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -186,7 +186,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
	{MatchLabels: map[string]string{"name": "ns1"}},
	}).Result(),
	ie: collections.NewIncludesExcludes().Includes("*"),
	ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -198,7 +198,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
	MatchLabels: map[string]string{"name": "ns1"},
	}).Result(),
	ie: collections.NewIncludesExcludes().Excludes("ns1"),
	ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -210,7 +210,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
	{MatchLabels: map[string]string{"name": "ns1"}},
	}).Result(),
	ie: collections.NewIncludesExcludes().Excludes("ns1", "ns2"),
	ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1", "ns2"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -221,7 +221,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	{
	name:   "No ns filters",
	backup: builder.ForBackup("velero", "backup").Result(),
	ie:     collections.NewIncludesExcludes().Includes("*"),
	ie:     collections.NewNamespaceIncludesExcludes().Includes("*"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -231,7 +231,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	{
	name:   "ns specified by the IncludeNamespaces cannot be found",
	backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
	ie:     collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
	ie:     collections.NewNamespaceIncludesExcludes().Includes("ns1", "invalid", "*"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -242,7 +242,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
	{
	name:   "terminating ns should not tracked",
	backup: builder.ForBackup("velero", "backup").Result(),
	ie:     collections.NewIncludesExcludes().Includes("ns1", "ns2"),
	ie:     collections.NewNamespaceIncludesExcludes().Includes("ns1", "ns2"),
	namespaces: []*corev1api.Namespace{
	builder.ForNamespace("ns1").Phase(corev1api.NamespaceTerminating).Result(),
	builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),

@@ -57,7 +57,7 @@ type Request struct {
	*velerov1api.Backup
	StorageLocation           *velerov1api.BackupStorageLocation
	SnapshotLocations         []*velerov1api.VolumeSnapshotLocation
	NamespaceIncludesExcludes *collections.IncludesExcludes
	NamespaceIncludesExcludes *collections.NamespaceIncludesExcludes
	ResourceIncludesExcludes  collections.IncludesExcludesInterface
	ResourceHooks             []hook.ResourceHook
	ResolvedActions           []framework.BackupItemResolvedActionV2
@@ -69,7 +69,7 @@ type Request struct {
	ResPolicies              *resourcepolicies.Policies
	SkippedPVTracker         *skipPVTracker
	VolumesInformation       volume.BackupVolumesInformation
	ItemBlockChannel         chan ItemBlockInput
	WorkerPool               *ItemBlockWorkerPool
	}

	// BackupVolumesInformation contains the information needs by generating
@@ -103,3 +103,7 @@ func (r *Request) FillVolumesInformation() {
	r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
	r.VolumesInformation.BackupName = r.Backup.Name
	}

	func (r *Request) StopWorkerPool() {
	r.WorkerPool.Stop()
	}

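The Request now carries a *ItemBlockWorkerPool instead of a raw channel and shuts it down through StopWorkerPool. The self-contained sketch below shows the general shape of such a pool, exposing an input channel and a Stop method; it illustrates the pattern only and is not Velero's ItemBlockWorkerPool implementation.

package main

import (
	"fmt"
	"sync"
)

type workerPool struct {
	input chan string
	wg    sync.WaitGroup
}

// newWorkerPool starts the given number of workers reading from the input channel.
func newWorkerPool(workers int) *workerPool {
	p := &workerPool{input: make(chan string)}
	for i := 0; i < workers; i++ {
		p.wg.Add(1)
		go func() {
			defer p.wg.Done()
			for item := range p.input {
				fmt.Println("processed", item)
			}
		}()
	}
	return p
}

// GetInputChannel is how callers submit work, mirroring the usage in the hunks above.
func (p *workerPool) GetInputChannel() chan string { return p.input }

// Stop closes the input channel and waits for in-flight items to finish.
func (p *workerPool) Stop() {
	close(p.input)
	p.wg.Wait()
}

func main() {
	pool := newWorkerPool(2)
	pool.GetInputChannel() <- "item-block-1"
	pool.GetInputChannel() <- "item-block-2"
	pool.Stop()
}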
@@ -222,6 +222,12 @@ func (b *BackupBuilder) Phase(phase velerov1api.BackupPhase) *BackupBuilder {
	return b
	}

	// QueuePosition sets the Backup's queue position.
	func (b *BackupBuilder) QueuePosition(queuePos int) *BackupBuilder {
	b.object.Status.QueuePosition = queuePos
	return b
	}

	// StorageLocation sets the Backup's storage location.
	func (b *BackupBuilder) StorageLocation(location string) *BackupBuilder {
	b.object.Spec.StorageLocation = location

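Usage of the new method follows the same chained-builder style as the other BackupBuilder methods. The fragment below assumes the builder package import used elsewhere in these tests; the namespace, name, and position values are illustrative.

backup := builder.ForBackup("velero", "backup-1").
	StorageLocation("default").
	QueuePosition(3).
	Result()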
Some files were not shown because too many files have changed in this diff.