Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-03-16 14:24:45 +00:00)

Compare commits: 315 commits
.github/workflows/e2e-test-kind.yaml (vendored, 68 changed lines)

@@ -8,16 +8,26 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build:
runs-on: ubuntu-latest
needs: get-go-version
outputs:
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/checkout@v6

- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}

# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cli-cache
@@ -44,6 +54,26 @@ jobs:
run: |
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
docker save velero:pr-test-linux-amd64 -o ./velero.tar
# Check and build MinIO image once for all e2e tests
- name: Check Bitnami MinIO Dockerfile version
id: minio-version
run: |
DOCKERFILE_SHA=$(curl -s https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile\&per_page=1 | jq -r '.[0].sha')
echo "dockerfile_sha=${DOCKERFILE_SHA}" >> $GITHUB_OUTPUT
- name: Cache MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ steps.minio-version.outputs.dockerfile_sha }}
- name: Build MinIO Image from Bitnami Dockerfile
if: steps.minio-cache.outputs.cache-hit != 'true'
run: |
echo "Building MinIO image from Bitnami Dockerfile..."
git clone --depth 1 https://github.com/bitnami/containers.git /tmp/bitnami-containers
cd /tmp/bitnami-containers/bitnami/minio/2025/debian-12
docker build -t bitnami/minio:local .
docker save bitnami/minio:local > ${{ github.workspace }}/minio-image.tar
# Create json of k8s versions to test
# from guide: https://stackoverflow.com/a/65094398/4590470
setup-test-matrix:
@@ -75,20 +105,34 @@ jobs:
needs:
- build
- setup-test-matrix
- get-go-version
runs-on: ubuntu-latest
strategy:
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/checkout@v6

- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}

# Fetch the pre-built MinIO image from the build job
- name: Fetch built MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ needs.build.outputs.minio-dockerfile-sha }}
- name: Load MinIO Image
run: |
echo "Loading MinIO image..."
docker load < ./minio-image.tar
- name: Install MinIO
run:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
run: |
docker run -d --rm -p 9000:9000 -e "MINIO_ROOT_USER=minio" -e "MINIO_ROOT_PASSWORD=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:local
- uses: engineerd/setup-kind@v0.6.2
with:
skipClusterLogsExport: true
@@ -141,7 +185,7 @@ jobs:
timeout-minutes: 30
- name: Upload debug bundle
if: ${{ failure() }}
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: DebugBundle
name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
.github/workflows/get-go-version.yaml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
on:
workflow_call:
inputs:
ref:
description: "The target branch's ref"
required: true
type: string
outputs:
version:
description: "The expected Go version"
value: ${{ jobs.extract.outputs.version }}

jobs:
extract:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.pick-version.outputs.version }}
steps:
- name: Check out the code
uses: actions/checkout@v6

- id: pick-version
run: |
if [ "${{ inputs.ref }}" == "main" ]; then
version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
else
goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
fi

echo "version=$version"
echo "version=$version" >> $GITHUB_OUTPUT
.github/workflows/nightly-trivy-scan.yml (vendored, 2 changed lines)

@@ -19,7 +19,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v6

- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master

.github/workflows/pr-changelog-check.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
steps:

- name: Check out the code
uses: actions/checkout@v5
uses: actions/checkout@v6

- name: Changelog check
if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}

.github/workflows/pr-ci-check.yml (vendored, 16 changed lines)

@@ -1,18 +1,26 @@
name: Pull Request CI Check
on: [pull_request]
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

build:
name: Run CI
needs: get-go-version
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/checkout@v6

- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}

- name: Make ci
run: make ci
- name: Upload test coverage

.github/workflows/pr-codespell.yml (vendored, 2 changed lines)

@@ -8,7 +8,7 @@ jobs:
steps:

- name: Check out the code
uses: actions/checkout@v5
uses: actions/checkout@v6

- name: Codespell
uses: codespell-project/actions-codespell@master

.github/workflows/pr-containers.yml (vendored, 2 changed lines)

@@ -13,7 +13,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
name: Checkout

- name: Set up QEMU

.github/workflows/pr-goreleaser.yml (vendored, 2 changed lines)

@@ -14,7 +14,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
name: Checkout

- name: Verify .goreleaser.yml and try a dryrun release.

.github/workflows/pr-linter-check.yml (vendored, 20 changed lines)

@@ -7,18 +7,26 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

build:
name: Run Linter Check
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/checkout@v6

- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}

- name: Linter check
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v9
with:
version: v2.1.1
version: v2.5.0
args: --verbose

.github/workflows/push-builder.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:

- uses: actions/checkout@v5
- uses: actions/checkout@v6
with:
# The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
# there are at least two commits: the first one is the merge commit and the second one is the real commit

.github/workflows/push.yml (vendored, 15 changed lines)

@@ -9,17 +9,24 @@ on:
- '*'

jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.ref_name }}

build:
name: Build
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/checkout@v6

- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}

- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v3

.github/workflows/rebase.yml (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the latest code
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Automatic Rebase

.github/workflows/stale-issues.yml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9.1.0
- uses: actions/stale@v10.1.1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
@@ -17,6 +17,7 @@ If you're using Velero and want to add your organization to this list,
<a href="https://www.replicated.com/" border="0" target="_blank"><img alt="replicated.com" src="site/static/img/adopters/replicated-logo-red.svg" height="50"></a>
<a href="https://cloudcasa.io/" border="0" target="_blank"><img alt="cloudcasa.io" src="site/static/img/adopters/cloudcasa.svg" height="50"></a>
<a href="https://azure.microsoft.com/" border="0" target="_blank"><img alt="azure.com" src="site/static/img/adopters/azure.svg" height="50"></a>
<a href="https://www.broadcom.com/" border="0" target="_blank"><img alt="broadcom.com" src="site/static/img/adopters/broadcom.svg" height="50"></a>
## Success Stories

Below is a list of adopters of Velero in **production environments** that have

@@ -68,6 +69,9 @@ Replicated uses the Velero open source project to enable snapshots in [KOTS][101
**[Microsoft Azure][105]**<br>
[Azure Backup for AKS][106] is an Azure native, Kubernetes aware, Enterprise ready backup for containerized applications deployed on Azure Kubernetes Service (AKS). AKS Backup utilizes Velero to perform backup and restore operations to protect stateful applications in AKS clusters.<br>

**[Broadcom][107]**<br>
[VMware Cloud Foundation][108] (VCF) offers built-in [vSphere Kubernetes Service][109] (VKS), a Kubernetes runtime that includes a CNCF certified Kubernetes distribution, to deploy and manage containerized workloads. VCF empowers platform engineers with native [Kubernetes multi-cluster management][110] capability for managing Kubernetes (K8s) infrastructure at scale. VCF utilizes Velero for Kubernetes data protection, enabling platform engineers to back up and restore containerized workload manifests and persistent volumes, helping to increase the resiliency of stateful applications in VKS clusters.

## Adding your organization to the list of Velero Adopters

If you are using Velero and would like to be included in the list of `Velero Adopters`, add an SVG version of your logo to the `site/static/img/adopters` directory in this repo and submit a [pull request][3] with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). See this for an example [PR][4].

@@ -125,3 +129,8 @@ If you would like to add your logo to a future `Adopters of Velero` section on [

[105]: https://azure.microsoft.com/
[106]: https://learn.microsoft.com/azure/backup/backup-overview

[107]: https://www.broadcom.com/
[108]: https://www.vmware.com/products/cloud-infrastructure/vmware-cloud-foundation
[109]: https://www.vmware.com/products/cloud-infrastructure/vsphere-kubernetes-service
[110]: https://blogs.vmware.com/cloud-foundation/2025/09/29/empowering-platform-engineers-with-native-kubernetes-multi-cluster-management-in-vmware-cloud-foundation/
@@ -13,7 +13,7 @@
# limitations under the License.

# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.24.6-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS velero-builder

ARG GOPROXY
ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache

# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.24.6-bookworm AS restic-builder
FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS restic-builder

ARG GOPROXY
ARG BIN
@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache

# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:0.2.73
FROM paketobuildpacks/run-jammy-tiny:0.2.104

LABEL maintainer="Xun Jiang <jxun@vmware.com>"

@@ -15,7 +15,7 @@
ARG OS_VERSION=1809

# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.24.6-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.25.7-bookworm AS velero-builder

ARG GOPROXY
ARG BIN
@@ -7,11 +7,11 @@
| Maintainer | GitHub ID | Affiliation |
|---------------------|---------------------------------------------------------------|--------------------------------------------------|
| Scott Seago | [sseago](https://github.com/sseago) | [OpenShift](https://github.com/openshift) |
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | [VMware](https://www.github.com/vmware/) |
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | [VMware](https://www.github.com/vmware/) |
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | Broadcom |
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | Broadcom |
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | Broadcom |
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift) |
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) |
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | Broadcom |
| Anshul Ahuja | [anshulahuja98](https://github.com/anshulahuja98) | [Microsoft Azure](https://www.github.com/azure/) |
| Tiger Kaovilai | [kaovilai](https://github.com/kaovilai) | [OpenShift](https://github.com/openshift) |

@@ -27,14 +27,3 @@
* JenTing Hsiao ([jenting](https://github.com/jenting))
* Dave Smith-Uchida ([dsu-igeek](https://github.com/dsu-igeek))
* Ming Qiu ([qiuming-best](https://github.com/qiuming-best))

## Velero Contributors & Stakeholders

| Feature Area | Lead |
|------------------------|:------------------------------------------------------------------------------------:|
| Technical Lead | Daniel Jiang [reasonerjt](https://github.com/reasonerjt) |
| Kubernetes CSI Liaison | |
| Deployment | |
| Community Management | Orlin Vasilev [OrlinVasilev](https://github.com/OrlinVasilev) |
| Product Management | Pradeep Kumar Chaturvedi [pradeepkchaturvedi](https://github.com/pradeepkchaturvedi) |
@@ -42,13 +42,11 @@ The following is a list of the supported Kubernetes versions for each Velero ver

| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|----------------|-------------------------------------------|-------------------------------------|
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, and 1.33.1 |
| 1.18 | 1.18-latest | 1.33.7, 1.34.1, and 1.35.0 |
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
| 1.13 | 1.18-latest | 1.26.5, 1.27.3, 1.27.8, and 1.28.3 |
| 1.12 | 1.18-latest | 1.25.7, 1.26.5, 1.26.7, and 1.27.3 |
| 1.11 | 1.18-latest | 1.23.10, 1.24.9, 1.25.5, and 1.26.1 |

Velero supports IPv4, IPv6, and dual stack environments. Support for this was tested against Velero v1.8.
Tiltfile (2 changed lines)

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.24.6 as tilt-helper
FROM golang:1.25.7 as tilt-helper

# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
changelogs/CHANGELOG-1.18.md (new file, 109 lines)

@@ -0,0 +1,109 @@
## v1.18

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.18.0

### Container Image
`velero/velero:v1.18.0`

### Documentation
https://velero.io/docs/v1.18/

### Upgrading
https://velero.io/docs/v1.18/upgrade-to-1.18/

### Highlights
#### Concurrent backup
In v1.18, Velero can process multiple backups concurrently. This is a significant usability improvement, especially in multi-tenant or multi-user environments: backups submitted by different users can run simultaneously without interfering with each other.

See the design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/concurrent-backup-processing.md for more details.

#### Cache volume for data movers
In v1.18, Velero allows users to configure cache volumes for data mover pods during restore for CSI snapshot data movement and fs-backup. This brings the following benefits:
- Solves the problem of data mover pods failing when the pod's ephemeral disk space is limited
- Solves the problem of multiple data mover pods failing to run concurrently on one node when the node's ephemeral disk space is limited
- Together with the backup repository's cache limit configuration, a cache volume of appropriate size helps improve restore throughput

See the design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/backup-repo-cache-volume.md for more details.

#### Incremental size for data movers
In v1.18, Velero allows users to observe the incremental size of data mover backups for CSI snapshot data movement and fs-backup, so they can see the data reduction achieved by incremental backups.

#### Wildcard support for namespaces
In v1.18, Velero supports glob patterns in the namespace filters for backup and restore, so users can filter namespaces in batches.
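As a rough illustration of how such glob patterns behave (this is not Velero's implementation; Go's standard `path.Match` covers `*`, `?`, and `[abc]`, while the `{a,b,c}` form needs a fuller glob library), a minimal Go sketch that expands an include list against the namespaces present in a cluster:

```go
package main

import (
	"fmt"
	"path"
)

// expandNamespaceIncludes returns the namespaces that match at least one
// glob-style include pattern. This only sketches the matching idea; the
// real feature also handles excludes and the {a,b,c} syntax.
func expandNamespaceIncludes(patterns, clusterNamespaces []string) []string {
	var out []string
	for _, ns := range clusterNamespaces {
		for _, p := range patterns {
			if ok, err := path.Match(p, ns); err == nil && ok {
				out = append(out, ns)
				break
			}
		}
	}
	return out
}

func main() {
	namespaces := []string{"app-frontend", "app-backend", "kube-system", "default"}
	fmt.Println(expandNamespaceIncludes([]string{"app-*"}, namespaces))
	// Prints: [app-frontend app-backend]
}
```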
#### VolumePolicy for PVC phase
In v1.18, Velero VolumePolicy supports actions keyed on PVC phase, which helps users apply special handling to PVCs in a specific phase, e.g., skipping PVCs in Pending or Lost status during backup.

#### Scalability and Resiliency improvements
##### Prevent Velero server OOM Kill for large backup repositories
In v1.18, some backup repository operations are deferred and executed outside the Velero server, so the Velero server won't be OOM killed.

#### Performance improvement for VolumePolicy
In v1.18, VolumePolicy handling is enhanced for large numbers of pods/PVCs, so performance is significantly improved.

#### Events for data mover pod diagnostic
In v1.18, events are recorded in the data mover pod diagnostics, which lets users see more information for troubleshooting when a data mover pod fails.

### Runtime and dependencies
Golang runtime: 1.25.7
kopia: 0.22.3

### Limitations/Known issues

### Breaking changes
#### Deprecation of PVC selected node feature
According to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/main/GOVERNANCE.md#deprecation-policy), the PVC selected-node feature is deprecated in v1.18. Velero handles the PVC's selected-node annotation appropriately, so users don't need to do anything in particular.

### All Changes
* Remove backup from running list when backup fails validation (#9498, @sseago)
* Maintenance Job only uses the first element of the LoadAffinity array (#9494, @blackpiglet)
* Fix issue #9478, add diagnose info on expose peek fails (#9481, @Lyndon-Li)
* Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence. (#9474, @blackpiglet)
* Add maintenance job and data mover pod's labels and annotations setting. (#9452, @blackpiglet)
* Fix plugin init container names exceeding DNS-1123 limit (#9445, @mpryc)
* Add PVC-to-Pod cache to improve volume policy performance (#9441, @shubham-pampattiwar)
* Remove VolumeSnapshotClass from CSI B/R process. (#9431, @blackpiglet)
* Use hookIndex for recording multiple restore exec hooks. (#9366, @blackpiglet)
* Sanitize Azure HTTP responses in BSL status messages (#9321, @shubham-pampattiwar)
* Remove labels associated with previous backups (#9206, @Joeavaikath)
* Add VolumePolicy support for PVC Phase conditions to allow skipping Pending PVCs (#9166, @claude)
* feat: Enhance BackupStorageLocation with Secret-based CA certificate support (#9141, @kaovilai)
* Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs (#9132, @mjnagel)
* Fix issue #9194, add doc for GOMAXPROCS behavior change (#9420, @Lyndon-Li)
* Apply volume policies to VolumeGroupSnapshot PVC filtering (#9419, @shubham-pampattiwar)
* Fix issue #9276, add doc for cache volume support (#9418, @Lyndon-Li)
* Add Prometheus metrics for maintenance jobs (#9414, @shubham-pampattiwar)
* Fix issue #9400, connect repo first time after creation so that init params could be written (#9407, @Lyndon-Li)
* Cache volume for PVR (#9397, @Lyndon-Li)
* Cache volume support for DataDownload (#9391, @Lyndon-Li)
* don't copy securitycontext from first container if configmap found (#9389, @sseago)
* Refactor repo provider interface for static configuration (#9379, @Lyndon-Li)
* Fix issue #9365, prevent fake completion notification due to multiple update of single PVR (#9375, @Lyndon-Li)
* Add cache volume configuration (#9370, @Lyndon-Li)
* Track actual resource names for GenerateName in restore status (#9368, @shubham-pampattiwar)
* Fix managed fields patch for resources using GenerateName (#9367, @shubham-pampattiwar)
* Support cache volume for generic restore exposer and pod volume exposer (#9362, @Lyndon-Li)
* Add incrementalSize to DU/PVB for reporting new/changed size (#9357, @sseago)
* Add snapshotSize for DataDownload, PodVolumeRestore (#9354, @Lyndon-Li)
* Add cache dir configuration for udmrepo (#9353, @Lyndon-Li)
* Fix the Job build error when BackupRepository name longer than 63. (#9350, @blackpiglet)
* Add cache configuration to VGDP (#9342, @Lyndon-Li)
* Fix issue #9332, add bytesDone for cache files (#9333, @Lyndon-Li)
* Fix typos in documentation (#9329, @T4iFooN-IX)
* Concurrent backup processing (#9307, @sseago)
* VerifyJSONConfigs verify every elements in Data. (#9302, @blackpiglet)
* Fix issue #9267, add events to data mover prepare diagnostic (#9296, @Lyndon-Li)
* Add option for privileged fs-backup pod (#9295, @sseago)
* Fix issue #9193, don't connect repo in repo controller (#9291, @Lyndon-Li)
* Implement concurrency control for cache of native VolumeSnapshotter plugin. (#9281, @0xLeo258)
* Fix issue #7904, remove the code and doc for PVC node selection (#9269, @Lyndon-Li)
* Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases (#9264, @shubham-pampattiwar)
* Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment (#9256, @shubham-pampattiwar)
* Implement wildcard namespace pattern expansion for backup namespace includes/excludes. This change adds support for wildcard patterns (*, ?, [abc], {a,b,c}) in namespace includes and excludes during backup operations (#9255, @Joeavaikath)
* Protect VolumeSnapshot field from race condition during multi-thread backup (#9248, @0xLeo258)
* Update AzureAD Microsoft Authentication Library to v1.5.0 (#9244, @priyansh17)
* Get pod list once per namespace in pvc IBA (#9226, @sseago)
* Fix issue #7725, add design for backup repo cache configuration (#9148, @Lyndon-Li)
* Fix issue #9229, don't attach backupPVC to the source node (#9233, @Lyndon-Li)
* feat: Permit specifying annotations for the BackupPVC (#9173, @clementnuss)
changelogs/unreleased/9508-kaovilai (new file, 1 line)
@@ -0,0 +1 @@
Fix VolumePolicy PVC phase condition filter for unbound PVCs (#9507)

changelogs/unreleased/9537-kaovilai (new file, 1 line)
@@ -0,0 +1 @@
Fix VolumePolicy PVC phase condition filter for unbound PVCs (#9507)

changelogs/unreleased/9539-Joeavaikath (new file, 1 line)
@@ -0,0 +1 @@
Support all glob wildcard characters in namespace validation
@@ -594,6 +594,8 @@ spec:
description: Phase is the current state of the Backup.
enum:
- New
- Queued
- ReadyToStart
- FailedValidation
- InProgress
- WaitingForPluginOperations
@@ -625,6 +627,11 @@ spec:
filters that happen as items are processed.
type: integer
type: object
queuePosition:
description: |-
QueuePosition is the position of the backup in the queue.
Only relevant when Phase is "Queued"
type: integer
startTimestamp:
description: |-
StartTimestamp records the time a backup was started.

@@ -113,10 +113,38 @@ spec:
description: Bucket is the bucket to use for object storage.
type: string
caCert:
description: CACert defines a CA bundle to use when verifying
TLS connections to the provider.
description: |-
CACert defines a CA bundle to use when verifying TLS connections to the provider.
Deprecated: Use CACertRef instead.
format: byte
type: string
caCertRef:
description: |-
CACertRef is a reference to a Secret containing the CA certificate bundle to use
when verifying TLS connections to the provider. The Secret must be in the same
namespace as the BackupStorageLocation.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
prefix:
description: Prefix is the path inside a bucket to use for Velero
storage. Optional.

@@ -33,6 +33,12 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -189,6 +195,11 @@ spec:
format: date-time
nullable: true
type: string
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the pod volume backup's status.
type: string

@@ -133,6 +133,10 @@ spec:
snapshotID:
description: SnapshotID is the ID of the volume snapshot to be restored.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: SourceNamespace is the original namespace for namaspace
mapping.
File diff suppressed because one or more lines are too long
@@ -108,6 +108,10 @@ spec:
description: SnapshotID is the ID of the Velero backup snapshot to
be restored from.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: |-
SourceNamespace is the original namespace where the volume is backed up from.

@@ -33,6 +33,12 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -173,6 +179,11 @@ spec:
as a result of the DataUpload.
nullable: true
type: object
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the DataUpload's status.
type: string
File diff suppressed because one or more lines are too long
design/Implemented/apply-flag.md (new file, 70 lines)

@@ -0,0 +1,70 @@
# Apply flag for install command

## Abstract
Add an `--apply` flag to the install command that enables applying existing resources rather than creating them. This can be useful as part of the upgrade process for existing installations.

## Background
The current Velero install command creates resources but doesn't provide a direct way to apply updates to an existing installation.
Users attempting to run the install command on an existing installation receive "already exists" messages.
Upgrade steps for existing installs typically involve a three (or more) step process to apply updated CRDs (using `--dry-run` and piping to `kubectl apply`) and then updating/setting images on the Velero deployment and node-agent.

## Goals
- Provide a simple flag to enable applying resources on an existing Velero installation.
- Use server-side apply to update existing resources rather than attempting to create them.
- Maintain consistency with the regular install flow.

## Non Goals
- Implement special logic for specific version-to-version upgrades (e.g., resource deletion, etc.).
- Add complex upgrade validation or pre/post-upgrade hooks.
- Provide rollback capabilities.

## High-Level Design
The `--apply` flag will be added to the Velero install command.
When this flag is set, the installation process will use server-side apply to update existing resources instead of using create on new resources.
This flag can be used as _part_ of the upgrade process, but will not always fully handle an upgrade.

## Detailed Design
The implementation adds a new boolean flag `--apply` to the install command.
This flag will be passed through to the underlying install functions where the resource creation logic resides.

When the flag is set to true:
- The `createOrApplyResource` function will use server-side apply with field manager "velero-cli" and `force=true` to update resources.
- Resources will be applied in the same order as they would be created during installation.
- Custom Resource Definitions will still be processed first, and the system will wait for them to be established before continuing.

The server-side apply approach with `force=true` ensures that resources are updated even if there are conflicts with the last applied state.
This provides a best-effort mechanism to apply resources that follows the same flow as installation but updates resources instead of creating them.

No special handling is added for specific versions or resource structures, making this a general-purpose mechanism for applying resources.
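For illustration only, a minimal sketch of what a server-side apply call with the "velero-cli" field manager and forced conflict resolution can look like when using the controller-runtime client (the actual Velero implementation may use different client plumbing):

```go
package install

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyResource performs a server-side apply of obj, taking ownership of
// conflicting fields (force=true) under the "velero-cli" field manager.
// For server-side apply the object must carry apiVersion/kind (TypeMeta)
// and should not include managedFields.
func applyResource(ctx context.Context, c client.Client, obj client.Object) error {
	obj.SetManagedFields(nil)
	return c.Patch(ctx, obj, client.Apply,
		client.FieldOwner("velero-cli"),
		client.ForceOwnership,
	)
}
```

With something like this in place, an upgrade is largely a matter of re-running the install flow with the flag set, as the design describes.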
## Alternatives Considered
1. Creating a separate `upgrade` command that would duplicate much of the install command logic.
- Rejected due to code duplication and maintenance overhead.

2. Implementing version-specific upgrade logic to handle breaking changes between versions.
- Rejected as overly complex and difficult to maintain across multiple version paths.
- This could be considered again in the future, but is not in the scope of the current design.

3. Adding automatic detection of existing resources and switching to apply mode.
- Rejected as it could lead to unexpected behavior and confusion if users unintentionally apply changes to existing resources.

## Security Considerations
The apply flag maintains the same security profile as the install command.
No additional permissions are required beyond what is needed for resource creation.
The use of `force=true` with server-side apply could potentially override manual changes made to resources, but this is a necessary trade-off to ensure apply is successful.

## Compatibility
This enhancement is compatible with all existing Velero installations as it is a new opt-in flag.
It does not change any resource formats or API contracts.
The apply process is best-effort and does not guarantee compatibility between arbitrary versions of Velero.
Users should still consult release notes for any breaking changes that may require manual intervention.
This flag could be adopted by the helm chart, specifically for CRD updates, to simplify the CRD update job.

## Implementation
The implementation involves:
1. Adding support for `Apply` to the existing Kubernetes client code.
1. Adding the `--apply` flag to the install command options.
1. Changing `createResource` to `createOrApplyResource` and updating it to use server-side apply when the `apply` boolean is set.

The implementation is straightforward and follows existing code patterns.
No migration of state or special handling of specific resources is required.
design/Implemented/backup-repo-cache-volume.md
Normal file
231
design/Implemented/backup-repo-cache-volume.md
Normal file
@@ -0,0 +1,231 @@
|
||||
# Backup Repository Cache Volume Design
|
||||
|
||||
## Glossary & Abbreviation
|
||||
|
||||
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
|
||||
**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features that is introduced in [Unified Repository design][1].
|
||||
**Velero Generic Data Path (VGDP)**: VGDP is the collective of modules that is introduced in [Unified Repository design][1]. Velero uses these modules to finish data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include uploaders and the backup repository.
|
||||
**Data Mover Pods**: Intermediate pods which hold VGDP and complete the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3] for details.
|
||||
**Repository Maintenance Pods**: Pods for [Repository Maintenance Jobs][4], which holds VGDP to run repository maintenance.
|
||||
|
||||
## Background
|
||||
|
||||
According to the [Unified Repository design][1] Velero uses selectable backup repositories for various backup/restore methods, i.e., fs-backup, volume snapshot data movement, etc. Some backup repositories may need to cache data on the client side for various repository operation, so as to accelerate the execution.
|
||||
In the existing [Backup Repository Configuration][5], we allow users to configure the cache data size (`cacheLimitMB`). However, the cache data is still stored in the root file system of data mover pods/repository maintenance pods, so stored in the root file system of the node. This is not good enough, reasons:
|
||||
- In many distributions, the node's system disk size is predefined, non configurable and limit, e.g., the system disk size may be 20G or less
|
||||
- Velero supports concurrent data movements in each node. The cache in each of the concurrent data mover pods could quickly run out of the system disk and cause problems like pod eviction, failure of pod creation, degradation of Kubernetes QoS, etc.
|
||||
|
||||
We need to allow users to prepare a dedicated location, e.g., a dedictated volume, for the cache.
|
||||
Not all backup repositories or not all backup repository operations require cache, we need to define the details when and how the cache is used.
|
||||
|
||||
## Goals
|
||||
|
||||
- Create a mechanism for users to configure cache volumes for various pods running VGDP
|
||||
- Design the workflow to assign the cache volume pod path to backup repositories
|
||||
- Describe when and how the cache volume is used
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- The solution is based on [Unified Repository design][1], [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3], legacy data paths are not supported. E.g., when a pod volume restore (PVR) runs with legacy Restic path, if any data is cached, the cache still resides in the root file system.
|
||||
|
||||
## Solution
|
||||
|
||||
### Cache Data
|
||||
|
||||
Varying on backup repositoires, cache data may include payload data or repository metadata, e.g., indexes to the payload data chunks.
|
||||
|
||||
Payload data is highly related to the backup data, and normally take the majority of the repository data as well as the cache data.
|
||||
|
||||
Repository metadata is related to the backup repository's chunking algorithm, data chunk mapping method, etc, and so the size is not proportional to the backup data size.
|
||||
On the other hand for some backup repository, in extreme cases, the repository metadata may be significantly large. E.g., Kopia's indexes are per chunks, if there are huge number of small files in the repository, Kopia's index data may be in the same level of or even larger than the payload data.
|
||||
However, in the cases that repository metadata data become the majority, other bottlenecks may emerge and concurrency of data movers may be significantly constrained, so the requirement to cache volumes may go away.
|
||||
|
||||
Therefore, for now we only consider the cache volume requirement for payload data, and leave the consideration for metadata as a future enhancement.
|
||||
|
||||
### Scenarios
|
||||
|
||||
Backup repository cache varies on backup repositories and backup repository operation during VGDP runs. Below are the scenarios when VGDP runs:
|
||||
- Data Upload for Backup: this is the process to upload/write the backup data into the backup repository, e.g., DataUpload or PodVolumeBackup. The pieces of data is almost directly written to the repository, sometimes with a small group staying shortly in the local place. That is to say, there should not be large scale data cached for this scenario, so we don't prepare dedicated cache for this scenario.
|
||||
- Repository Maintenance: Repository maintenance most often visits the backup repository's metadata and sometimes it needs to visit the file system directories from the backed up data. On the other hand, it is not practical to run concurrent maintenance jobs in one node. So the cache data is neither large nor affect the root file system too much. Therefore, we don't need to prepare dedicated cache for this scenario.
|
||||
- Data Download for Restore: this is the process to download/read the backup data from the backup repository during restore, e.g., DataDownload or PodVolumeRestore. For backup repositories for which data are stored in remote backup storages (e.g., Kopia repository stores data in remote object stores), large scale of data are cached locally to accerlerate the restore. Therefore, we need dedicate cache volumes for this scenario.
|
||||
- Backup Deletion: During this scenario, backup repository is connected, metadata is enumerated to find the repository snapshot representing the backup data. That is to say, only metadata is cached if any. Therefore, dedicated cache volumes are not required in this scenario.
|
||||
|
||||
The above analyses are based on the common behavior of backup repositories and they are not considering the case that backup repository metadata takes majority or siginficant proportion of the cache data.
|
||||
As a conclusion of the analyses, we will create dedicated cache volumes for restore scenarios.
|
||||
For other scenarios, we can add them regarded to the future changes/requirements. The mechanism to expose and connect the cache volumes should work for all scenarios. E.g., if we need to consider the backup repository metadata case, we may need cache volumes for backup and repository maintenance as well, then we can just reuse the same cache volume provision and connection mechanism to backup and repository maintenance scenarios.
|
||||
|
||||
### Cache Data and Lifecycle
|
||||
|
||||
If available, one cache volume is dedicately assigned to one data mover pod. That is, the cached data is destroyed when the data mover pod completes. Then the backup repository instance also closes.
|
||||
Cache data are fully managed by the specific backup repository. So the backup repository may also have its own way to GC the cache data.
|
||||
That is to say, cache data GC may be launched by the backup repository instance during the running of the data mover pod; then the left data are automatically destroyed when the data mover pod and the cache PVC are destroyed (cache PVC's `reclaimPolicy` is always `Deleted`, so once the cache PVC is destroyed, the volume will also be destroyed). So no specially logics are needed for cache data GC.
|
||||
|
||||
### Data Size
|
||||
|
||||
Cache volumes take storage space and cluster resources (PVC, PV), therefore, cache volumes should be created only when necessary and the volumes should be with reasonable size based on the cache data size:
|
||||
- It is not a good bargain to have cache volumes for small backups, small backups will use resident cache location (the cache location in the root file system)
|
||||
- The cache data size has a limit, the existing `cacheLimitMB` is used for this purpose. E.g., it could be set as 1024 for a 1TB backup, which means 1GB of data is cached and the old cache data exceeding this size will be cleared. Therefore, it is meaningless to set the cache volume size much larger than `cacheLimitMB`
|
||||
|
||||
### Cache Volume Size
|
||||
|
||||
The cache volume size is calculated from below factors (for Restore scenarios):
|
||||
- **Limit**: The limit of the cache data, that is represented by `cacheLimitMB`, the default value is 5GB
|
||||
- **backupSize**: The size of the backup as a reference to evaluate whether to create a cache volume. It doesn't mean the backup data really decides the cache data all the time, it is just a reference to evaluate the scale of the backup, small scale backups may need small cache data. Sometimes, backupSize is not irrelevant to the size of cache data, in this case, ResidentThreshold should not be set, Limit will be used directly. It is unlikely that backupSize is unavailable, but once that happens, ResidentThreshold is ignored, Limit will be used directly.
|
||||
- **ResidentThreshold**: The minimum backup size that a cache volume is created
|
||||
- **InflationPercentage**: Considering the overhead of the file system and the possible delay of the cache cleanup, there should be an inflation for the final volume size vs. the logical size, otherwise, the cache volume may be overrun. This inflation percentage is hardcoded, e.g., 20%.
|
||||
|
||||
A formula is as below:
|
||||
```
|
||||
cacheVolumeSize = ((backupSize != 0 ? (backupSize > residentThreshold ? limit : 0) : limit) * (100 + inflationPercentage)) / 100
|
||||
```
|
||||
Finally, the `cacheVolumeSize` will be rounded up to GiB considering the UX friendliness, storage friendliness and management friendliness.
|
||||
|
||||
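As a minimal illustration of this sizing rule (the names follow the factors above; this is a sketch of the formula, not the exact Velero code):

```go
package main

import "fmt"

// cacheVolumeSizeBytes applies the sizing formula from this design.
// limit is the cache limit in bytes (from cacheLimitMB), backupSize is the
// backup size in bytes (0 if unknown), residentThreshold is the minimum
// backup size that warrants a cache volume, and inflationPercentage is the
// hardcoded inflation (e.g., 20).
func cacheVolumeSizeBytes(limit, backupSize, residentThreshold, inflationPercentage int64) int64 {
	size := limit // backupSize unknown: ignore the threshold, use the limit directly
	if backupSize != 0 {
		if backupSize > residentThreshold {
			size = limit
		} else {
			size = 0 // small backup: no dedicated cache volume
		}
	}
	size = size * (100 + inflationPercentage) / 100

	// Round up to a whole GiB, as described above.
	const gib = int64(1) << 30
	if size%gib != 0 {
		size = (size/gib + 1) * gib
	}
	return size
}

func main() {
	// 5 GiB limit, 1 TiB backup, 1 GiB threshold, 20% inflation -> a 6 GiB volume.
	fmt.Println(cacheVolumeSizeBytes(5<<30, 1<<40, 1<<30, 20))
}
```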
### PVC/PV

The PVC for a cache volume is created in the Velero namespace, and a storage class is required for the cache PVC. The PVC's accessMode is `ReadWriteOnce` and volumeMode is `Filesystem`, so the storage class provided should support this specification. Otherwise, if the storage class doesn't support either of these, the data mover pod may hang in the `Pending` state until a data movement timeout (e.g., `prepareTimeout`) expires, and the data movement will finally fail.
The cache volume is not expected to be retained after the data mover pod is deleted, so the `reclaimPolicy` for the storage class must be `Delete`.

To detect problems with the storage class and fail earlier, a validation is applied to the storage class; if the validation fails, the cache configuration is ignored and the data mover pod is created without a cache volume.
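A sketch of the kind of cache PVC an exposer could create under these constraints (the package, function name, and sizing are illustrative, not Velero's actual naming; recent k8s.io/api versions use `VolumeResourceRequirements`, older ones use `ResourceRequirements`):

```go
package exposer

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newCachePVC builds a PVC for a data mover cache volume in the Velero
// namespace: ReadWriteOnce access, Filesystem volume mode, and the storage
// class taken from the node-agent cacheVolume configuration.
func newCachePVC(name, namespace, storageClass string, sizeBytes int64) *corev1.PersistentVolumeClaim {
	fsMode := corev1.PersistentVolumeFilesystem
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			VolumeMode:       &fsMode,
			StorageClassName: &storageClass,
			// Resources is of type VolumeResourceRequirements in current k8s.io/api.
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: *resource.NewQuantity(sizeBytes, resource.BinarySI),
				},
			},
		},
	}
}
```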
### Cache Volume Configurations
|
||||
|
||||
Below configurations are introduced:
|
||||
- **residentThresholdMB**: the minimum data size(in MB) to be processed (if available) that a cache volume is created
|
||||
- **cacheStorageClass**: the name of the storage class to provision the cache PVC
|
||||
|
||||
Not like `cacheLimitMB` which is set to and affect the backup repository, the above two configurations are actually data mover configurations of how to create cache volumes to data mover pods; and the two configurations don't need to be per backup repository. So we add them to the node-agent Configuration.
|
||||
|
||||
### Sample
|
||||
|
||||
Below are some examples of the node-agent configMap with the configurations:
|
||||
|
||||
Sample-1:
|
||||
```json
|
||||
{
|
||||
"cacheVolume": {
|
||||
"storageClass": "sc-1",
|
||||
"residentThresholdMB": 1024
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sample-2:
|
||||
```json
|
||||
{
|
||||
"cacheVolume": {
|
||||
"storageClass": "sc-1",
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sample-3:
|
||||
```json
|
||||
{
|
||||
"cacheVolume": {
|
||||
"residentThresholdMB": 1024
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**sample-1**: This is a valid configuration. Restores with a backup data size larger than 1GB will be assigned a cache volume using storage class `sc-1`.
|
||||
**sample-2**: This is a valid configuration. Data mover pods are always assigned a cache volume using storage class `sc-1`.
|
||||
**sample-3**: This is not a valid configuration because the storage class is absent. Velero gives up creating a cache volume.
|
||||
|
||||
To create the configMap, users need to save one of the above samples to a JSON file and then run the below command:
|
||||
```
|
||||
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
The cache volume configurations are read by the node-agent server, so users also need to specify the `--node-agent-configmap` flag in the `velero node-agent` server parameters.
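For instance, assuming the configMap is named `node-agent-config`, the node-agent daemonset would start the server with:

```
velero node-agent server --node-agent-configmap=node-agent-config
```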
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Backup and Restore
|
||||
|
||||
The restore needs to know the backup size in order to calculate the cache volume size, so some new fields are added to the DataDownload and PodVolumeRestore CRDs.
|
||||
|
||||
A `snapshotSize` field is also added to the `spec` of DataDownload and PodVolumeRestore:
|
||||
```yaml
|
||||
spec:
|
||||
snapshotID:
|
||||
description: SnapshotID is the ID of the Velero backup snapshot to
|
||||
be restored from.
|
||||
type: string
|
||||
snapshotSize:
|
||||
description: SnapshotSize is the logical size of the snapshot.
|
||||
format: int64
|
||||
type: integer
|
||||
```
|
||||
|
||||
`snapshotSize` represents the total size of the backup; during restore, the value is transferred from DataUpload/PodVolumeBackup's `Status.Progress.TotalBytes` to DataDownload/PodVolumeRestore.
|
||||
|
||||
It is unlikely that `Status.Progress.TotalBytes` from DataUpload/PodVolumeBackup is unavailable, but if that happens, then according to the above formula `residentThresholdMB` is ignored and the cache volume size is calculated directly from the cache limit of the corresponding backup repository.
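A sketch of how the value could be carried over when the restore controller builds the DataDownload (the surrounding variable names are illustrative):

```go
// carry the backup size over from the DataUpload recorded during backup
dataDownload.Spec.SnapshotID = dataUpload.Status.SnapshotID
if dataUpload.Status.Progress.TotalBytes > 0 {
	dataDownload.Spec.SnapshotSize = dataUpload.Status.Progress.TotalBytes
}
// when TotalBytes is unavailable, SnapshotSize stays 0 and the cache limit is used directly
```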
|
||||
|
||||
### Exposer
|
||||
|
||||
Cache volume configurations are retrieved by node-agent and passed through DataDownload/PodVolumeRestore to GenericRestore exposer/PodVolume exposer.
|
||||
The exposers are responsible for calculating the cache volume size, creating the cache PVCs, and mounting them to the restorePods.
|
||||
If the calculated cache volume size is 0, or any critical parameter is missing (e.g., the cache volume storage class), the exposers ignore the cache volume configuration and continue creating restorePods without cache volumes, so there is no impact on the result of the restore.
|
||||
|
||||
Exposers mount the cache volume to a predefined directory and pass the directory to the data mover pods through the `cache-volume-path` parameter.
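As an illustration, the relevant fragment of a restorePod could look as follows (the claim name, container name, and mount path are examples only):

```yaml
spec:
  containers:
    - name: velero-data-mover
      args:
        - --cache-volume-path=/cache
      volumeMounts:
        - name: cache-volume
          mountPath: /cache
  volumes:
    - name: cache-volume
      persistentVolumeClaim:
        claimName: example-restore-cache
```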
|
||||
|
||||
The below data structures are added to the exposers' expose parameters:
|
||||
|
||||
```go
|
||||
type GenericRestoreExposeParam struct {
|
||||
// RestoreSize specifies the data size for the volume to be restored
|
||||
RestoreSize int64
|
||||
|
||||
// CacheVolume specifies the info for cache volumes
|
||||
CacheVolume *CacheVolumeInfo
|
||||
}
|
||||
|
||||
type PodVolumeExposeParam struct {
|
||||
// RestoreSize specifies the data size for the volume to be restored
|
||||
RestoreSize int64
|
||||
|
||||
// CacheVolume specifies the info for cache volumes
|
||||
CacheVolume *repocache.CacheConfigs
|
||||
}
|
||||
|
||||
type CacheConfigs struct {
|
||||
// StorageClass specifies the storage class for cache volumes
|
||||
StorageClass string
|
||||
|
||||
// Limit specifies the maximum size of the cache data
|
||||
Limit int64
|
||||
|
||||
// ResidentThreshold specifies the minimum size of the cache data to create a cache volume
|
||||
ResidentThreshold int64
|
||||
}
|
||||
```
|
||||
|
||||
### Data Mover Pods
|
||||
|
||||
Data mover pods retrieve the cache volume directory from `cache-volume-path` parameter and pass it to Unified Repository.
|
||||
If the directory is empty, Unified Repository uses the resident location for data cache, that is, the root file system.
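A minimal sketch of that fallback (the default resident path shown is illustrative):

```go
// resolveCacheDir decides which directory is handed to the Unified Repository.
func resolveCacheDir(cacheVolumePath string) string {
	if cacheVolumePath == "" {
		// no cache volume was mounted: cache under the root file system
		return "/tmp/velero-repo-cache"
	}
	return cacheVolumePath
}
```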
|
||||
|
||||
### Kopia Repository
|
||||
|
||||
Kopia repository supports cache directory configuration for both metadata and data. The existing `SetupConnectOptions` is modified to customize the `CacheDirectory`:
|
||||
|
||||
```go
|
||||
func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions {
|
||||
...
|
||||
|
||||
return repo.ConnectOptions{
|
||||
CachingOptions: content.CachingOptions{
|
||||
CacheDirectory: cacheDir,
|
||||
...
|
||||
},
|
||||
...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: Implemented/vgdp-micro-service/vgdp-micro-service.md
|
||||
[3]: Implemented/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md
|
||||
[4]: Implemented/repo_maintenance_job_config.md
|
||||
[5]: Implemented/backup-repo-config.md
|
||||
417
design/Implemented/bsl-certificate-support_design.md
Normal file
@@ -0,0 +1,417 @@
|
||||
# Design for BSL Certificate Support Enhancement
|
||||
|
||||
## Abstract
|
||||
|
||||
This design document describes the enhancement of BackupStorageLocation (BSL) certificate management in Velero, introducing a Secret-based certificate reference mechanism (`caCertRef`) alongside the existing inline certificate field (`caCert`). This enhancement provides a more secure, Kubernetes-native approach to certificate management while enabling future CLI improvements for automatic certificate discovery.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, Velero supports TLS certificate verification for object storage providers through an inline `caCert` field in the BSL specification. While functional, this approach has several limitations:
|
||||
|
||||
- **Security**: Certificates are stored directly in the BSL YAML, potentially exposing sensitive data
|
||||
- **Management**: Certificate rotation requires updating the BSL resource itself
|
||||
- **CLI Usability**: Users must manually specify certificates when using CLI commands
|
||||
- **Size Limitations**: Large certificate bundles can make BSL resources unwieldy
|
||||
|
||||
Issue #9097 and PR #8557 highlight the need for improved certificate management that addresses these concerns while maintaining backward compatibility.
|
||||
|
||||
## Goals
|
||||
|
||||
- Provide a secure, Secret-based certificate storage mechanism
|
||||
- Maintain full backward compatibility with existing BSL configurations
|
||||
- Enable future CLI enhancements for automatic certificate discovery
|
||||
- Simplify certificate rotation and management
|
||||
- Provide clear migration path for existing users
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- Removing support for inline certificates immediately
|
||||
- Changing the behavior of existing BSL configurations
|
||||
- Implementing client-side certificate validation
|
||||
- Supporting certificates from ConfigMaps or other resource types
|
||||
|
||||
## High-Level Design
|
||||
|
||||
### API Changes
|
||||
|
||||
#### New Field: CACertRef
|
||||
|
||||
```go
|
||||
type ObjectStorageLocation struct {
|
||||
// Existing field (now deprecated)
|
||||
// +optional
|
||||
// +kubebuilder:deprecatedversion:warning="caCert is deprecated, use caCertRef instead"
|
||||
CACert []byte `json:"caCert,omitempty"`
|
||||
|
||||
// New field for Secret reference
|
||||
// +optional
|
||||
CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
The `SecretKeySelector` follows standard Kubernetes patterns:
|
||||
```go
|
||||
type SecretKeySelector struct {
|
||||
// Name of the Secret
|
||||
Name string `json:"name"`
|
||||
// Key within the Secret
|
||||
Key string `json:"key"`
|
||||
}
|
||||
```
|
||||
|
||||
### Certificate Resolution Logic
|
||||
|
||||
The system follows a priority-based resolution:
|
||||
|
||||
1. If `caCertRef` is specified, retrieve certificate from the referenced Secret
|
||||
2. If `caCert` is specified (and `caCertRef` is not), use the inline certificate
|
||||
3. If neither is specified, no custom CA certificate is used
|
||||
|
||||
### Validation
|
||||
|
||||
BSL validation ensures mutual exclusivity:
|
||||
```go
|
||||
func (bsl *BackupStorageLocation) Validate() error {
|
||||
if bsl.Spec.ObjectStorage != nil &&
|
||||
bsl.Spec.ObjectStorage.CACert != nil &&
|
||||
bsl.Spec.ObjectStorage.CACertRef != nil {
|
||||
return errors.New("cannot specify both caCert and caCertRef in objectStorage")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### BSL Controller Changes
|
||||
|
||||
The BSL controller incorporates validation during reconciliation:
|
||||
|
||||
```go
|
||||
func (r *backupStorageLocationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
// ... existing code ...
|
||||
|
||||
// Validate BSL configuration
|
||||
if err := location.Validate(); err != nil {
|
||||
r.logger.WithError(err).Error("BSL validation failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// ... continue reconciliation ...
|
||||
}
|
||||
```
|
||||
|
||||
### Repository Provider Integration
|
||||
|
||||
All repository providers implement consistent certificate handling:
|
||||
|
||||
```go
|
||||
func configureCACert(bsl *velerov1api.BackupStorageLocation, credGetter *credentials.CredentialGetter) ([]byte, error) {
|
||||
if bsl.Spec.ObjectStorage == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Prefer caCertRef (new method)
|
||||
if bsl.Spec.ObjectStorage.CACertRef != nil {
|
||||
certString, err := credGetter.FromSecret.Get(bsl.Spec.ObjectStorage.CACertRef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting CA certificate from secret")
|
||||
}
|
||||
return []byte(certString), nil
|
||||
}
|
||||
|
||||
// Fall back to caCert (deprecated)
|
||||
if bsl.Spec.ObjectStorage.CACert != nil {
|
||||
return bsl.Spec.ObjectStorage.CACert, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
```
|
||||
|
||||
### CLI Certificate Discovery Integration
|
||||
|
||||
#### Background: PR #8557 Implementation
|
||||
PR #8557 ("CLI automatically discovers and uses cacert from BSL") was merged in August 2025, introducing automatic CA certificate discovery from BackupStorageLocation for Velero CLI download operations. This eliminated the need for users to manually specify the `--cacert` flag when performing operations like `backup describe`, `backup download`, `backup logs`, and `restore logs`.
|
||||
|
||||
#### Current Implementation (Post PR #8557)
|
||||
The CLI now automatically discovers certificates from BSL through the `pkg/cmd/util/cacert/bsl_cacert.go` module:
|
||||
|
||||
```go
|
||||
// Current implementation only supports inline caCert
|
||||
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
|
||||
// ... fetch BSL ...
|
||||
if bsl.Spec.ObjectStorage != nil && len(bsl.Spec.ObjectStorage.CACert) > 0 {
|
||||
return string(bsl.Spec.ObjectStorage.CACert), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Enhancement with caCertRef Support
|
||||
This design extends the existing CLI certificate discovery to support the new `caCertRef` field:
|
||||
|
||||
```go
|
||||
// Enhanced implementation supporting both caCert and caCertRef
|
||||
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
|
||||
	// ... fetch BSL ...
	if bsl.Spec.ObjectStorage == nil {
		return "", nil
	}
|
||||
|
||||
// Prefer caCertRef over inline caCert
|
||||
if bsl.Spec.ObjectStorage.CACertRef != nil {
|
||||
secret := &corev1api.Secret{}
|
||||
key := types.NamespacedName{
|
||||
Name: bsl.Spec.ObjectStorage.CACertRef.Name,
|
||||
Namespace: namespace,
|
||||
}
|
||||
if err := client.Get(ctx, key, secret); err != nil {
|
||||
return "", errors.Wrap(err, "error getting certificate secret")
|
||||
}
|
||||
|
||||
certData, ok := secret.Data[bsl.Spec.ObjectStorage.CACertRef.Key]
|
||||
if !ok {
|
||||
return "", errors.Errorf("key %s not found in secret",
|
||||
bsl.Spec.ObjectStorage.CACertRef.Key)
|
||||
}
|
||||
return string(certData), nil
|
||||
}
|
||||
|
||||
// Fall back to inline caCert (deprecated)
|
||||
if bsl.Spec.ObjectStorage.CACert != nil {
|
||||
return string(bsl.Spec.ObjectStorage.CACert), nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Certificate Resolution Priority
|
||||
|
||||
The CLI follows this priority order for certificate resolution:
|
||||
|
||||
1. **`--cacert` flag** - Manual override, highest priority
|
||||
2. **`caCertRef`** - Secret-based certificate (recommended)
|
||||
3. **`caCert`** - Inline certificate (deprecated)
|
||||
4. **System certificate pool** - Default fallback
|
||||
|
||||
#### User Experience Improvements
|
||||
|
||||
With both PR #8557 and this enhancement:
|
||||
|
||||
```bash
|
||||
# Automatic discovery - works with both caCert and caCertRef
|
||||
velero backup describe my-backup
|
||||
velero backup download my-backup
|
||||
velero backup logs my-backup
|
||||
velero restore logs my-restore
|
||||
|
||||
# Manual override still available
|
||||
velero backup describe my-backup --cacert /custom/ca.crt
|
||||
|
||||
# Debug output shows certificate source
|
||||
velero backup download my-backup --log-level=debug
|
||||
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert' key 'ca-bundle.crt'
|
||||
```
|
||||
|
||||
#### RBAC Considerations for CLI
|
||||
|
||||
CLI users need read access to Secrets when using `caCertRef`:
|
||||
|
||||
```yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: velero-cli-user
|
||||
namespace: velero
|
||||
rules:
|
||||
- apiGroups: ["velero.io"]
|
||||
resources: ["backups", "restores", "backupstoragelocations"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
# Limited to secrets referenced by BSLs
|
||||
```
|
||||
|
||||
### Migration Strategy
|
||||
|
||||
#### Phase 1: Introduction (Current)
|
||||
- Add `caCertRef` field
|
||||
- Mark `caCert` as deprecated
|
||||
- Both fields supported, mutual exclusivity enforced
|
||||
|
||||
#### Phase 2: Migration Period
|
||||
- Documentation and tools to help users migrate
|
||||
- Warning messages for `caCert` usage
|
||||
- CLI enhancements to leverage `caCertRef`
|
||||
|
||||
#### Phase 3: Future Removal
|
||||
- Remove `caCert` field in major version update
|
||||
- Provide migration tool for automatic conversion
|
||||
|
||||
## User Experience
|
||||
|
||||
### Creating a BSL with Certificate Reference
|
||||
|
||||
1. Create a Secret containing the CA certificate:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: storage-ca-cert
|
||||
namespace: velero
|
||||
type: Opaque
|
||||
data:
|
||||
ca-bundle.crt: <base64-encoded-certificate>
|
||||
```
|
||||
|
||||
2. Reference the Secret in BSL:
|
||||
```yaml
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
||||
bucket: my-bucket
|
||||
caCertRef:
|
||||
name: storage-ca-cert
|
||||
key: ca-bundle.crt
|
||||
```
|
||||
|
||||
### Certificate Rotation
|
||||
|
||||
With Secret-based certificates:
|
||||
```bash
|
||||
# Update the Secret with new certificate
|
||||
kubectl create secret generic storage-ca-cert \
|
||||
--from-file=ca-bundle.crt=new-ca.crt \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
|
||||
|
||||
# No BSL update required - changes take effect on next use
|
||||
```
|
||||
|
||||
### CLI Usage Examples
|
||||
|
||||
#### Immediate Benefits
|
||||
- No change required for existing workflows
|
||||
- Certificate validation errors include helpful context
|
||||
|
||||
#### Future CLI Enhancements
|
||||
```bash
|
||||
# Automatic certificate discovery
|
||||
velero backup download my-backup
|
||||
|
||||
# Manual override still available
|
||||
velero backup download my-backup --cacert /custom/ca.crt
|
||||
|
||||
# Debug certificate resolution
|
||||
velero backup download my-backup --log-level=debug
|
||||
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert'
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Advantages of Secret-based Storage
|
||||
|
||||
1. **Encryption at Rest**: Secrets are encrypted in etcd
|
||||
2. **RBAC Control**: Fine-grained access control via Kubernetes RBAC
|
||||
3. **Audit Trail**: Secret access is auditable
|
||||
4. **Separation of Concerns**: Certificates separate from configuration
|
||||
|
||||
### Required Permissions
|
||||
|
||||
The Velero server requires additional RBAC permissions:
|
||||
```yaml
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
# Scoped to secrets referenced by BSLs
|
||||
```
|
||||
|
||||
## Compatibility
|
||||
|
||||
### Backward Compatibility
|
||||
|
||||
- Existing BSLs with `caCert` continue to function unchanged
|
||||
- No breaking changes to API
|
||||
- Gradual migration path
|
||||
|
||||
### Forward Compatibility
|
||||
|
||||
- Design allows for future enhancements:
|
||||
- Multiple certificate support
|
||||
- Certificate chain validation
|
||||
- Automatic certificate discovery from cloud providers
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Core Implementation ✓ (Current PR)
|
||||
- API changes with new `caCertRef` field
|
||||
- Controller validation
|
||||
- Repository provider updates
|
||||
- Basic testing
|
||||
|
||||
### Phase 2: CLI Enhancement (Future)
|
||||
- Automatic certificate discovery in CLI
|
||||
- Enhanced error messages
|
||||
- Debug logging for certificate resolution
|
||||
|
||||
### Phase 3: Migration Tools (Future)
|
||||
- Automated migration scripts
|
||||
- Validation tools
|
||||
- Documentation updates
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit Tests
|
||||
- BSL validation logic
|
||||
- Certificate resolution in providers
|
||||
- Controller behavior
|
||||
|
||||
### Integration Tests
|
||||
- End-to-end backup/restore with `caCertRef`
|
||||
- Certificate rotation scenarios
|
||||
- Migration from `caCert` to `caCertRef`
|
||||
|
||||
### Manual Testing Scenarios
|
||||
1. Create BSL with `caCertRef`
|
||||
2. Perform backup/restore operations
|
||||
3. Rotate certificate in Secret
|
||||
4. Verify continued operation
|
||||
|
||||
## Documentation
|
||||
|
||||
### User Documentation
|
||||
- Migration guide from `caCert` to `caCertRef`
|
||||
- Examples for common cloud providers
|
||||
- Troubleshooting guide
|
||||
|
||||
### API Documentation
|
||||
- Updated API reference
|
||||
- Deprecation notices
|
||||
- Field descriptions
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### ConfigMap-based Storage
|
||||
- Pros: Similar to Secrets, simpler API
|
||||
- Cons: Not designed for sensitive data, no encryption at rest
|
||||
- Decision: Secrets are the Kubernetes-standard for sensitive data
|
||||
|
||||
### External Certificate Management
|
||||
- Pros: Integration with cert-manager, etc.
|
||||
- Cons: Additional complexity, dependencies
|
||||
- Decision: Keep it simple, allow users to manage certificates as needed
|
||||
|
||||
### Immediate Removal of Inline Certificates
|
||||
- Pros: Cleaner API, forces best practices
|
||||
- Cons: Breaking change, migration burden
|
||||
- Decision: Gradual deprecation respects existing users
|
||||
|
||||
## Conclusion
|
||||
|
||||
This design provides a secure, Kubernetes-native approach to certificate management in Velero while maintaining backward compatibility. It establishes the foundation for enhanced CLI functionality and improved user experience, addressing the concerns raised in issue #9097 and enabling the features proposed in PR #8557.
|
||||
|
||||
The phased approach ensures smooth migration for existing users while delivering immediate security benefits for new deployments.
|
||||
257
design/Implemented/concurrent-backup-processing.md
Normal file
@@ -0,0 +1,257 @@
|
||||
# Concurrent Backup Processing
|
||||
|
||||
This enhancement will enable Velero to process multiple backups at the same time. This is largely a usability enhancement rather than a performance enhancement: the overall backup throughput may not be significantly improved over the current implementation, since we already process individual backup items in parallel. It is a significant usability improvement, though, because with the current design a user who submits a small backup may have to wait significantly longer than expected if the backup is submitted immediately after a large backup.
|
||||
|
||||
## Background
|
||||
|
||||
With the current implementation, only one backup may be `InProgress` at a time. A second backup created will not start processing until the first backup moves on to `WaitingForPluginOperations` or `Finalizing`. This is a usability concern, especially in clusters where multiple users are initiating backups. With this enhancement, we intend to allow multiple backups to be processed concurrently. This will allow backups to start processing immediately, even if a large backup was just submitted by another user. This enhancement will build on top of the prior parallel item processing feature by creating a dedicated ItemBlock worker pool for each running backup. The pool will be created at the beginning of the backup reconcile, and the input channel will be passed to the Kubernetes backupper just like it is in the current release.
|
||||
|
||||
The primary challenge is to make sure that the same workload in multiple backups is not backed up concurrently. If that were to happen, we would risk data corruption, especially around the processing of pod hooks and volume backup. For this first release we will take a conservative, high-level approach to overlap detection. Two backups will not run concurrently if there is any overlap in included namespaces. For example, if a backup that includes `ns1` and `ns2` is running, then a second backup for `ns2` and `ns3` will not be started. If a backup which does not filter namespaces is running (either a whole cluster backup or a non-namespace-limited backup with a label selector) then no other backups will be started, since a backup across all namespaces overlaps with any other backup. Calculating item-level overlap for queued backups is problematic since we don't know which items are included in a backup until backup processing has begun. A future release may add ItemBlock overlap detection, where at the item block worker level, the same item will not be processed by two different workers at the same time. This works together with workload conflict detection to further detect conflicts at a more granular level for shared resources between backups. Eventually, with a more complete understanding of individual workloads (either via ItemBlocks or some higher level model), the namespace level overlap detection may be relaxed in future versions.
|
||||
|
||||
## Goals
|
||||
- Process multiple backups concurrently
|
||||
- Detect namespace overlap to avoid conflicts
|
||||
- For queued backups (not yet runnable due to concurrency limits or overlap), indicate the queue position in status
|
||||
|
||||
## Non Goals
|
||||
- Handling NFS PVs when more than one PV points to the same underlying NFS share
|
||||
- Handling VGDP cancellation for failed backups on restart
|
||||
- Mounting a PVC for scenarios in which /tmp is too small for the number of concurrent backups
|
||||
- Providing a mechanism to identify high priority backups which get preferential treatment in terms of ItemBlock worker availability
|
||||
- Item-level overlap detection (future feature)
|
||||
- Providing the ability to disable namespace-level overlap detection once Item-level overlap detection is in place (although this may be supported in a future version).
|
||||
|
||||
## High-Level Design
|
||||
|
||||
### Backup CRD changes
|
||||
|
||||
Two new backup phases will be added: `Queued` and `ReadyToStart`. In the Backup workflow, new backups will be moved to the Queued phase when they are added to the backup queue. When a backup is removed from the queue because it is now able to run, it will be moved to the `ReadyToStart` phase, which will allow the backup controller to start processing it.
|
||||
|
||||
In addition, a new Status field, `QueuePosition`, will be added to track the backup's current position in the queue.
|
||||
|
||||
### New Controller: `backupQueueReconciler`
|
||||
|
||||
A new reconciler will be added, `backupQueueReconciler` which will use the current `backupReconciler` logic for reconciling `New` backups but instead of running the backup, it will move the Backup to the `Queued` phase and set `QueuePosition`.
|
||||
|
||||
In addition, this reconciler will periodically reconcile all queued backups (on some configurable time interval) and if there is a runnable backup, remove it from the queue, update `QueuePosition` for any queued backups behind it, and update its phase to `ReadyToStart`.
|
||||
|
||||
Queued backups will be reconciled in order based on `QueuePosition`, so the first runnable backup found will be processed. A backup is runnable if both of the following conditions are true:
|
||||
1) The total number of backups either `InProgress` or `ReadyToStart` is less than the configured number of concurrent backups.
|
||||
2) The backup has no overlap with any backups currently `InProgress` or `ReadyToStart` or with any `Queued` backups with a higher (i.e. closer to 1) queue position than this backup.
|
||||
|
||||
### Updates to Backup controller
|
||||
|
||||
The current `backupReconciler` will change its reconciling rules. Instead of watching and reconciling New backups, it will reconcile `ReadyToStart` backups. In addition, it will be configured to run in parallel by setting `MaxConcurrentReconciles` based on the `concurrent-backups` server arg.
|
||||
|
||||
The startup (and shutdown) of the ItemBlock worker pool will be moved from reconciler startup to the backup reconcile, which will give each running backup its own dedicated worker pool. The per-backup worker pool will use the existing `--item-block-worker-count` installer/server arg. This means that the maximum number of ItemBlock workers for the entire Velero pod will be the ItemBlock worker count multiplied by concurrentBackups. For example, if concurrentBackups is 5, and itemBlockWorkerCount is 6, then there will be, at most, 30 worker threads active, 6 dedicated to each InProgress backup, but this maximum will only be achieved when the maximum number of backups are InProgress. This also means that each InProgress backup will have a dedicated ItemBlock input channel with the same fixed buffer size.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### New Install/Server configuration args
|
||||
|
||||
A new install/server arg, `concurrent-backups` will be added. This will be an int-valued field specifying the number of backups which may be processed concurrently (with phase `InProgress`). If not specified, the default value of 1 will be used.
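For example (the value is illustrative; the installer exposes the same option):

```
velero server --concurrent-backups=3
```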
|
||||
|
||||
### Consideration of backup overlap and concurrent backup processing
|
||||
|
||||
The primary consideration for running additional backups concurrently is the configured `concurrent-backups` parameter. If the total number of `InProgress` and `ReadyToStart` backups is equal to `concurrent-backups` then any `Queued` backups will remain in the queue.
|
||||
|
||||
The second consideration is backup overlap. In order to prevent interaction between running backups (particularly around volume backup and pod hooks), we cannot allow two overlapping backups to run at the same time. For now, we will define overlap broadly -- requiring that two concurrent backups don't include any of the same namespaces. A backup for `ns1` can run concurrently with a backup for `ns2`, but a backup for `[ns1,ns2]` cannot run concurrently with a backup for `ns1`. One consequence of this approach is that a backup which includes all namespaces (even if further filtered by resource or label) cannot run concurrently with *any other backup*.
|
||||
|
||||
When determining which queued backup to run next, Velero will look for the next queued backup which has no overlap with any InProgress backup or any Queued backup ahead of it. The reason we need to consider queued as well as running backups for overlap detection is as follows.
|
||||
|
||||
Consider the following scenario. These are the current uncompleted backups (ordered from oldest to newest):
|
||||
1. backup1, includedNamespaces: [ns1, ns2], phase: InProgress
|
||||
2. backup2, includedNamespaces: [ns2, ns3, ns5], phase: Queued, QueuePosition: 1
|
||||
3. backup3, includedNamespaces: [ns4, ns3], phase: Queued, QueuePosition: 2
|
||||
4. backup4, includedNamespaces: [ns5, ns6], phase: Queued, QueuePosition: 3
|
||||
5. backup5, includedNamespaces: [ns8, ns9], phase: Queued, QueuePosition: 4
|
||||
|
||||
Assuming `concurrent-backups` is 2, on the next reconcile, Velero will be able to start a second backup if there is one with no overlap. `backup2` cannot run, since `ns2` overlaps between it and the running `backup1`. If we only considered running overlap (and not queued overlap), then `backup3` could run now: it conflicts with the queued `backup2` on `ns3` but it does not conflict with the running backup. However, if it runs now, then when `backup1` completes, `backup2` still can't run (since it now overlaps with the running `backup3` on `ns3`), so `backup4` starts instead. Now when `backup3` completes, `backup2` still can't run (since it now conflicts with `backup4` on `ns5`). This means that even though it was the second backup created, it's the fourth to run -- providing worse time to completion than without parallel backups. If a queued backup has a large number of namespaces (a full-cluster backup for example), it would never run as long as new single-namespace backups keep being added to the queue.
|
||||
|
||||
To resolve this problem we consider both running backups as well as backups ahead in the queue when resolving overlap conflicts. In the above scenario, `backup2` can't run yet since it overlaps with the running backup on `ns2`. In addition, `backup3` and `backup4` also can't run yet since they overlap with queued `backup2`. Therefore, `backup5` will run now. Once `backup1` completes, `backup2` will be free to run.
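A sketch of this broad overlap check (helper names are illustrative; a backup with no namespace filter, or one that includes `*`, is treated as overlapping with everything, and excluded namespaces are ignored here):

```go
// namespacesOverlap reports whether two backups could touch the same namespace
// under the broad overlap definition used for this release.
func namespacesOverlap(a, b *velerov1api.Backup) bool {
	// a backup without namespace filtering overlaps with everything
	if includesAllNamespaces(a.Spec.IncludedNamespaces) || includesAllNamespaces(b.Spec.IncludedNamespaces) {
		return true
	}

	included := map[string]struct{}{}
	for _, ns := range a.Spec.IncludedNamespaces {
		included[ns] = struct{}{}
	}
	for _, ns := range b.Spec.IncludedNamespaces {
		if _, found := included[ns]; found {
			return true
		}
	}
	return false
}

func includesAllNamespaces(namespaces []string) bool {
	if len(namespaces) == 0 {
		return true
	}
	for _, ns := range namespaces {
		if ns == "*" {
			return true
		}
	}
	return false
}
```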
|
||||
|
||||
### Backup CRD changes
|
||||
|
||||
New Backup phases:
|
||||
```go
|
||||
const (
|
||||
// BackupPhaseQueued means the backup has been added to the
|
||||
// queue by the BackupQueueReconciler.
|
||||
BackupPhaseQueued BackupPhase = "Queued"
|
||||
|
||||
// BackupPhaseReadyToStart means the backup has been removed from the
|
||||
// queue by the BackupQueueReconciler and is ready to start.
|
||||
BackupPhaseReadyToStart BackupPhase = "ReadyToStart"
|
||||
)
|
||||
```
|
||||
|
||||
In addition, a new Status field, `queuePosition`, will be added to track the backup's current position in the queue.
|
||||
```go
|
||||
// QueuePosition is the position held by the backup in the queue.
|
||||
// QueuePosition=1 means this backup is the next to be considered.
|
||||
// Only relevant when Phase is "Queued"
|
||||
// +optional
|
||||
QueuePosition int `json:"queuePosition,omitempty"`
|
||||
```
|
||||
|
||||
### New Controller: `backupQueueReconciler`
|
||||
|
||||
A new reconciler will be added, `backupQueueReconciler` which will reconcile backups under these conditions:
|
||||
1) Watching Create/Update for backups in `New` (or empty) phase
|
||||
2) Watching for Backup phase transition from `InProgress` to something else to reconcile all `Queued` backups
|
||||
3) Watching for Backup phase transition from `New` (or empty) to `Queued` to reconcile all `Queued` backups
|
||||
4) Periodic reconcile of `Queued` backups to handle backups queued at server startup, as well as to make sure we never have a situation where backups are queued indefinitely because of a race condition or because they were otherwise missed in the reconcile on prior backup completion.
|
||||
|
||||
The reconciler will be set up as follows -- note that New backups are reconciled on Create/Update, while Queued backups are reconciled when an InProgress backup moves on to another state or when a new backup moves to the Queued state. We also reconcile Queued backups periodically to handle the case of a Velero pod restart with Queued backups, as well as to handle possible edge cases where a queued backup doesn't get moved out of the queue at the point of backup completion or an error occurs during a prior Queued backup reconcile.
|
||||
|
||||
```go
|
||||
func (c *backupQueueReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
// only consider Queued backups, order by QueuePosition
|
||||
gp := kube.NewGenericEventPredicate(func(object client.Object) bool {
|
||||
backup := object.(*velerov1api.Backup)
|
||||
return (backup.Status.Phase == velerov1api.BackupPhaseQueued)
|
||||
})
|
||||
s := kube.NewPeriodicalEnqueueSource(c.logger.WithField("controller", constant.ControllerBackupQueue), mgr.GetClient(), &velerov1api.BackupList{}, c.frequency, kube.PeriodicalEnqueueSourceOption{
|
||||
Predicates: []predicate.Predicate{gp},
|
||||
OrderFunc: queuePositionOrderFunc,
|
||||
})
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&velerov1api.Backup{}, builder.WithPredicates(predicate.Funcs{
|
||||
UpdateFunc: func(ue event.UpdateEvent) bool {
|
||||
backup := ue.ObjectNew.(*velerov1api.Backup)
|
||||
return backup.Status.Phase == "" || backup.Status.Phase == velerov1api.BackupPhaseNew
|
||||
},
|
||||
CreateFunc: func(ce event.CreateEvent) bool {
|
||||
backup := ce.Object.(*velerov1api.Backup)
return backup.Status.Phase == "" || backup.Status.Phase == velerov1api.BackupPhaseNew
|
||||
},
|
||||
DeleteFunc: func(de event.DeleteEvent) bool {
|
||||
return false
|
||||
},
|
||||
GenericFunc: func(ge event.GenericEvent) bool {
|
||||
return false
|
||||
},
|
||||
})).
|
||||
Watch(
|
||||
&source.Kind{Type: &velerov1api.Backup{}},
|
||||
&handler.EnqueueRequestsFromMapFunc{
|
||||
ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request {
|
||||
backupList := velerov1api.BackupList{}
|
||||
if err := p.List(ctx, backupList); err != nil {
|
||||
p.logger.WithError(err).Error("error listing backups")
|
||||
return nil
|
||||
}
|
||||
requests := []reconcile.Request{}
|
||||
// filter backup list by Phase=queued
|
||||
// sort backup list by queuePosition
|
||||
return requests
|
||||
}),
|
||||
},
|
||||
builder.WithPredicates(predicate.Funcs{
|
||||
UpdateFunc: func(ue event.UpdateEvent) bool {
|
||||
oldBackup := ue.ObjectOld.(*velerov1api.Backup)
|
||||
newBackup := ue.ObjectNew.(*velerov1api.Backup)
|
||||
return oldBackup.Status.Phase == velerov1api.BackupPhaseInProgress &&
|
||||
newBackup.Status.Phase != velerov1api.BackupPhaseInProgress ||
|
||||
oldBackup.Status.Phase != velerov1api.BackupPhaseQueued &&
|
||||
newBackup.Status.Phase == velerov1api.BackupPhaseQueued
|
||||
},
|
||||
CreateFunc: func(event.CreateEvent) bool {
|
||||
return false
|
||||
},
|
||||
DeleteFunc: func(de event.DeleteEvent) bool {
|
||||
return false
|
||||
},
|
||||
GenericFunc: func(ge event.GenericEvent) bool {
|
||||
return false
|
||||
},
|
||||
}).
|
||||
WatchesRawSource(s).
|
||||
Named(constant.ControllerBackupQueue).
|
||||
Complete(c)
|
||||
}
|
||||
```
|
||||
|
||||
New backups will be queued: Phase will be set to `Queued`, and `QueuePosition` will be set to one greater than the highest current `QueuePosition` value among Queued backups.
|
||||
|
||||
Queued backups will be removed from the queue if runnable:
|
||||
1) If the total number of backups either InProgress or ReadyToStart is greater than or equal to the concurrency limit, then exit without removing from the queue.
|
||||
2) If the current backup overlaps with any InProgress, ReadyToStart, or Queued backup with `QueuePosition < currentBackup.QueuePosition` then exit without removing from the queue.
|
||||
3) If we get here, the backup is runnable. To resolve a potential race condition where an InProgress backup completes between reconciling the backup with QueuePosition `n-1` and reconciling the current backup with QueuePosition `n`, we also check to see whether there are any runnable backups in the queue ahead of this one. The only time this will happen is if a backup completes immediately before reconcile starts which either frees up a concurrency slot or removes a namespace conflict. In this case, we don't want to run the current backup since the one ahead of this one in the queue (which was recently passed over before the InProgress backup completed) must run first. In this case, exit without removing from the queue.
|
||||
4) If we get here, remove the backup from the queue by setting Phase to `ReadyToStart` and `QueuePosition` to zero. Decrement the `QueuePosition` of any other Queued backups with a `QueuePosition` higher than the current backup's queue position prior to dequeuing. At this point, the backup reconciler will start the backup.
|
||||
|
||||
The concurrency check in step 1 is effectively `if len(inProgressBackups)+len(pendingStartBackups) >= concurrentBackups`.
|
||||
|
||||
```
|
||||
switch original.Status.Phase {
|
||||
case "", velerov1api.BackupPhaseNew:
|
||||
// enqueue backup -- set phase=Queued, set queuePosition=maxCurrentQueuePosition+1
|
||||
}
|
||||
// We should only ever get these events when added in order by the periodical enqueue source
|
||||
// so as long as the current backup has not conflicts ahead of it or running, we should be good to
|
||||
// dequeue
|
||||
case "", velerov1api.BackupPhaseQueued:
|
||||
// list backups, filter on Queued, ReadyToStart, and InProgress
|
||||
// if number of InProgress backups + number of ReadyToStart backups >= concurrency limit, exit
|
||||
// generate list of all namespaces included in InProgress, ReadyToStart, and Queued backups with
|
||||
// queuePosition < backup.Status.QueuePosition
|
||||
// if overlap found, exit
|
||||
// check backups ahead of this one in the queue for runnability. If any are runnable, exit
|
||||
// dequeue backup: set Phase to ReadyToStart, QueuePosition to 0, and decrement QueuePosition
|
||||
// for all QueuedBackups behind this one in the queue
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The queue controller will run as a single reconciler thread, so we will not need to deal with concurrency issues when moving backups from New to Queued or from Queued to ReadyToStart, and all of the updates to QueuePosition will be from a single thread.
|
||||
|
||||
### Updates to Backup controller
|
||||
|
||||
The Reconcile logic will be updated to respond to ReadyToStart backups instead of New backups:
|
||||
|
||||
```
|
||||
@@ -234,8 +234,8 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
||||
// InProgress, we still need this check so we can return nil to indicate we've finished processing
|
||||
// this key (even though it was a no-op).
|
||||
switch original.Status.Phase {
|
||||
- case "", velerov1api.BackupPhaseNew:
|
||||
- // only process new backups
|
||||
+ case velerov1api.BackupPhaseReadyToStart:
|
||||
+ // only process ReadyToStart backups
|
||||
default:
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"backup": kubeutil.NamespaceAndName(original),
|
||||
```
|
||||
|
||||
In addition, it will be configured to run in parallel by setting `MaxConcurrentReconciles` based on the `concurrent-backups` server arg.
|
||||
|
||||
```
|
||||
@@ -149,6 +149,9 @@ func NewBackupReconciler(
|
||||
func (b *backupReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&velerov1api.Backup{}).
|
||||
+ WithOptions(controller.Options{
|
||||
+ MaxConcurrentReconciles: concurrentBackups,
|
||||
+ }).
|
||||
Named(constant.ControllerBackup).
|
||||
Complete(b)
|
||||
}
|
||||
```
|
||||
|
||||
The controller-runtime core reconciler logic already prevents the same resource from being reconciled by two different reconciler threads, so we don't need to worry about concurrency issues at the controller level.
|
||||
|
||||
The workerPool reference will be moved from the backupReconciler to the backupRequest, since this will now be backup-specific, and the initialization code for the worker pool will be moved from the reconciler init into the backup reconcile. This worker pool will be shut down upon exiting the Reconcile method.
|
||||
|
||||
### Resilience to restart of velero pod
|
||||
|
||||
The new backup phases (`Queued` and `ReadyToStart`) will be resilient to velero pod restarts. If the velero pod crashes or is restarted, only backups in the `InProgress` phase will be failed, so there is no change to current behavior. Queued backups will retain their queue position on restart, and ReadyToStart backups will move to InProgress when reconciled.
|
||||
|
||||
### Observability
|
||||
|
||||
#### Logging
|
||||
|
||||
When a backup is dequeued, an info log message will also include the wait time, calculated as `now - creationTimestamp`. When a backup is passed over due to overlap, an info log message will indicate which namespaces were in conflict.
|
||||
|
||||
#### Velero CLI
|
||||
|
||||
The `velero backup describe` output will include the current queue position for queued backups.
|
||||
115
design/Implemented/wildcard-namespace-support-design.md
Normal file
@@ -0,0 +1,115 @@
|
||||
|
||||
# Wildcard Namespace Support
|
||||
|
||||
## Abstract
|
||||
|
||||
Velero currently treats namespace patterns with glob characters as literal strings. This design adds wildcard expansion to support flexible namespace selection using patterns like `app-*` or `test-{dev,staging}`.
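For illustration, such patterns would be passed through the existing namespace flags (the backup and restore names are examples; patterns are quoted to avoid shell globbing):

```bash
# back up every namespace matching app-*
velero backup create app-backup --include-namespaces 'app-*'

# restore only the dev and staging test namespaces from an existing backup
velero restore create --from-backup nightly --include-namespaces 'test-{dev,staging}'
```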
|
||||
|
||||
## Background
|
||||
|
||||
Requested in [#1874](https://github.com/vmware-tanzu/velero/issues/1874) for more flexible namespace selection.
|
||||
|
||||
## Goals
|
||||
|
||||
- Support glob pattern expansion in namespace includes/excludes
|
||||
- Maintain backward compatibility with existing `*` behavior
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- Complex regex patterns beyond basic globs
|
||||
|
||||
## High-Level Design
|
||||
|
||||
Wildcard expansion occurs early in both backup and restore flows, converting patterns to literal namespace lists before normal processing.
|
||||
|
||||
### Backup Flow
|
||||
|
||||
Expansion happens in `getResourceItems()` before namespace collection:
|
||||
1. Check if wildcards exist using `ShouldExpandWildcards()`
|
||||
2. Expand patterns against active cluster namespaces
|
||||
3. Replace includes/excludes with expanded literal namespaces
|
||||
4. Continue with normal backup processing
|
||||
|
||||
### Restore Flow
|
||||
|
||||
Expansion occurs in `execute()` after parsing backup contents:
|
||||
1. Extract available namespaces from backup tar
|
||||
2. Expand patterns against backup namespaces (not cluster namespaces)
|
||||
3. Update restore context with expanded namespaces
|
||||
4. Continue with normal restore processing
|
||||
|
||||
This ensures restore wildcards match actual backup contents, not current cluster state.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Status Fields
|
||||
|
||||
Add wildcard expansion tracking to backup and restore CRDs:
|
||||
|
||||
```go
|
||||
type WildcardNamespaceStatus struct {
|
||||
// IncludeWildcardMatches records namespaces that matched include patterns
|
||||
// +optional
|
||||
IncludeWildcardMatches []string `json:"includeWildcardMatches,omitempty"`
|
||||
|
||||
// ExcludeWildcardMatches records namespaces that matched exclude patterns
|
||||
// +optional
|
||||
ExcludeWildcardMatches []string `json:"excludeWildcardMatches,omitempty"`
|
||||
|
||||
// WildcardResult records final namespaces after wildcard processing
|
||||
// +optional
|
||||
WildcardResult []string `json:"wildcardResult,omitempty"`
|
||||
}
|
||||
|
||||
// Added to both BackupStatus and RestoreStatus
|
||||
type BackupStatus struct {
|
||||
// WildcardNamespaces contains wildcard expansion results
|
||||
// +optional
|
||||
WildcardNamespaces *WildcardNamespaceStatus `json:"wildcardNamespaces,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
### Wildcard Expansion Package
|
||||
|
||||
New `pkg/util/wildcard/expand.go` package provides:
|
||||
|
||||
- `ShouldExpandWildcards()` - Skip expansion for simple "*" case
|
||||
- `ExpandWildcards()` - Main expansion function using `github.com/gobwas/glob`
|
||||
- Pattern validation rejecting unsupported regex symbols
|
||||
|
||||
**Supported patterns**: `*`, `?`, `[abc]`, `{a,b,c}`
|
||||
**Unsupported**: regex-style symbols such as `|`, `(`, `)`, and the recursive `**`
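A minimal sketch of the expansion logic using `github.com/gobwas/glob` (the function name and signature are illustrative, not the package's final API):

```go
package wildcard

import "github.com/gobwas/glob"

// expandPatterns matches each glob pattern against the candidate namespaces
// and returns the deduplicated list of literal namespaces that matched.
func expandPatterns(candidates []string, patterns []string) ([]string, error) {
	seen := map[string]struct{}{}
	expanded := []string{}

	for _, pattern := range patterns {
		g, err := glob.Compile(pattern)
		if err != nil {
			return nil, err
		}
		for _, ns := range candidates {
			if _, dup := seen[ns]; dup {
				continue
			}
			if g.Match(ns) {
				seen[ns] = struct{}{}
				expanded = append(expanded, ns)
			}
		}
	}
	return expanded, nil
}
```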
|
||||
|
||||
### Implementation Details
|
||||
|
||||
#### Backup Integration (`pkg/backup/item_collector.go`)
|
||||
|
||||
Expansion in `getResourceItems()`:
|
||||
- Call `wildcard.ExpandWildcards()` with cluster namespaces
|
||||
- Update `NamespaceIncludesExcludes` with expanded results
|
||||
- Populate status fields with expansion results
|
||||
|
||||
#### Restore Integration (`pkg/restore/restore.go`)
|
||||
|
||||
Expansion in `execute()`:
|
||||
```go
|
||||
if wildcard.ShouldExpandWildcards(includes, excludes) {
|
||||
availableNamespaces := extractNamespacesFromBackup(backupResources)
|
||||
expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
|
||||
availableNamespaces, includes, excludes)
|
||||
// Update context and status
|
||||
}
|
||||
```
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
1. **Client-side expansion**: Rejected because it wouldn't work for scheduled backups
|
||||
2. **Expansion in `collectNamespaces`**: Rejected because these functions expect literal namespaces
|
||||
|
||||
## Compatibility
|
||||
|
||||
Maintains full backward compatibility - existing "*" behavior unchanged.
|
||||
|
||||
## Implementation
|
||||
|
||||
Target: Velero 1.18
|
||||
124
go.mod
@@ -1,16 +1,14 @@
|
||||
module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.6
|
||||
go 1.25.7
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.55.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
|
||||
cloud.google.com/go/storage v1.57.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.14
|
||||
@@ -33,23 +31,22 @@ require (
|
||||
github.com/onsi/gomega v1.36.1
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/mod v0.26.0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/api v0.241.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
go.uber.org/zap v1.27.1
|
||||
golang.org/x/mod v0.30.0
|
||||
golang.org/x/oauth2 v0.33.0
|
||||
golang.org/x/text v0.31.0
|
||||
google.golang.org/api v0.256.0
|
||||
google.golang.org/grpc v1.77.0
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.33.3
|
||||
k8s.io/apiextensions-apiserver v0.33.3
|
||||
@@ -66,19 +63,19 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.23.0 // indirect
|
||||
cloud.google.com/go v0.121.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.6 // indirect
|
||||
cloud.google.com/go/auth v0.17.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
|
||||
@@ -96,18 +93,18 @@ require (
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.2.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
@@ -115,36 +112,36 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gofrs/flock v0.13.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.2 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.3 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/klauspost/crc32 v1.3.0 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.4 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.6 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/crc64nvme v1.0.1 // indirect
|
||||
github.com/minio/crc64nvme v1.1.0 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.94 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.97 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
@@ -156,43 +153,44 @@ require (
|
||||
github.com/natefinch/atomic v1.0.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/oklog/run v1.0.0 // indirect
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
|
||||
github.com/philhofer/fwd v1.2.0 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tinylib/msgp v1.3.0 // indirect
|
||||
github.com/vladimirvivien/gexe v0.1.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.4 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
@@ -200,4 +198,4 @@ require (
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197
|
||||
|
||||
270
go.sum
270
go.sum
@@ -1,7 +1,7 @@
|
||||
al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
|
||||
al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
|
||||
cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss=
|
||||
cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
@@ -24,10 +24,10 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw=
|
||||
cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
|
||||
cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
|
||||
cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
|
||||
cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
|
||||
cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
|
||||
cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
|
||||
cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
@@ -36,8 +36,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
|
||||
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
@@ -45,8 +45,8 @@ cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E=
|
||||
cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -59,19 +59,19 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0=
|
||||
cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=
|
||||
cloud.google.com/go/storage v1.57.2 h1:sVlym3cHGYhrp6XZKkKb+92I1V42ks2qKKpB0CF5Mb4=
|
||||
cloud.google.com/go/storage v1.57.2/go.mod h1:n5ijg4yiRXXpCu0sJTD6k+eMf7GRrJmPyr9YxLXGHOk=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
|
||||
@@ -80,10 +80,10 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsI
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
@@ -95,20 +95,20 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI=
|
||||
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5/go.mod h1:exZ0C/1emQJAw5tHOaUDyY1ycttqBAPcxuzf7QbY6ec=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@@ -189,8 +189,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
@@ -211,8 +211,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
@@ -229,10 +227,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
|
||||
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
@@ -266,8 +264,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
@@ -301,21 +299,19 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
|
||||
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -403,12 +399,12 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
|
||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
@@ -424,12 +420,12 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hanwen/go-fuse/v2 v2.8.0 h1:wV8rG7rmCz8XHSOwBZhG5YcVqcYjkzivjmbaMafPlAs=
|
||||
github.com/hanwen/go-fuse/v2 v2.8.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
|
||||
github.com/hanwen/go-fuse/v2 v2.9.0 h1:0AOGUkHtbOVeyGLr0tXupiid1Vg7QB7M6YUcdmVdC58=
|
||||
github.com/hanwen/go-fuse/v2 v2.9.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
|
||||
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
||||
@@ -486,18 +482,20 @@ github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXw
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
|
||||
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA=
|
||||
github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU=
|
||||
github.com/klauspost/reedsolomon v1.12.6 h1:8pqE9aECQG/ZFitiUD1xK/E83zwosBAZtE3UbuZM8TQ=
|
||||
github.com/klauspost/reedsolomon v1.12.6/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557 h1:je1C/xnmKxnaJsIgj45me5qA51TgtK9uMwTxgDw+9H0=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d h1:U3VB/cDMsPW4zB4JRFbVRDzIpPytt889rJUKAG40NPA=
|
||||
github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -535,12 +533,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
|
||||
github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM=
|
||||
github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
|
||||
github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ=
|
||||
github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
@@ -599,8 +597,8 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
|
||||
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
@@ -617,12 +615,12 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777 h1:T7t+u+mnF33qFTDq7bIMSMB51BEA8zkD7aU6tFQNZ6E=
|
||||
github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777/go.mod h1:qlSnPHrsV8eEeU4l4zqEw8mJ5CUeXr7PDiJNI4r4Bus=
|
||||
github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197 h1:iGkfuELGvFCqW+zcrhf2GsOwNH1nWYBsC69IOc57KJk=
|
||||
github.com/project-velero/kopia v0.0.0-20251230033609-d946b1e75197/go.mod h1:RL4KehCNKEIDNltN7oruSa3ldwBNVPmQbwmN3Schbjc=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -630,22 +628,20 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
|
||||
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -683,8 +679,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -702,8 +698,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tg123/go-htpasswd v1.2.4 h1:HgH8KKCjdmo7jjXWN9k1nefPBd7Be3tFCTjc2jPraPU=
|
||||
github.com/tg123/go-htpasswd v1.2.4/go.mod h1:EKThQok9xHkun6NBMynNv6Jmu24A33XdZzzl4Q7H1+0=
|
||||
@@ -731,8 +727,6 @@ github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
|
||||
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
||||
github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
|
||||
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
@@ -746,26 +740,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
go.starlark.net v0.0.0-20201006213952-227f4aabceb5/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
|
||||
@@ -780,8 +774,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
|
||||
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -794,8 +790,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -833,8 +829,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -880,8 +876,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -895,8 +891,8 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
||||
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -908,8 +904,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -973,14 +969,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -990,14 +986,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1051,14 +1047,16 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@@ -1081,8 +1079,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE=
|
||||
google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
|
||||
google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
|
||||
google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1134,12 +1132,12 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@@ -1161,8 +1159,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1176,8 +1174,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM --platform=$TARGETPLATFORM golang:1.24.6-bookworm
FROM --platform=$TARGETPLATFORM golang:1.25.7-bookworm

ARG GOPROXY

@@ -21,9 +21,11 @@ ENV GO111MODULE=on
ENV GOPROXY=${GOPROXY}

# kubebuilder test bundle is separated from kubebuilder. Need to setup it for CI test.
RUN curl -sSLo envtest-bins.tar.gz https://go.kubebuilder.io/test-tools/1.22.1/linux/$(go env GOARCH) && \
mkdir /usr/local/kubebuilder && \
tar -C /usr/local/kubebuilder --strip-components=1 -zvxf envtest-bins.tar.gz
# Using setup-envtest to download envtest binaries
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
mkdir -p /usr/local/kubebuilder/bin && \
ENVTEST_ASSETS_DIR=$(setup-envtest use 1.33.0 --bin-dir /usr/local/kubebuilder/bin -p path) && \
cp -r ${ENVTEST_ASSETS_DIR}/* /usr/local/kubebuilder/bin/

RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.2.0/kubebuilder_linux_$(go env GOARCH) && \
mv kubebuilder_linux_$(go env GOARCH) /usr/local/kubebuilder/bin/kubebuilder && \
@@ -94,7 +96,7 @@ RUN ARCH=$(go env GOARCH) && \
chmod +x /usr/bin/goreleaser

# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0

# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl

@@ -1,5 +1,5 @@
|
||||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..6ae17f4a1 100644
|
||||
index 5f939c481..f6205aa3c 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -24,32 +24,31 @@ require (
|
||||
@@ -14,13 +14,13 @@ index 5f939c481..6ae17f4a1 100644
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
- google.golang.org/api v0.106.0
|
||||
+ golang.org/x/crypto v0.36.0
|
||||
+ golang.org/x/net v0.38.0
|
||||
+ golang.org/x/crypto v0.45.0
|
||||
+ golang.org/x/net v0.47.0
|
||||
+ golang.org/x/oauth2 v0.28.0
|
||||
+ golang.org/x/sync v0.12.0
|
||||
+ golang.org/x/sys v0.31.0
|
||||
+ golang.org/x/term v0.30.0
|
||||
+ golang.org/x/text v0.23.0
|
||||
+ golang.org/x/sync v0.18.0
|
||||
+ golang.org/x/sys v0.38.0
|
||||
+ golang.org/x/term v0.37.0
|
||||
+ golang.org/x/text v0.31.0
|
||||
+ google.golang.org/api v0.114.0
|
||||
)
|
||||
|
||||
@@ -64,11 +64,11 @@ index 5f939c481..6ae17f4a1 100644
|
||||
)
|
||||
|
||||
-go 1.18
|
||||
+go 1.23.0
|
||||
+go 1.24.0
|
||||
+
|
||||
+toolchain go1.23.7
|
||||
+toolchain go1.24.11
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..805792055 100644
|
||||
index 026e1d2fa..4a37e7ac7 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -1,23 +1,24 @@
|
||||
@@ -170,8 +170,8 @@ index 026e1d2fa..805792055 100644
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -181,8 +181,8 @@ index 026e1d2fa..805792055 100644
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
@@ -194,8 +194,8 @@ index 026e1d2fa..805792055 100644
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -205,21 +205,21 @@ index 026e1d2fa..805792055 100644
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
||||
@@ -103,6 +103,14 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(

snapCont.ResourceVersion = ""

if snapCont.Spec.VolumeSnapshotClassName != nil {
// Delete VolumeSnapshotClass from the VolumeSnapshotContent.
// This is necessary to make the deletion independent of the VolumeSnapshotClass.
snapCont.Spec.VolumeSnapshotClassName = nil
p.log.Debugf("Deleted VolumeSnapshotClassName from VolumeSnapshotContent %s to make deletion independent of VolumeSnapshotClass",
snapCont.Name)
}

if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
}

@@ -70,7 +70,7 @@ func TestVSCExecute(t *testing.T) {
},
{
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).VolumeSnapshotClassName("volumesnapshotclass").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: false,
function: func(
@@ -82,7 +82,7 @@ func TestVSCExecute(t *testing.T) {
},
},
{
name: "Normal case, VolumeSnapshot should be deleted",
name: "Error case, deletion fails",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: true,

@@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
hookLog.Error(err)
|
||||
errors = append(errors, err)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
hookFailed = true
|
||||
}
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, hookFailed, hookErr)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, hookFailed, hookErr)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -239,7 +239,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
// containers to become ready.
|
||||
// Each unexecuted hook is logged as an error and this error will be returned from this function.
|
||||
for _, hooks := range byContainer {
|
||||
for i, hook := range hooks {
|
||||
for _, hook := range hooks {
|
||||
if hook.executed {
|
||||
continue
|
||||
}
|
||||
@@ -252,7 +252,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
},
|
||||
)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
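
The hunks above replace the per-container loop index i with the hookIndex carried on each PodExecRestoreHook when recording results in the hook tracker (bug #9359). A minimal sketch of why the two indices can diverge, using simplified hypothetical types rather than Velero's own:

package main

import "fmt"

// hook is a simplified stand-in for PodExecRestoreHook: hookIndex is the
// index the hook was registered with in the tracker, before grouping.
type hook struct {
	name      string
	container string
	hookIndex int
}

func main() {
	registered := []hook{
		{name: "first-hook", container: "c1", hookIndex: 0},
		{name: "other-container-hook", container: "c2", hookIndex: 1},
		{name: "second-hook", container: "c1", hookIndex: 2},
	}

	// Group by container, as the handler's byContainer map does.
	byContainer := map[string][]hook{}
	for _, h := range registered {
		byContainer[h.container] = append(byContainer[h.container], h)
	}

	for i, h := range byContainer["c1"] {
		// For "second-hook", i == 1 but h.hookIndex == 2: recording with the
		// slice index would update the wrong tracker entry, so the stored
		// hookIndex must be used instead.
		fmt.Printf("slice index %d, registered index %d (%s)\n", i, h.hookIndex, h.name)
	}
}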
|
||||
|
||||
@@ -706,6 +706,130 @@ func TestWaitExecHandleHooks(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple hooks with non-sequential indices (bug #9359)",
|
||||
initialPod: builder.ForPod("default", "my-pod").
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
groupResource: "pods",
|
||||
byContainer: map[string][]PodExecRestoreHook{
|
||||
"container1": {
|
||||
{
|
||||
HookName: "first-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/foo"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 0,
|
||||
},
|
||||
{
|
||||
HookName: "second-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/bar"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 2,
|
||||
},
|
||||
{
|
||||
HookName: "third-hook",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: velerov1api.ExecRestoreHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/third"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
ExecTimeout: metav1.Duration{Duration: time.Second},
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
hookIndex: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedExecutions: []expectedExecution{
|
||||
{
|
||||
name: "first-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/foo"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "second-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/bar"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "third-hook",
|
||||
hook: &velerov1api.ExecHook{
|
||||
Container: "container1",
|
||||
Command: []string{"/usr/bin/third"},
|
||||
OnError: velerov1api.HookErrorModeContinue,
|
||||
Timeout: metav1.Duration{Duration: time.Second},
|
||||
},
|
||||
error: nil,
|
||||
pod: builder.ForPod("default", "my-pod").
|
||||
ObjectMeta(builder.WithResourceVersion("1")).
|
||||
Containers(&corev1api.Container{
|
||||
Name: "container1",
|
||||
}).
|
||||
ContainerStatuses(&corev1api.ContainerStatus{
|
||||
Name: "container1",
|
||||
State: corev1api.ContainerState{
|
||||
Running: &corev1api.ContainerStateRunning{},
|
||||
},
|
||||
}).
|
||||
Result(),
|
||||
},
|
||||
},
|
||||
expectedErrors: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -146,6 +146,9 @@ func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
|
||||
if len(con.PVCLabels) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
|
||||
}
|
||||
if len(con.PVCPhase) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcPhaseCondition{phases: con.PVCPhase})
|
||||
}
|
||||
p.volumePolicies = append(p.volumePolicies, volP)
|
||||
}
|
||||
|
||||
@@ -191,6 +194,9 @@ func (p *Policies) GetMatchAction(res any) (*Action, error) {
|
||||
if data.PVC != nil {
|
||||
volume.parsePVC(data.PVC)
|
||||
}
|
||||
case data.PVC != nil:
|
||||
// Handle PVC-only scenarios (e.g., unbound PVCs)
|
||||
volume.parsePVC(data.PVC)
|
||||
default:
|
||||
return nil, errors.New("failed to convert object")
|
||||
}
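
The hunks above add a pvcPhase condition to BuildPolicy and let GetMatchAction handle a VolumeFilterData that carries only a PVC (for example an unbound, Pending claim with no PV yet). A sketch of how such a PVC would be matched; the Conditions map key mirrors the pvcPhase YAML tag introduced in this change, and the NewVolumeFilterData signature is assumed from its use elsewhere in the diff:

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
)

func main() {
	resPolicies := &resourcepolicies.ResourcePolicies{
		Version: "v1",
		VolumePolicies: []resourcepolicies.VolumePolicy{
			{
				// Assumed key, matching the pvcPhase YAML tag; in a real
				// policy the action would typically be skip, as in the
				// YAML test cases shown elsewhere in this change.
				Conditions: map[string]any{"pvcPhase": []string{"Pending", "Lost"}},
				Action:     resourcepolicies.Action{Type: resourcepolicies.Snapshot},
			},
		},
	}

	p := &resourcepolicies.Policies{}
	if err := p.BuildPolicy(resPolicies); err != nil {
		panic(err)
	}

	// An unbound PVC: there is no PV yet, so only the PVC is supplied.
	pvc := &corev1api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "pvc-pending"},
		Status:     corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimPending},
	}

	vfd := resourcepolicies.NewVolumeFilterData(nil, nil, pvc)
	action, err := p.GetMatchAction(vfd)
	if err != nil {
		panic(err)
	}
	fmt.Println("matched:", action != nil) // true: the Pending phase matched
}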
|
||||
|
||||
@@ -983,6 +983,69 @@ volumePolicies:
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Pending phase should skip",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-pending",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Bound phase should not skip",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-bound",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimBound,
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC phase matching - Multiple phases (Pending, Lost)",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcPhase: ["Pending", "Lost"]
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-lost",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimLost,
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@@ -1059,32 +1122,53 @@ func TestParsePVC(t *testing.T) {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
expectedLabels map[string]string
|
||||
expectedPhase string
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid PVC with labels",
|
||||
name: "valid PVC with labels and Pending phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"env": "prod"},
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
expectedLabels: map[string]string{"env": "prod"},
|
||||
expectedPhase: "Pending",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid PVC with empty labels",
|
||||
name: "valid PVC with Bound phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimBound,
|
||||
},
|
||||
},
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "Bound",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid PVC with Lost phase",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimLost,
|
||||
},
|
||||
},
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "Lost",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "nil PVC pointer",
|
||||
pvc: (*corev1api.PersistentVolumeClaim)(nil),
|
||||
expectedLabels: nil,
|
||||
expectedPhase: "",
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
@@ -1095,6 +1179,66 @@ func TestParsePVC(t *testing.T) {
|
||||
s.parsePVC(tc.pvc)
|
||||
|
||||
assert.Equal(t, tc.expectedLabels, s.pvcLabels)
|
||||
assert.Equal(t, tc.expectedPhase, s.pvcPhase)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPVCPhaseMatch(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
condition *pvcPhaseCondition
|
||||
volume *structuredVolume
|
||||
expectedMatch bool
|
||||
}{
|
||||
{
|
||||
name: "match Pending phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match multiple phases - Pending matches",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending", "Bound"}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match multiple phases - Bound matches",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending", "Bound"}},
|
||||
volume: &structuredVolume{pvcPhase: "Bound"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "no match for different phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: "Bound"},
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "no match for empty phase",
|
||||
condition: &pvcPhaseCondition{phases: []string{"Pending"}},
|
||||
volume: &structuredVolume{pvcPhase: ""},
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "match with empty phases list (always match)",
|
||||
condition: &pvcPhaseCondition{phases: []string{}},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match with nil phases list (always match)",
|
||||
condition: &pvcPhaseCondition{phases: nil},
|
||||
volume: &structuredVolume{pvcPhase: "Pending"},
|
||||
expectedMatch: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.condition.match(tc.volume)
|
||||
assert.Equal(t, tc.expectedMatch, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ type structuredVolume struct {
csi *csiVolumeSource
volumeType SupportedVolume
pvcLabels map[string]string
pvcPhase string
}

func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
@@ -70,8 +71,11 @@ func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
}

func (s *structuredVolume) parsePVC(pvc *corev1api.PersistentVolumeClaim) {
if pvc != nil && len(pvc.GetLabels()) > 0 {
s.pvcLabels = pvc.Labels
if pvc != nil {
if len(pvc.GetLabels()) > 0 {
s.pvcLabels = pvc.Labels
}
s.pvcPhase = string(pvc.Status.Phase)
}
}

@@ -110,6 +114,31 @@ func (c *pvcLabelsCondition) validate() error {
return nil
}

// pvcPhaseCondition defines a condition that matches if the PVC's phase matches any of the provided phases.
type pvcPhaseCondition struct {
phases []string
}

func (c *pvcPhaseCondition) match(v *structuredVolume) bool {
// No phases specified: always match.
if len(c.phases) == 0 {
return true
}
if v.pvcPhase == "" {
return false
}
for _, phase := range c.phases {
if v.pvcPhase == phase {
return true
}
}
return false
}

func (c *pvcPhaseCondition) validate() error {
return nil
}
|
||||
type capacityCondition struct {
|
||||
capacity capacity
|
||||
}
|
||||
|
||||
@@ -46,6 +46,7 @@ type volumeConditions struct {
|
||||
CSI *csiVolumeSource `yaml:"csi,omitempty"`
|
||||
VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"`
|
||||
PVCLabels map[string]string `yaml:"pvcLabels,omitempty"`
|
||||
PVCPhase []string `yaml:"pvcPhase,omitempty"`
|
||||
}
|
||||
|
||||
func (c *capacityCondition) validate() error {
|
||||
|
||||
@@ -170,6 +170,9 @@ type SnapshotDataMovementInfo struct {
|
||||
// Moved snapshot data size.
|
||||
Size int64 `json:"size"`
|
||||
|
||||
// Moved snapshot incremental size.
|
||||
IncrementalSize int64 `json:"incrementalSize,omitempty"`
|
||||
|
||||
// The DataUpload's Status.Phase value
|
||||
Phase velerov2alpha1.DataUploadPhase
|
||||
}
|
||||
@@ -217,6 +220,9 @@ type PodVolumeInfo struct {
|
||||
// The snapshot corresponding volume size.
|
||||
Size int64 `json:"size,omitempty"`
|
||||
|
||||
// The incremental snapshot size.
|
||||
IncrementalSize int64 `json:"incrementalSize,omitempty"`
|
||||
|
||||
// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
|
||||
UploaderType string `json:"uploaderType"`
|
||||
|
||||
@@ -240,14 +246,15 @@ type PodVolumeInfo struct {
|
||||
|
||||
func newPodVolumeInfoFromPVB(pvb *velerov1api.PodVolumeBackup) *PodVolumeInfo {
|
||||
return &PodVolumeInfo{
|
||||
SnapshotHandle: pvb.Status.SnapshotID,
|
||||
Size: pvb.Status.Progress.TotalBytes,
|
||||
UploaderType: pvb.Spec.UploaderType,
|
||||
VolumeName: pvb.Spec.Volume,
|
||||
PodName: pvb.Spec.Pod.Name,
|
||||
PodNamespace: pvb.Spec.Pod.Namespace,
|
||||
NodeName: pvb.Spec.Node,
|
||||
Phase: pvb.Status.Phase,
|
||||
SnapshotHandle: pvb.Status.SnapshotID,
|
||||
Size: pvb.Status.Progress.TotalBytes,
|
||||
IncrementalSize: pvb.Status.IncrementalBytes,
|
||||
UploaderType: pvb.Spec.UploaderType,
|
||||
VolumeName: pvb.Spec.Volume,
|
||||
PodName: pvb.Spec.Pod.Name,
|
||||
PodNamespace: pvb.Spec.Pod.Namespace,
|
||||
NodeName: pvb.Spec.Node,
|
||||
Phase: pvb.Status.Phase,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package volumehelper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -11,6 +13,7 @@ import (
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
@@ -33,8 +36,16 @@ type volumeHelperImpl struct {
|
||||
// to the volume policy check, but fs-backup is based on the pod resource,
|
||||
// the resource filter on PVC and PV doesn't work on this scenario.
|
||||
backupExcludePVC bool
|
||||
// pvcPodCache provides cached PVC to Pod mappings for improved performance.
|
||||
// When there are many PVCs and pods, using this cache avoids O(N*M) lookups.
|
||||
pvcPodCache *podvolumeutil.PVCPodCache
|
||||
}
|
||||
|
||||
// NewVolumeHelperImpl creates a VolumeHelper without PVC-to-Pod caching.
|
||||
//
|
||||
// Deprecated: Use NewVolumeHelperImplWithNamespaces or NewVolumeHelperImplWithCache instead
|
||||
// for better performance. These functions provide PVC-to-Pod caching which avoids O(N*M)
|
||||
// complexity when there are many PVCs and pods. See issue #9179 for details.
|
||||
func NewVolumeHelperImpl(
|
||||
volumePolicy *resourcepolicies.Policies,
|
||||
snapshotVolumes *bool,
|
||||
@@ -43,6 +54,43 @@ func NewVolumeHelperImpl(
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
) VolumeHelper {
|
||||
// Pass nil namespaces - no cache will be built, so this never fails.
|
||||
// This is used by plugins that don't need the cache optimization.
|
||||
vh, _ := NewVolumeHelperImplWithNamespaces(
|
||||
volumePolicy,
|
||||
snapshotVolumes,
|
||||
logger,
|
||||
client,
|
||||
defaultVolumesToFSBackup,
|
||||
backupExcludePVC,
|
||||
nil,
|
||||
)
|
||||
return vh
|
||||
}
|
||||
|
||||
// NewVolumeHelperImplWithNamespaces creates a VolumeHelper with a PVC-to-Pod cache for improved performance.
|
||||
// The cache is built internally from the provided namespaces list.
|
||||
// This avoids O(N*M) complexity when there are many PVCs and pods.
|
||||
// See issue #9179 for details.
|
||||
// Returns an error if cache building fails - callers should not proceed with backup in this case.
|
||||
func NewVolumeHelperImplWithNamespaces(
|
||||
volumePolicy *resourcepolicies.Policies,
|
||||
snapshotVolumes *bool,
|
||||
logger logrus.FieldLogger,
|
||||
client crclient.Client,
|
||||
defaultVolumesToFSBackup bool,
|
||||
backupExcludePVC bool,
|
||||
namespaces []string,
|
||||
) (VolumeHelper, error) {
|
||||
var pvcPodCache *podvolumeutil.PVCPodCache
|
||||
if len(namespaces) > 0 {
|
||||
pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
if err := pvcPodCache.BuildCacheForNamespaces(context.Background(), namespaces, client); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logger.Infof("Built PVC-to-Pod cache for %d namespaces", len(namespaces))
|
||||
}
|
||||
|
||||
return &volumeHelperImpl{
|
||||
volumePolicy: volumePolicy,
|
||||
snapshotVolumes: snapshotVolumes,
|
||||
@@ -50,7 +98,33 @@ func NewVolumeHelperImpl(
|
||||
client: client,
|
||||
defaultVolumesToFSBackup: defaultVolumesToFSBackup,
|
||||
backupExcludePVC: backupExcludePVC,
|
||||
pvcPodCache: pvcPodCache,
|
||||
}, nil
|
||||
}
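
Per the doc comment, a cache-build failure from NewVolumeHelperImplWithNamespaces should stop the backup rather than silently fall back to uncached lookups. A usage sketch for a backup-side caller; the wrapper function and the internal/volumehelper import path are illustrative assumptions, while the argument order follows the signature above:

package example

import (
	"github.com/sirupsen/logrus"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
	"github.com/vmware-tanzu/velero/internal/volumehelper"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)

// newHelperForBackup builds a VolumeHelper with the PVC-to-Pod cache for the
// namespaces included in the backup. Hypothetical wrapper for illustration.
func newHelperForBackup(
	backup *velerov1api.Backup,
	policies *resourcepolicies.Policies,
	client crclient.Client,
	log logrus.FieldLogger,
	namespaces []string,
) (volumehelper.VolumeHelper, error) {
	// An error here means the cache could not be built; per the doc comment,
	// the caller should not continue the backup in that case.
	return volumehelper.NewVolumeHelperImplWithNamespaces(
		policies,
		backup.Spec.SnapshotVolumes,
		log,
		client,
		boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
		boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData), // backupExcludePVC when data movement is on
		namespaces, // nil or empty skips cache building entirely
	)
}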
|
||||
|
||||
// NewVolumeHelperImplWithCache creates a VolumeHelper using an externally managed PVC-to-Pod cache.
|
||||
// This is used by plugins that build the cache lazily per-namespace (following the pattern from PR #9226).
|
||||
// The cache can be nil, in which case PVC-to-Pod lookups will fall back to direct API calls.
|
||||
func NewVolumeHelperImplWithCache(
|
||||
backup velerov1api.Backup,
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
pvcPodCache *podvolumeutil.PVCPodCache,
|
||||
) (VolumeHelper, error) {
|
||||
resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(backup, client, logger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get volume policies from backup")
|
||||
}
|
||||
|
||||
return &volumeHelperImpl{
|
||||
volumePolicy: resourcePolicies,
|
||||
snapshotVolumes: backup.Spec.SnapshotVolumes,
|
||||
logger: logger,
|
||||
client: client,
|
||||
defaultVolumesToFSBackup: boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
|
||||
backupExcludePVC: boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData),
|
||||
pvcPodCache: pvcPodCache,
|
||||
}, nil
|
||||
}
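
For the plugin-side path, the cache is owned by the caller and can be filled lazily, one namespace at a time, before handing it to NewVolumeHelperImplWithCache. A sketch under the same assumptions (hypothetical wrapper, assumed import paths); NewPVCPodCache and BuildCacheForNamespace are taken from their use in the tests later in this diff:

package example

import (
	"context"

	"github.com/sirupsen/logrus"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/internal/volumehelper"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
)

// newPluginHelper shows the plugin-side pattern: the cache is created by the
// caller and populated one namespace at a time as items are encountered.
// Hypothetical helper for illustration only.
func newPluginHelper(
	ctx context.Context,
	backup velerov1api.Backup,
	client crclient.Client,
	log logrus.FieldLogger,
	namespace string,
) (volumehelper.VolumeHelper, error) {
	cache := podvolumeutil.NewPVCPodCache()
	// Populate only the namespace currently being processed; further
	// namespaces can be added later with more BuildCacheForNamespace calls.
	if err := cache.BuildCacheForNamespace(ctx, namespace, client); err != nil {
		return nil, err
	}
	// Resource policies are read from the backup inside the constructor.
	return volumehelper.NewVolumeHelperImplWithCache(backup, client, log, cache)
}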
|
||||
|
||||
func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) {
|
||||
@@ -60,6 +134,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
pv := new(corev1api.PersistentVolume)
|
||||
var err error
|
||||
|
||||
var pvNotFoundErr error
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
|
||||
v.logger.WithError(err).Error("fail to convert unstructured into PVC")
|
||||
@@ -68,8 +143,10 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
|
||||
pv, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, err
|
||||
// Any error means PV not available - save to return later if no policy matches
|
||||
v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
|
||||
pvNotFoundErr = err
|
||||
pv = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +161,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for %+v", vfd)
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -93,22 +170,30 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
// If there is no match action, go on to the next check.
|
||||
if action != nil {
|
||||
if action.Type == resourcepolicies.Snapshot {
|
||||
v.logger.Infof(fmt.Sprintf("performing snapshot action for pv %s", pv.Name))
|
||||
v.logger.Infof("performing snapshot action for %+v", vfd)
|
||||
return true, nil
|
||||
} else {
|
||||
v.logger.Infof("Skip snapshot action for pv %s as the action type is %s", pv.Name, action.Type)
|
||||
v.logger.Infof("Skip snapshot action for %+v as the action type is %s", vfd, action.Type)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If resource is PVC, and PV is nil (e.g., Pending/Lost PVC with no matching policy), return the original error
|
||||
if groupResource == kuberesource.PersistentVolumeClaims && pv == nil && pvNotFoundErr != nil {
|
||||
v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, pvNotFoundErr
|
||||
}
|
||||
|
||||
// If this PV is claimed, see if we've already taken a (pod volume backup)
|
||||
// snapshot of the contents of this PV. If so, don't take a snapshot.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
pods, err := podvolumeutil.GetPodsUsingPVC(
|
||||
// Use cached lookup if available for better performance with many PVCs/pods
|
||||
pods, err := podvolumeutil.GetPodsUsingPVCWithCache(
|
||||
pv.Spec.ClaimRef.Namespace,
|
||||
pv.Spec.ClaimRef.Name,
|
||||
v.client,
|
||||
v.pvcPodCache,
|
||||
)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get pod for PV %s", pv.Name)
|
||||
@@ -133,7 +218,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
return true, nil
|
||||
}
|
||||
|
||||
v.logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name))
|
||||
v.logger.Infof("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -143,6 +228,7 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var pvNotFoundErr error
|
||||
if v.volumePolicy != nil {
|
||||
var resource any
|
||||
var err error
|
||||
@@ -154,10 +240,13 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
|
||||
return false, err
|
||||
}
|
||||
resource, err = kubeutil.GetPVForPVC(pvc, v.client)
|
||||
pvResource, err := kubeutil.GetPVForPVC(pvc, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, err
|
||||
// Any error means PV not available - save to return later if no policy matches
|
||||
v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
|
||||
pvNotFoundErr = err
|
||||
} else {
|
||||
resource = pvResource
|
||||
}
|
||||
}
|
||||
|
||||
@@ -184,6 +273,12 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If no policy matched and PV was not found, return the original error
|
||||
if pvNotFoundErr != nil {
|
||||
v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
|
||||
return false, pvNotFoundErr
|
||||
}
|
||||
}
|
||||
|
||||
if v.shouldPerformFSBackupLegacy(volume, pod) {
|
||||
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
|
||||
)
|
||||
|
||||
func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
@@ -285,7 +286,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "PVC not having PV, return false and error case PV not found",
|
||||
name: "PVC not having PV, return false and error when no matching policy",
|
||||
inputObj: builder.ForPersistentVolumeClaim("default", "example-pvc").StorageClass("gp2-csi").Result(),
|
||||
groupResource: kuberesource.PersistentVolumeClaims,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
@@ -738,3 +739,807 @@ func TestGetVolumeFromResource(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "resource is not a PersistentVolume or Volume")
|
||||
})
|
||||
}
|
||||
|
||||
func TestVolumeHelperImplWithCache_ShouldPerformSnapshot(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
inputObj runtime.Object
|
||||
groupResource schema.GroupResource
|
||||
pod *corev1api.Pod
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
snapshotVolumesFlag *bool
|
||||
defaultVolumesToFSBackup bool
|
||||
buildCache bool
|
||||
shouldSnapshot bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumePolicy match with cache, returns true",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
buildCache: true,
|
||||
shouldSnapshot: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumePolicy not match, fs-backup via opt-out with cache, skips snapshot",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp3-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
pod: builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
defaultVolumesToFSBackup: true,
|
||||
buildCache: true,
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Cache not built, falls back to direct lookup",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
buildCache: false,
|
||||
shouldSnapshot: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "No volume policy, defaultVolumesToFSBackup with cache, skips snapshot",
|
||||
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
|
||||
groupResource: kuberesource.PersistentVolumes,
|
||||
pod: builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result(),
|
||||
resourcePolicies: nil,
|
||||
snapshotVolumesFlag: ptr.To(true),
|
||||
defaultVolumesToFSBackup: true,
|
||||
buildCache: true,
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
objs := []runtime.Object{
|
||||
&corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
if tc.pod != nil {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var namespaces []string
|
||||
if tc.buildCache {
|
||||
namespaces = []string{"ns"}
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithNamespaces(
|
||||
p,
|
||||
tc.snapshotVolumesFlag,
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
tc.defaultVolumesToFSBackup,
|
||||
false,
|
||||
namespaces,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputObj)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, tc.groupResource)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError)
|
||||
return
|
||||
}
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumeHelperImplWithCache_ShouldPerformFSBackup(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
resources []runtime.Object
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
snapshotVolumesFlag *bool
|
||||
defaultVolumesToFSBackup bool
|
||||
buildCache bool
|
||||
shouldFSBackup bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumePolicy match with cache, return true",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.FSBackup,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
buildCache: true,
|
||||
shouldFSBackup: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumePolicy match with cache, action is snapshot, return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Snapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
buildCache: true,
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Cache not built, falls back to direct lookup, opt-in annotation",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToBackupAnnotation, "vol-1")).
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
resources: []runtime.Object{
|
||||
builder.ForPersistentVolumeClaim("ns", "pvc-1").
|
||||
VolumeName("pv-1").
|
||||
StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
|
||||
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
|
||||
},
|
||||
buildCache: false,
|
||||
defaultVolumesToFSBackup: false,
|
||||
shouldFSBackup: true,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.resources...)
|
||||
if tc.pod != nil {
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var namespaces []string
|
||||
if tc.buildCache {
|
||||
namespaces = []string{"ns"}
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithNamespaces(
|
||||
p,
|
||||
tc.snapshotVolumesFlag,
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
tc.defaultVolumesToFSBackup,
|
||||
false,
|
||||
namespaces,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError)
|
||||
return
|
||||
}
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewVolumeHelperImplWithCache tests the NewVolumeHelperImplWithCache constructor
|
||||
// which is used by plugins that build the cache lazily per-namespace.
|
||||
func TestNewVolumeHelperImplWithCache(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
backup velerov1api.Backup
|
||||
resourcePolicyConfigMap *corev1api.ConfigMap
|
||||
pvcPodCache bool // whether to pass a cache
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "creates VolumeHelper with nil cache",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(false),
|
||||
},
|
||||
},
|
||||
pvcPodCache: false,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "creates VolumeHelper with non-nil cache",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(true),
|
||||
SnapshotMoveData: ptr.To(true),
|
||||
},
|
||||
},
|
||||
pvcPodCache: true,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "creates VolumeHelper with resource policies",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "resource-policy",
|
||||
},
|
||||
},
|
||||
},
|
||||
resourcePolicyConfigMap: &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "resource-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"policy": `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
storageClass:
|
||||
- gp2-csi
|
||||
action:
|
||||
type: snapshot`,
|
||||
},
|
||||
},
|
||||
pvcPodCache: true,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "fails when resource policy ConfigMap not found",
|
||||
backup: velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "non-existent-policy",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvcPodCache: false,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var objs []runtime.Object
|
||||
if tc.resourcePolicyConfigMap != nil {
|
||||
objs = append(objs, tc.resourcePolicyConfigMap)
|
||||
}
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
var cache *podvolumeutil.PVCPodCache
|
||||
if tc.pvcPodCache {
|
||||
cache = podvolumeutil.NewPVCPodCache()
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithCache(
|
||||
tc.backup,
|
||||
fakeClient,
|
||||
logrus.StandardLogger(),
|
||||
cache,
|
||||
)
|
||||
|
||||
if tc.expectError {
|
||||
require.Error(t, err)
|
||||
require.Nil(t, vh)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewVolumeHelperImplWithCache_UsesCache verifies that the VolumeHelper created
|
||||
// via NewVolumeHelperImplWithCache actually uses the provided cache for lookups.
|
||||
func TestNewVolumeHelperImplWithCache_UsesCache(t *testing.T) {
|
||||
// Create a pod that uses a PVC via opt-out (defaultVolumesToFsBackup=true)
|
||||
pod := builder.ForPod("ns", "pod-1").Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "volume",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
).Result()
|
||||
|
||||
pvc := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
}
|
||||
|
||||
pv := builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result()
|
||||
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, pvc, pv, pod)
|
||||
|
||||
// Build cache for the namespace
|
||||
cache := podvolumeutil.NewPVCPodCache()
|
||||
err := cache.BuildCacheForNamespace(t.Context(), "ns", fakeClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
backup := velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
SnapshotVolumes: ptr.To(true),
|
||||
DefaultVolumesToFsBackup: ptr.To(true), // opt-out mode
|
||||
},
|
||||
}
|
||||
|
||||
vh, err := NewVolumeHelperImplWithCache(backup, fakeClient, logrus.StandardLogger(), cache)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert PV to unstructured
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pv)
|
||||
require.NoError(t, err)
|
||||
|
||||
// ShouldPerformSnapshot should return false because the volume is selected for fs-backup
|
||||
// This relies on the cache to find the pod using the PVC
|
||||
shouldSnapshot, err := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumes)
|
||||
require.NoError(t, err)
|
||||
require.False(t, shouldSnapshot, "Expected snapshot to be skipped due to fs-backup selection via cache")
|
||||
}
|
||||
|
||||
// TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC tests that Pending and Lost PVCs with
|
||||
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
|
||||
func TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
inputPVC *corev1api.PersistentVolumeClaim
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
shouldSnapshot bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC with phase-based skip policy should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Pending PVC without matching skip policy should error (no PV)",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with phase-based skip policy should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with policy for Pending and Lost should not error and return false",
|
||||
inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending", "Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldSnapshot: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
vh := NewVolumeHelperImpl(
|
||||
p,
|
||||
ptr.To(true),
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputPVC)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumeClaims)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError, "Want error; Got nil error")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC tests that Pending and Lost PVCs with
|
||||
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
|
||||
func TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
resourcePolicies *resourcepolicies.ResourcePolicies
|
||||
shouldFSBackup bool
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC with phase-based skip policy should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-pending",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-pending",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Pending PVC without matching skip policy should error (no PV)",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-pending",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-pending-no-policy",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
|
||||
StorageClass("non-existent-class").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with phase-based skip policy should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-lost",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-lost",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with policy for Pending and Lost should not error and return false",
|
||||
pod: builder.ForPod("ns", "pod-1").
|
||||
Volumes(
|
||||
&corev1api.Volume{
|
||||
Name: "vol-lost",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc-lost",
|
||||
},
|
||||
},
|
||||
}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
|
||||
StorageClass("some-class").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
resourcePolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending", "Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
shouldFSBackup: false,
|
||||
expectedErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.pvc)
|
||||
require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
vh := NewVolumeHelperImpl(
|
||||
p,
|
||||
ptr.To(true),
|
||||
logrus.StandardLogger(),
|
||||
fakeClient,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
|
||||
if tc.expectedErr {
|
||||
require.Error(t, actualError, "Want error; Got nil error")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, actualError)
|
||||
require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -288,7 +288,7 @@ const (

// BackupPhase is a string representation of the lifecycle phase
// of a Velero backup.
// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
// +kubebuilder:validation:Enum=New;Queued;ReadyToStart;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
type BackupPhase string

const (
@@ -296,6 +296,12 @@ const (
// yet processed by the BackupController.
BackupPhaseNew BackupPhase = "New"

// BackupPhaseQueued means the backup has been added to the backup queue and is waiting to be dequeued for processing.
BackupPhaseQueued BackupPhase = "Queued"

// BackupPhaseReadyToStart means the backup has been pulled from the queue and is ready to start.
BackupPhaseReadyToStart BackupPhase = "ReadyToStart"

// BackupPhaseFailedValidation means the backup has failed
// the controller's validations and therefore will not run.
BackupPhaseFailedValidation BackupPhase = "FailedValidation"
@@ -371,6 +377,11 @@ type BackupStatus struct {
// +optional
Phase BackupPhase `json:"phase,omitempty"`

// QueuePosition is the position of the backup in the queue.
// Only relevant when Phase is "Queued"
// +optional
QueuePosition int `json:"queuePosition,omitempty"`

// ValidationErrors is a slice of all validation errors (if
// applicable).
// +optional

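The new Queued and ReadyToStart phases, together with QueuePosition, let a client distinguish a backup that is still waiting in the queue from one that has been dequeued but not yet started. A minimal sketch of how a caller might surface this, assuming only the constants and the QueuePosition field added above (the helper function, its name, and the velerov1api alias are illustrative, not part of this change):

// describeBackupProgress is a hypothetical client-side helper showing how the
// new phases and QueuePosition could be consumed.
func describeBackupProgress(b *velerov1api.Backup) string {
	switch b.Status.Phase {
	case velerov1api.BackupPhaseQueued:
		// QueuePosition is only meaningful while the backup is queued.
		return fmt.Sprintf("backup %s is queued at position %d", b.Name, b.Status.QueuePosition)
	case velerov1api.BackupPhaseReadyToStart:
		return fmt.Sprintf("backup %s has been dequeued and is ready to start", b.Name)
	default:
		return fmt.Sprintf("backup %s is in phase %s", b.Name, b.Status.Phase)
	}
}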
@@ -17,6 +17,8 @@ limitations under the License.
package v1

import (
"errors"

corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -146,8 +148,15 @@ type ObjectStorageLocation struct {
Prefix string `json:"prefix,omitempty"`

// CACert defines a CA bundle to use when verifying TLS connections to the provider.
// Deprecated: Use CACertRef instead.
// +optional
CACert []byte `json:"caCert,omitempty"`

// CACertRef is a reference to a Secret containing the CA certificate bundle to use
// when verifying TLS connections to the provider. The Secret must be in the same
// namespace as the BackupStorageLocation.
// +optional
CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
}

// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.
@@ -177,3 +186,13 @@ const (

// TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus.
// TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus.

// Validate validates the BackupStorageLocation to ensure that only one of CACert or CACertRef is set.
func (bsl *BackupStorageLocation) Validate() error {
if bsl.Spec.ObjectStorage != nil &&
bsl.Spec.ObjectStorage.CACert != nil &&
bsl.Spec.ObjectStorage.CACertRef != nil {
return errors.New("cannot specify both caCert and caCertRef in objectStorage")
}
return nil
}

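A minimal usage sketch for the new field, as it might appear in code in this package (not part of the patch; the Secret name and bucket are made up): a BackupStorageLocation should set either the inline CACert or the new CACertRef, and Validate rejects setting both.

bsl := &BackupStorageLocation{
	Spec: BackupStorageLocationSpec{
		StorageType: StorageType{
			ObjectStorage: &ObjectStorageLocation{
				Bucket: "example-bucket",
				CACertRef: &corev1api.SecretKeySelector{
					LocalObjectReference: corev1api.LocalObjectReference{Name: "velero-ca-bundle"},
					Key:                  "ca.crt",
				},
			},
		},
	},
}
if err := bsl.Validate(); err != nil {
	// Only reached when both CACert and CACertRef are set on objectStorage.
	return err
}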
121
pkg/apis/velero/v1/backupstoragelocation_types_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
/*
|
||||
Copyright The Velero Contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestBackupStorageLocationValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bsl *BackupStorageLocation
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid - neither CACert nor CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid - only CACert set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACert: []byte("test-cert"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid - only CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACertRef: &corev1api.SecretKeySelector{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "ca-cert-secret",
|
||||
},
|
||||
Key: "ca.crt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid - both CACert and CACertRef set",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: &ObjectStorageLocation{
|
||||
Bucket: "test-bucket",
|
||||
CACert: []byte("test-cert"),
|
||||
CACertRef: &corev1api.SecretKeySelector{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "ca-cert-secret",
|
||||
},
|
||||
Key: "ca.crt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "valid - no ObjectStorage",
|
||||
bsl: &BackupStorageLocation{
|
||||
Spec: BackupStorageLocationSpec{
|
||||
StorageType: StorageType{
|
||||
ObjectStorage: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := test.bsl.Validate()
|
||||
if test.expectError && err == nil {
|
||||
t.Errorf("expected error but got none")
|
||||
}
|
||||
if !test.expectError && err != nil {
|
||||
t.Errorf("expected no error but got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -118,6 +118,10 @@ type PodVolumeBackupStatus struct {
|
||||
// +optional
|
||||
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
|
||||
|
||||
// IncrementalBytes holds the number of bytes new or changed since the last backup
|
||||
// +optional
|
||||
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
|
||||
|
||||
// AcceptedTimestamp records the time the pod volume backup is to be prepared.
|
||||
// The server's time is used for AcceptedTimestamp
|
||||
// +optional
|
||||
@@ -134,6 +138,7 @@ type PodVolumeBackupStatus struct {
|
||||
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
|
||||
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
|
||||
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
|
||||
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
|
||||
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
|
||||
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"
|
||||
|
||||
@@ -58,6 +58,10 @@ type PodVolumeRestoreSpec struct {
|
||||
// Cancel indicates request to cancel the ongoing PodVolumeRestore. It can be set
|
||||
// when the PodVolumeRestore is in InProgress phase
|
||||
Cancel bool `json:"cancel,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
|
||||
|
||||
@@ -915,6 +915,11 @@ func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.CACertRef != nil {
|
||||
in, out := &in.CACertRef, &out.CACertRef
|
||||
*out = new(corev1.SecretKeySelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation.
|
||||
|
||||
@@ -58,6 +58,10 @@ type DataDownloadSpec struct {
|
||||
// NodeOS is OS of the node where the DataDownload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
// TargetVolumeSpec is the specification for a target PVC.
|
||||
|
||||
@@ -155,6 +155,10 @@ type DataUploadStatus struct {
|
||||
// +optional
|
||||
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
|
||||
|
||||
// IncrementalBytes holds the number of bytes new or changed since the last backup
|
||||
// +optional
|
||||
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
|
||||
|
||||
// Node is name of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
Node string `json:"node,omitempty"`
|
||||
@@ -185,6 +189,7 @@ type DataUploadStatus struct {
|
||||
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started"
|
||||
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
|
||||
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
|
||||
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
|
||||
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
|
||||
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"
|
||||
@@ -244,4 +249,8 @@ type DataUploadResult struct {
|
||||
// NodeOS is OS of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
|
||||
// SnapshotSize is the logical size in Bytes of the snapshot.
|
||||
// +optional
|
||||
SnapshotSize int64 `json:"snapshotSize,omitempty"`
|
||||
}
|
||||
|
||||
@@ -76,14 +76,8 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
|
||||
pvc.Spec.Selector = nil
|
||||
}
|
||||
|
||||
// remove label selectors with "velero.io/" prefixing in the key which is left by Velero restore
|
||||
if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
|
||||
for k := range pvc.Spec.Selector.MatchLabels {
|
||||
if strings.HasPrefix(k, "velero.io/") {
|
||||
delete(pvc.Spec.Selector.MatchLabels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Clean stale Velero labels from PVC metadata and selector
|
||||
a.cleanupStaleVeleroLabels(pvc, backup)
|
||||
|
||||
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
|
||||
if err != nil {
|
||||
@@ -92,3 +86,50 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
|
||||
|
||||
return &unstructured.Unstructured{Object: pvcMap}, actionhelpers.RelatedItemsForPVC(pvc, a.log), nil
|
||||
}
|
||||
|
||||
// cleanupStaleVeleroLabels removes stale Velero labels from both the PVC metadata
|
||||
// and the selector's match labels to ensure clean backups
|
||||
func (a *PVCAction) cleanupStaleVeleroLabels(pvc *corev1api.PersistentVolumeClaim, backup *v1.Backup) {
|
||||
// Clean stale Velero labels from selector match labels
|
||||
if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
|
||||
for k := range pvc.Spec.Selector.MatchLabels {
|
||||
if strings.HasPrefix(k, "velero.io/") {
|
||||
a.log.Infof("Deleting stale Velero label %s from PVC %s selector", k, pvc.Name)
|
||||
delete(pvc.Spec.Selector.MatchLabels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean stale Velero labels from main metadata
|
||||
if pvc.Labels != nil {
|
||||
for k, v := range pvc.Labels {
|
||||
// Only remove labels that are clearly stale from previous operations
|
||||
shouldRemove := false
|
||||
|
||||
// Always remove restore-name labels as these are from previous restores
|
||||
if k == v1.RestoreNameLabel {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if k == v1.MustIncludeAdditionalItemAnnotation {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
// Remove backup-name labels that don't match current backup
|
||||
if k == v1.BackupNameLabel && v != backup.Name {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
// Remove volume-snapshot-name labels from previous CSI backups
|
||||
// Note: If this backup creates new CSI snapshots, the CSI action will add them back
|
||||
if k == v1.VolumeSnapshotLabel {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
a.log.Infof("Deleting stale Velero label %s=%s from PVC %s", k, v, pvc.Name)
|
||||
delete(pvc.Labels, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,3 +149,176 @@ func TestBackupPVAction(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, additional)
|
||||
}
|
||||
|
||||
func TestCleanupStaleVeleroLabels(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputPVC *corev1api.PersistentVolumeClaim
|
||||
backup *v1.Backup
|
||||
expectedLabels map[string]string
|
||||
expectedSelector *metav1.LabelSelector
|
||||
}{
|
||||
{
|
||||
name: "removes restore-name labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes backup-name labels that don't match current backup",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "keeps backup-name labels that match current backup",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/backup-name": "current-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"velero.io/backup-name": "current-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes volume-snapshot-name labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/volume-snapshot-name": "old-snapshot",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removes velero labels from selector match labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: nil,
|
||||
expectedSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles PVC with no labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: nil,
|
||||
},
|
||||
{
|
||||
name: "handles PVC with no selector",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
expectedSelector: nil,
|
||||
},
|
||||
{
|
||||
name: "removes multiple stale velero labels",
|
||||
inputPVC: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pvc",
|
||||
Labels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"velero.io/backup-name": "old-backup",
|
||||
"velero.io/volume-snapshot-name": "old-snapshot",
|
||||
"app": "myapp",
|
||||
"env": "prod",
|
||||
},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"velero.io/restore-name": "old-restore",
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
|
||||
expectedLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
"env": "prod",
|
||||
},
|
||||
expectedSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "myapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
action := NewPVCAction(velerotest.NewLogger())
|
||||
|
||||
// Create a copy of the input PVC to avoid modifying the test case
|
||||
pvcCopy := tc.inputPVC.DeepCopy()
|
||||
|
||||
action.cleanupStaleVeleroLabels(pvcCopy, tc.backup)
|
||||
|
||||
assert.Equal(t, tc.expectedLabels, pvcCopy.Labels, "Labels should match expected values")
|
||||
assert.Equal(t, tc.expectedSelector, pvcCopy.Spec.Selector, "Selector should match expected values")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
internalvolumehelper "github.com/vmware-tanzu/velero/internal/volumehelper"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
|
||||
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
|
||||
@@ -57,6 +58,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
|
||||
)
|
||||
|
||||
// TODO: Replace hardcoded VolumeSnapshot finalizer strings with constants from
|
||||
@@ -72,6 +74,14 @@ const (
|
||||
type pvcBackupItemAction struct {
|
||||
log logrus.FieldLogger
|
||||
crClient crclient.Client
|
||||
|
||||
// pvcPodCache provides lazy per-namespace caching of PVC-to-Pod mappings.
|
||||
// Since plugin instances are unique per backup (created via newPluginManager and
|
||||
// cleaned up via CleanupClients at backup completion), we can safely cache this
|
||||
// without mutex or backup UID tracking.
|
||||
// This avoids the O(N*M) performance issue when there are many PVCs and pods.
|
||||
// See issue #9179 and PR #9226 for details.
|
||||
pvcPodCache *podvolumeutil.PVCPodCache
|
||||
}
|
||||
|
||||
// AppliesTo returns information indicating that the PVCBackupItemAction
|
||||
@@ -97,6 +107,59 @@ func (p *pvcBackupItemAction) validateBackup(backup velerov1api.Backup) (valid b
|
||||
return true
|
||||
}
|
||||
|
||||
// ensurePVCPodCacheForNamespace ensures the PVC-to-Pod cache is built for the given namespace.
|
||||
// This uses lazy per-namespace caching following the pattern from PR #9226.
|
||||
// Since plugin instances are unique per backup, we can safely cache without mutex or backup UID tracking.
|
||||
func (p *pvcBackupItemAction) ensurePVCPodCacheForNamespace(ctx context.Context, namespace string) error {
|
||||
// Initialize cache if needed
|
||||
if p.pvcPodCache == nil {
|
||||
p.pvcPodCache = podvolumeutil.NewPVCPodCache()
|
||||
}
|
||||
|
||||
// Build cache for namespace if not already done
|
||||
if !p.pvcPodCache.IsNamespaceBuilt(namespace) {
|
||||
p.log.Debugf("Building PVC-to-Pod cache for namespace %s", namespace)
|
||||
if err := p.pvcPodCache.BuildCacheForNamespace(ctx, namespace, p.crClient); err != nil {
|
||||
return errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", namespace)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVolumeHelperWithCache creates a VolumeHelper using the pre-built PVC-to-Pod cache.
|
||||
// The cache should be ensured for the relevant namespace(s) before calling this.
|
||||
func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
|
||||
// Create VolumeHelper with our lazy-built cache
|
||||
vh, err := internalvolumehelper.NewVolumeHelperImplWithCache(
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
p.pvcPodCache,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create VolumeHelper")
|
||||
}
|
||||
return vh, nil
|
||||
}
|
||||
|
||||
// getOrCreateVolumeHelper returns a VolumeHelper with lazy per-namespace caching.
// The VolumeHelper uses the pvcPodCache which is populated lazily as namespaces are encountered.
// Callers should use ensurePVCPodCacheForNamespace before calling methods that need
// PVC-to-Pod lookups for a specific namespace.
// Since plugin instances are unique per backup (created via newPluginManager and
// cleaned up via CleanupClients at backup completion), we can safely cache this.
// See issue #9179 and PR #9226 for details.
func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
// Initialize the PVC-to-Pod cache if needed
if p.pvcPodCache == nil {
p.pvcPodCache = podvolumeutil.NewPVCPodCache()
}

// Return the VolumeHelper with our lazily-built cache
// The cache will be populated incrementally as namespaces are encountered
return p.getVolumeHelperWithCache(backup)
}
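A condensed sketch of the intended call pattern (illustrative only; the loop, ctx, and the pvcsAsUnstructured slice are assumptions, and the real call sites are in Execute and getVolumeSnapshotReference below): the first PVC seen in a namespace pays for one pod listing, and every later PVC in that namespace reuses the cached mapping.

for _, item := range pvcsAsUnstructured { // hypothetical slice of PVCs handled in one backup
	ns := item.GetNamespace()
	// Builds the PVC-to-Pod cache the first time a namespace is seen; no-op afterwards.
	if err := p.ensurePVCPodCacheForNamespace(ctx, ns); err != nil {
		return err
	}
	vh, err := p.getOrCreateVolumeHelper(backup)
	if err != nil {
		return err
	}
	// The helper answers volume-policy questions from the cache instead of
	// listing pods for every PVC.
	shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
		item, kuberesource.PersistentVolumeClaims, *backup, p.crClient, p.log, vh)
	if err != nil {
		return err
	}
	p.log.Debugf("PVC %s/%s shouldSnapshot=%t", ns, item.GetName(), shouldSnapshot)
}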
|
||||
|
||||
func (p *pvcBackupItemAction) validatePVCandPV(
|
||||
pvc corev1api.PersistentVolumeClaim,
|
||||
item runtime.Unstructured,
|
||||
@@ -248,12 +311,24 @@ func (p *pvcBackupItemAction) Execute(
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
|
||||
// Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
|
||||
if err := p.ensurePVCPodCacheForNamespace(context.TODO(), pvc.Namespace); err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
}
|
||||
|
||||
// Get or create the cached VolumeHelper for this backup
|
||||
vh, err := p.getOrCreateVolumeHelper(backup)
|
||||
if err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
}
|
||||
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
|
||||
item,
|
||||
kuberesource.PersistentVolumeClaims,
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
vh,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, "", nil, err
|
||||
@@ -621,8 +696,41 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
|
||||
return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
|
||||
}
|
||||
|
||||
// Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
|
||||
if err := p.ensurePVCPodCacheForNamespace(ctx, pvc.Namespace); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", pvc.Namespace)
|
||||
}
|
||||
|
||||
// Get the cached VolumeHelper for filtering PVCs by volume policy
|
||||
vh, err := p.getOrCreateVolumeHelper(backup)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get VolumeHelper for filtering PVCs in group %q", group)
|
||||
}
|
||||
|
||||
// Filter PVCs by volume policy
|
||||
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup, vh)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
|
||||
}
|
||||
|
||||
// Warn if any PVCs were filtered out
|
||||
if len(filteredPVCs) < len(groupedPVCs) {
|
||||
for _, originalPVC := range groupedPVCs {
|
||||
found := false
|
||||
for _, filteredPVC := range filteredPVCs {
|
||||
if originalPVC.Name == filteredPVC.Name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the CSI driver for the grouped PVCs
|
||||
driver, err := p.determineCSIDriver(groupedPVCs)
|
||||
driver, err := p.determineCSIDriver(filteredPVCs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
|
||||
}
|
||||
@@ -643,7 +751,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
|
||||
}
|
||||
|
||||
// Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
|
||||
vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
|
||||
vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
|
||||
}
|
||||
@@ -734,6 +842,43 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
|
||||
return pvcList.Items, nil
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
|
||||
pvcs []corev1api.PersistentVolumeClaim,
|
||||
backup *velerov1api.Backup,
|
||||
vh internalvolumehelper.VolumeHelper,
|
||||
) ([]corev1api.PersistentVolumeClaim, error) {
|
||||
var filteredPVCs []corev1api.PersistentVolumeClaim
|
||||
|
||||
for _, pvc := range pvcs {
|
||||
// Convert PVC to unstructured for ShouldPerformSnapshotWithVolumeHelper
|
||||
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
|
||||
}
|
||||
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
|
||||
|
||||
// Check if this PVC should be snapshotted according to volume policies
|
||||
// Uses the cached VolumeHelper for better performance with many PVCs/pods
|
||||
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
|
||||
unstructuredPVC,
|
||||
kuberesource.PersistentVolumeClaims,
|
||||
*backup,
|
||||
p.crClient,
|
||||
p.log,
|
||||
vh,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
}
|
||||
|
||||
if shouldSnapshot {
|
||||
filteredPVCs = append(filteredPVCs, pvc)
|
||||
}
|
||||
}
|
||||
|
||||
return filteredPVCs, nil
|
||||
}
|
||||
|
||||
func (p *pvcBackupItemAction) determineCSIDriver(
|
||||
pvcs []corev1api.PersistentVolumeClaim,
|
||||
) (string, error) {
|
||||
|
||||
@@ -586,6 +586,387 @@ func TestListGroupedPVCs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterPVCsByVolumePolicy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvcs []corev1api.PersistentVolumeClaim
|
||||
pvs []corev1api.PersistentVolume
|
||||
volumePolicyStr string
|
||||
expectCount int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "All PVCs should be included when no volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-1",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-2",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCount: 2,
|
||||
},
|
||||
{
|
||||
name: "Filter out NFS PVC by volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-csi",
|
||||
StorageClassName: pointer.String("sc-1"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 1,
|
||||
},
|
||||
{
|
||||
name: "All PVCs filtered out by volume policy",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs-1",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs-2",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export/1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export/2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 0,
|
||||
},
|
||||
{
|
||||
name: "Filter out non-CSI PVCs from mixed driver group",
|
||||
pvcs: []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-linstor",
|
||||
Namespace: "ns-1",
|
||||
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-linstor",
|
||||
StorageClassName: pointer.String("sc-linstor"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-nfs",
|
||||
Namespace: "ns-1",
|
||||
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
},
|
||||
pvs: []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumePolicyStr: `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
expectCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
objs := []runtime.Object{}
|
||||
for i := range tt.pvs {
|
||||
objs = append(objs, &tt.pvs[i])
|
||||
}
|
||||
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
backup := &velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{},
|
||||
}
|
||||
|
||||
// Add volume policy ConfigMap if specified
|
||||
if tt.volumePolicyStr != "" {
|
||||
cm := &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "volume-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"volume-policy": tt.volumePolicyStr,
|
||||
},
|
||||
}
|
||||
require.NoError(t, client.Create(t.Context(), cm))
|
||||
|
||||
backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "volume-policy",
|
||||
}
|
||||
}
|
||||
|
||||
action := &pvcBackupItemAction{
|
||||
log: velerotest.NewLogger(),
|
||||
crClient: client,
|
||||
}
|
||||
|
||||
// Pass nil for VolumeHelper in tests - it will fall back to creating a new one per call
|
||||
// This is the expected behavior for testing and third-party plugins
|
||||
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup, nil)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result, tt.expectCount)
|
||||
|
||||
// For mixed driver scenarios, verify filtered result can determine single CSI driver
|
||||
if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 {
|
||||
driver, err := action.determineCSIDriver(result)
|
||||
require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error")
|
||||
require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterPVCsByVolumePolicyWithVolumeHelper tests filterPVCsByVolumePolicy when a
|
||||
// pre-created VolumeHelper is passed (non-nil). This exercises the cached path used
|
||||
// by the CSI PVC BIA plugin for better performance.
|
||||
func TestFilterPVCsByVolumePolicyWithVolumeHelper(t *testing.T) {
|
||||
// Create test PVCs and PVs
|
||||
pvcs := []corev1api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-csi",
|
||||
StorageClassName: pointer.String("sc-csi"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv-nfs",
|
||||
StorageClassName: pointer.String("sc-nfs"),
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
|
||||
},
|
||||
}
|
||||
|
||||
pvs := []corev1api.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
|
||||
Spec: corev1api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: corev1api.PersistentVolumeSource{
|
||||
NFS: &corev1api.NFSVolumeSource{
|
||||
Server: "nfs-server",
|
||||
Path: "/export",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fake client with PVs
|
||||
objs := []runtime.Object{}
|
||||
for i := range pvs {
|
||||
objs = append(objs, &pvs[i])
|
||||
}
|
||||
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
// Create backup with volume policy that skips NFS volumes
|
||||
volumePolicyStr := `
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
`
|
||||
cm := &corev1api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "volume-policy",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"volume-policy": volumePolicyStr,
|
||||
},
|
||||
}
|
||||
require.NoError(t, client.Create(t.Context(), cm))
|
||||
|
||||
backup := &velerov1api.Backup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-backup",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
ResourcePolicy: &corev1api.TypedLocalObjectReference{
|
||||
Kind: "ConfigMap",
|
||||
Name: "volume-policy",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
action := &pvcBackupItemAction{
|
||||
log: velerotest.NewLogger(),
|
||||
crClient: client,
|
||||
}
|
||||
|
||||
// Create a VolumeHelper using the same method the plugin would use
|
||||
vh, err := action.getOrCreateVolumeHelper(backup)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vh)
|
||||
|
||||
// Test with the pre-created VolumeHelper (non-nil path)
|
||||
result, err := action.filterPVCsByVolumePolicy(pvcs, backup, vh)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should filter out the NFS PVC, leaving only the CSI PVC
|
||||
require.Len(t, result, 1)
|
||||
require.Equal(t, "pvc-csi", result[0].Name)
|
||||
}

func TestDetermineCSIDriver(t *testing.T) {
	tests := []struct {
		name string
@@ -1685,3 +2066,42 @@ func TestPVCRequestSize(t *testing.T) {
		})
	}
}

// TestGetOrCreateVolumeHelper tests the VolumeHelper and PVC-to-Pod cache behavior.
// Since plugin instances are unique per backup (created via newPluginManager and
// cleaned up via CleanupClients at backup completion), we verify that the pvcPodCache
// is properly initialized and reused across calls.
func TestGetOrCreateVolumeHelper(t *testing.T) {
	client := velerotest.NewFakeControllerRuntimeClient(t)
	action := &pvcBackupItemAction{
		log:      velerotest.NewLogger(),
		crClient: client,
	}
	backup := &velerov1api.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-backup",
			Namespace: "velero",
			UID:       types.UID("test-uid-1"),
		},
	}

	// Initially, pvcPodCache should be nil
	require.Nil(t, action.pvcPodCache, "pvcPodCache should be nil initially")

	// Get VolumeHelper first time - should create new cache and VolumeHelper
	vh1, err := action.getOrCreateVolumeHelper(backup)
	require.NoError(t, err)
	require.NotNil(t, vh1)

	// pvcPodCache should now be initialized
	require.NotNil(t, action.pvcPodCache, "pvcPodCache should be initialized after first call")
	cache1 := action.pvcPodCache

	// Get VolumeHelper second time - should reuse the same cache
	vh2, err := action.getOrCreateVolumeHelper(backup)
	require.NoError(t, err)
	require.NotNil(t, vh2)

	// The pvcPodCache should be the same instance
	require.Same(t, cache1, action.pvcPodCache, "Expected same pvcPodCache instance on repeated calls")
}
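
The test above exercises a lazy get-or-create pattern: the first call builds the PVC-to-Pod cache, and every later call on the same per-backup plugin instance reuses it. Below is a minimal, self-contained sketch of that pattern; the type and field names (cachingAction, pvcPodCache) are illustrative stand-ins, not the actual plugin code.

package main

import "fmt"

// cachingAction mimics, purely for illustration, a per-backup plugin instance
// that lazily builds a PVC-to-Pod cache on first use and reuses it afterwards.
type cachingAction struct {
	pvcPodCache map[string][]string // hypothetical: PVC name -> pods mounting it
}

func (a *cachingAction) getOrCreateCache() map[string][]string {
	if a.pvcPodCache == nil {
		a.pvcPodCache = map[string][]string{} // built once per instance
	}
	return a.pvcPodCache
}

func main() {
	a := &cachingAction{}
	a.getOrCreateCache()["pvc-a"] = []string{"pod-1"}
	// The second call returns the same map, so the entry is still visible.
	fmt.Println(a.getOrCreateCache()["pvc-a"]) // [pod-1]
}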

@@ -84,17 +84,6 @@ func (p *volumeSnapshotBackupItemAction) Execute(
		return nil, nil, "", nil, errors.WithStack(err)
	}

	additionalItems := make([]velero.ResourceIdentifier, 0)
	if vs.Spec.VolumeSnapshotClassName != nil {
		additionalItems = append(
			additionalItems,
			velero.ResourceIdentifier{
				GroupResource: kuberesource.VolumeSnapshotClasses,
				Name:          *vs.Spec.VolumeSnapshotClassName,
			},
		)
	}

	if backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
		backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
		p.log.
@@ -105,6 +94,24 @@ func (p *volumeSnapshotBackupItemAction) Execute(
		return item, nil, "", nil, nil
	}

	additionalItems := make([]velero.ResourceIdentifier, 0)

	if vs.Spec.VolumeSnapshotClassName != nil {
		// This is still needed to add the VolumeSnapshotClass to the backup.
		// The secret with VolumeSnapshotClass is still relevant to backup.
		additionalItems = append(
			additionalItems,
			velero.ResourceIdentifier{
				GroupResource: kuberesource.VolumeSnapshotClasses,
				Name:          *vs.Spec.VolumeSnapshotClassName,
			},
		)

		// Because async operation will update VolumeSnapshot during finalizing phase.
		// No matter what we do, VolumeSnapshotClass cannot be deleted. So skip it.
		// Just deleting VolumeSnapshotClass during restore and delete is enough.
	}

	p.log.Infof("Getting VolumesnapshotContent for Volumesnapshot %s/%s",
		vs.Namespace, vs.Name)


@@ -97,6 +97,10 @@ func (p *volumeSnapshotContentBackupItemAction) Execute(
		})
	}

	// Because async operation will update VolumeSnapshotContent during finalizing phase.
	// No matter what we do, VolumeSnapshotClass cannot be deleted. So skip it.
	// Just deleting VolumeSnapshotClass during restore and delete is enough.

	snapContMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&snapCont)
	if err != nil {
		return nil, nil, "", nil, errors.WithStack(err)

@@ -42,7 +42,7 @@ func TestVSCExecute(t *testing.T) {
		expectedItems []velero.ResourceIdentifier
	}{
		{
			name: "Invalid VolumeSnapshotClass",
			name: "Invalid VolumeSnapshotContent",
			item: velerotest.UnstructuredOrDie(
				`
{

@@ -117,7 +117,6 @@ type kubernetesBackupper struct {
|
||||
podCommandExecutor podexec.PodCommandExecutor
|
||||
podVolumeBackupperFactory podvolume.BackupperFactory
|
||||
podVolumeTimeout time.Duration
|
||||
podVolumeContext context.Context
|
||||
defaultVolumesToFsBackup bool
|
||||
clientPageSize int
|
||||
uploaderType string
|
||||
@@ -168,10 +167,39 @@ func NewKubernetesBackupper(
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getNamespaceIncludesExcludes returns an IncludesExcludes list containing which namespaces to
|
||||
// include and exclude from the backup.
|
||||
func getNamespaceIncludesExcludes(backup *velerov1api.Backup) *collections.IncludesExcludes {
|
||||
return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
|
||||
// getNamespaceIncludesExcludesAndArgoCDNamespaces returns an IncludesExcludes list containing which namespaces to
|
||||
// include and exclude from the backup and a list of namespaces managed by ArgoCD.
|
||||
func getNamespaceIncludesExcludesAndArgoCDNamespaces(backup *velerov1api.Backup, kbClient kbclient.Client) (*collections.NamespaceIncludesExcludes, []string, error) {
|
||||
nsList := corev1api.NamespaceList{}
|
||||
activeNamespaces := []string{}
|
||||
nsManagedByArgoCD := []string{}
|
||||
if err := kbClient.List(context.Background(), &nsList); err != nil {
|
||||
return nil, nsManagedByArgoCD, err
|
||||
}
|
||||
for _, ns := range nsList.Items {
|
||||
activeNamespaces = append(activeNamespaces, ns.Name)
|
||||
}
|
||||
|
||||
// Set ActiveNamespaces first, then set includes/excludes
|
||||
includesExcludes := collections.NewNamespaceIncludesExcludes().
|
||||
ActiveNamespaces(activeNamespaces).
|
||||
Includes(backup.Spec.IncludedNamespaces...).
|
||||
Excludes(backup.Spec.ExcludedNamespaces...)
|
||||
|
||||
// Expand wildcards if needed
|
||||
if err := includesExcludes.ExpandIncludesExcludes(); err != nil {
|
||||
return nil, []string{}, err
|
||||
}
|
||||
|
||||
// Check for ArgoCD managed namespaces in the namespaces that will be included
|
||||
for _, ns := range nsList.Items {
|
||||
nsLabels := ns.GetLabels()
|
||||
if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 && includesExcludes.ShouldInclude(ns.Name) {
|
||||
nsManagedByArgoCD = append(nsManagedByArgoCD, ns.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return includesExcludes, nsManagedByArgoCD, nil
|
||||
}
|
||||
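
The helper above delegates wildcard handling to ExpandIncludesExcludes: conceptually, each include/exclude pattern is matched against the list of active namespaces in the cluster. Below is a rough, self-contained sketch of that matching step using path.Match for "*" globs; it is only an illustration of the idea, not the collections package implementation.

package main

import (
	"fmt"
	"path"
)

// expandPatterns matches glob-style namespace patterns (e.g. "app-*") against
// the active namespaces in the cluster. Illustrative sketch only.
func expandPatterns(patterns, activeNamespaces []string) ([]string, error) {
	matched := []string{}
	for _, ns := range activeNamespaces {
		for _, p := range patterns {
			ok, err := path.Match(p, ns)
			if err != nil {
				return nil, err
			}
			if ok {
				matched = append(matched, ns)
				break
			}
		}
	}
	return matched, nil
}

func main() {
	got, _ := expandPatterns([]string{"app-*"}, []string{"app-1", "app-2", "velero"})
	fmt.Println(got) // [app-1 app-2]
}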
|
||||
func getResourceHooks(hookSpecs []velerov1api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]hook.ResourceHook, error) {
|
||||
@@ -245,8 +273,35 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
if err := kb.writeBackupVersion(tw); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
var err error
|
||||
var nsManagedByArgoCD []string
|
||||
backupRequest.NamespaceIncludesExcludes, nsManagedByArgoCD, err = getNamespaceIncludesExcludesAndArgoCDNamespaces(backupRequest.Backup, kb.kbClient)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("error getting namespace includes/excludes")
|
||||
return err
|
||||
}
|
||||
|
||||
if backupRequest.NamespaceIncludesExcludes.IsWildcardExpanded() {
|
||||
expandedIncludes := backupRequest.NamespaceIncludesExcludes.GetIncludes()
|
||||
expandedExcludes := backupRequest.NamespaceIncludesExcludes.GetExcludes()
|
||||
|
||||
// Get the final namespace list after wildcard expansion
|
||||
wildcardResult, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("error resolving namespace list")
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"expandedIncludes": expandedIncludes,
|
||||
"expandedExcludes": expandedExcludes,
|
||||
"wildcardResult": wildcardResult,
|
||||
"includedCount": len(expandedIncludes),
|
||||
"excludedCount": len(expandedExcludes),
|
||||
"resultCount": len(wildcardResult),
|
||||
}).Info("Successfully expanded wildcard patterns")
|
||||
}
|
||||
|
||||
backupRequest.NamespaceIncludesExcludes = getNamespaceIncludesExcludes(backupRequest.Backup)
|
||||
log.Infof("Including namespaces: %s", backupRequest.NamespaceIncludesExcludes.IncludesString())
|
||||
log.Infof("Excluding namespaces: %s", backupRequest.NamespaceIncludesExcludes.ExcludesString())
|
||||
|
||||
@@ -254,12 +309,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
// We will check for the existence of a ArgoCD label in the includedNamespaces and add a warning
|
||||
// so that users are at least aware about the existence of argoCD managed ns in their backup
|
||||
// Related Issue: https://github.com/vmware-tanzu/velero/issues/7905
|
||||
if len(backupRequest.Spec.IncludedNamespaces) > 0 {
|
||||
nsManagedByArgoCD := getNamespacesManagedByArgoCD(kb.kbClient, backupRequest.Spec.IncludedNamespaces, log)
|
||||
|
||||
if len(nsManagedByArgoCD) > 0 {
|
||||
log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
|
||||
}
|
||||
if len(nsManagedByArgoCD) > 0 {
|
||||
log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
|
||||
}
|
||||
|
||||
if collections.UseOldResourceFilters(backupRequest.Spec) {
|
||||
@@ -284,7 +335,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
|
||||
log.Infof("Backing up all volumes using pod volume backup: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToFsBackup))
|
||||
|
||||
var err error
|
||||
backupRequest.ResourceHooks, err = getResourceHooks(backupRequest.Spec.Hooks.Resources, kb.discoveryHelper)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Debugf("Error from getResourceHooks")
|
||||
@@ -314,12 +364,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
}
|
||||
|
||||
var podVolumeCancelFunc context.CancelFunc
|
||||
kb.podVolumeContext, podVolumeCancelFunc = context.WithTimeout(context.Background(), podVolumeTimeout)
|
||||
podVolumeContext, podVolumeCancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
|
||||
defer podVolumeCancelFunc()
|
||||
|
||||
var podVolumeBackupper podvolume.Backupper
|
||||
if kb.podVolumeBackupperFactory != nil {
|
||||
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(kb.podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
|
||||
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
|
||||
return errors.WithStack(err)
|
||||
@@ -358,6 +408,28 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
}
|
||||
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
|
||||
|
||||
// Resolve namespaces for PVC-to-Pod cache building in volumehelper.
|
||||
// See issue #9179 for details.
|
||||
namespaces, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to resolve namespace list for PVC-to-Pod cache")
|
||||
return err
|
||||
}
|
||||
|
||||
volumeHelperImpl, err := volumehelper.NewVolumeHelperImplWithNamespaces(
|
||||
backupRequest.ResPolicies,
|
||||
backupRequest.Spec.SnapshotVolumes,
|
||||
log,
|
||||
kb.kbClient,
|
||||
boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
|
||||
!backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
|
||||
namespaces,
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to build PVC-to-Pod cache for volume policy lookups")
|
||||
return err
|
||||
}
|
||||
|
||||
itemBackupper := &itemBackupper{
|
||||
backupRequest: backupRequest,
|
||||
tarWriter: tw,
|
||||
@@ -365,20 +437,14 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
kbClient: kb.kbClient,
|
||||
discoveryHelper: kb.discoveryHelper,
|
||||
podVolumeBackupper: podVolumeBackupper,
|
||||
podVolumeContext: podVolumeContext,
|
||||
podVolumeSnapshotTracker: podvolume.NewTracker(),
|
||||
volumeSnapshotterGetter: volumeSnapshotterGetter,
|
||||
volumeSnapshotterCache: NewVolumeSnapshotterCache(volumeSnapshotterGetter),
|
||||
itemHookHandler: &hook.DefaultItemHookHandler{
|
||||
PodCommandExecutor: kb.podCommandExecutor,
|
||||
},
|
||||
hookTracker: hook.NewHookTracker(),
|
||||
volumeHelperImpl: volumehelper.NewVolumeHelperImpl(
|
||||
backupRequest.ResPolicies,
|
||||
backupRequest.Spec.SnapshotVolumes,
|
||||
log,
|
||||
kb.kbClient,
|
||||
boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
|
||||
!backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
|
||||
),
|
||||
hookTracker: hook.NewHookTracker(),
|
||||
volumeHelperImpl: volumeHelperImpl,
|
||||
kubernetesBackupper: kb,
|
||||
}
|
||||
|
||||
@@ -546,7 +612,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
log.Infof("Backing Up Item Block including %s %s/%s (%v items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items))
|
||||
|
||||
wg.Add(1)
|
||||
backupRequest.ItemBlockChannel <- ItemBlockInput{
|
||||
backupRequest.WorkerPool.GetInputChannel() <- ItemBlockInput{
|
||||
itemBlock: itemBlock,
|
||||
returnChan: itemBlockReturn,
|
||||
}
|
||||
@@ -797,7 +863,7 @@ func (kb *kubernetesBackupper) handleItemBlockPostHooks(itemBlock *BackupItemBlo
|
||||
log := itemBlock.Log
|
||||
|
||||
// the post hooks will not execute until all PVBs of the item block pods are processed
|
||||
if err := kb.waitUntilPVBsProcessed(kb.podVolumeContext, log, itemBlock, hookPods); err != nil {
|
||||
if err := kb.waitUntilPVBsProcessed(itemBlock.itemBackupper.podVolumeContext, log, itemBlock, hookPods); err != nil {
|
||||
log.WithError(err).Error("failed to wait PVBs processed for the ItemBlock")
|
||||
return
|
||||
}
|
||||
@@ -1198,6 +1264,7 @@ func updateVolumeInfos(
|
||||
volumeInfos[index].SnapshotDataMovementInfo.SnapshotHandle = dataUpload.Status.SnapshotID
|
||||
volumeInfos[index].SnapshotDataMovementInfo.RetainedSnapshot = dataUpload.Spec.CSISnapshot.VolumeSnapshot
|
||||
volumeInfos[index].SnapshotDataMovementInfo.Size = dataUpload.Status.Progress.TotalBytes
|
||||
volumeInfos[index].SnapshotDataMovementInfo.IncrementalSize = dataUpload.Status.IncrementalBytes
|
||||
volumeInfos[index].SnapshotDataMovementInfo.Phase = dataUpload.Status.Phase
|
||||
|
||||
if dataUpload.Status.Phase == velerov2alpha1.DataUploadPhaseCompleted {
|
||||
@@ -1255,26 +1322,3 @@ func putVolumeInfos(
|
||||
|
||||
return backupStore.PutBackupVolumeInfos(backupName, backupVolumeInfoBuf)
|
||||
}
|
||||
|
||||
func getNamespacesManagedByArgoCD(kbClient kbclient.Client, includedNamespaces []string, log logrus.FieldLogger) []string {
|
||||
var nsManagedByArgoCD []string
|
||||
|
||||
for _, nsName := range includedNamespaces {
|
||||
ns := corev1api.Namespace{}
|
||||
if err := kbClient.Get(context.Background(), kbclient.ObjectKey{Name: nsName}, &ns); err != nil {
|
||||
// check for only those ns that exist and are included in backup
|
||||
// here we ignore cases like "" or "*" specified under includedNamespaces
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Errorf("error getting namespace %s", nsName)
|
||||
continue
|
||||
}
|
||||
|
||||
nsLabels := ns.GetLabels()
|
||||
if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 {
|
||||
nsManagedByArgoCD = append(nsManagedByArgoCD, nsName)
|
||||
}
|
||||
}
|
||||
return nsManagedByArgoCD
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: &h.itemBlockPool,
|
||||
}
|
||||
|
||||
backupFile := bytes.NewBuffer([]byte{})
|
||||
@@ -141,7 +141,7 @@ func TestBackupProgressIsUpdated(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: &h.itemBlockPool,
|
||||
}
|
||||
backupFile := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -881,7 +881,7 @@ func TestBackupOldResourceFiltering(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1062,7 +1062,7 @@ func TestCRDInclusion(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1161,7 +1161,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1190,7 +1190,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: &h.itemBlockPool,
|
||||
}
|
||||
backup1File := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -1206,7 +1206,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: &h.itemBlockPool,
|
||||
}
|
||||
backup2File := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -1260,7 +1260,7 @@ func TestBackupResourceOrdering(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1381,7 +1381,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
Backup: defaultBackup().SnapshotVolumes(false).Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
resPolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
@@ -1428,8 +1428,8 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
},
|
||||
includedPVs: map[string]struct{}{},
|
||||
},
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVCs(
|
||||
@@ -1679,7 +1679,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1764,7 +1764,7 @@ func TestBackupWithInvalidActions(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1918,7 +1918,7 @@ func TestBackupActionModifications(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2178,7 +2178,7 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2439,7 +2439,7 @@ func TestItemBlockActionsRunForCorrectItems(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2524,7 +2524,7 @@ func TestBackupWithInvalidItemBlockActions(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2780,7 +2780,7 @@ func TestItemBlockActionRelatedItems(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2948,7 +2948,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -2984,7 +2984,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3021,7 +3021,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3058,7 +3058,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3095,7 +3095,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3130,7 +3130,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3148,7 +3148,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3169,7 +3169,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3188,7 +3188,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3210,7 +3210,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3269,7 +3269,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
err := h.backupper.Backup(h.log, tc.req, backupFile, nil, nil, tc.snapshotterGetter)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tc.want, tc.req.VolumeSnapshots)
|
||||
assert.Equal(t, tc.want, tc.req.VolumeSnapshots.Get())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -3344,7 +3344,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3376,7 +3376,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3408,7 +3408,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3494,7 +3494,7 @@ func TestBackupWithInvalidHooks(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -3968,7 +3968,7 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
podCommandExecutor = new(test.MockPodCommandExecutor)
|
||||
@@ -4193,7 +4193,7 @@ func TestBackupWithPodVolume(t *testing.T) {
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{tc.vsl},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -4213,7 +4213,7 @@ func TestBackupWithPodVolume(t *testing.T) {
|
||||
assert.Equal(t, tc.want, req.PodVolumeBackups)
|
||||
|
||||
// this assumes that we don't have any test cases where some PVs should be snapshotted using a VolumeSnapshotter
|
||||
assert.Nil(t, req.VolumeSnapshots)
|
||||
assert.Nil(t, req.VolumeSnapshots.Get())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5312,7 +5312,7 @@ func TestBackupNewResourceFiltering(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -5477,7 +5477,7 @@ func TestBackupNamespaces(t *testing.T) {
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
WorkerPool: itemBlockPool,
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -5578,6 +5578,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
|
||||
SnapshotID("snapshot-id").
|
||||
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
|
||||
IncrementalBytes(500).
|
||||
Phase(velerov2alpha1.DataUploadPhaseFailed).
|
||||
SourceNamespace("ns-1").
|
||||
SourcePVC("pvc-1").
|
||||
@@ -5603,6 +5604,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
RetainedSnapshot: "vs-1",
|
||||
SnapshotHandle: "snapshot-id",
|
||||
Size: 1000,
|
||||
IncrementalSize: 500,
|
||||
Phase: velerov2alpha1.DataUploadPhaseFailed,
|
||||
},
|
||||
},
|
||||
@@ -5616,6 +5618,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
|
||||
SnapshotID("snapshot-id").
|
||||
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
|
||||
IncrementalBytes(500).
|
||||
Phase(velerov2alpha1.DataUploadPhaseCompleted).
|
||||
SourceNamespace("ns-1").
|
||||
SourcePVC("pvc-1").
|
||||
@@ -5641,6 +5644,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
RetainedSnapshot: "vs-1",
|
||||
SnapshotHandle: "snapshot-id",
|
||||
Size: 1000,
|
||||
IncrementalSize: 500,
|
||||
Phase: velerov2alpha1.DataUploadPhaseCompleted,
|
||||
},
|
||||
},
|
||||
@@ -5655,6 +5659,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
|
||||
SnapshotID("snapshot-id").
|
||||
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
|
||||
IncrementalBytes(500).
|
||||
Phase(velerov2alpha1.DataUploadPhaseCompleted).
|
||||
SourceNamespace("ns-1").
|
||||
SourcePVC("pvc-1").
|
||||
|
||||
@@ -69,14 +69,13 @@ type itemBackupper struct {
|
||||
kbClient kbClient.Client
|
||||
discoveryHelper discovery.Helper
|
||||
podVolumeBackupper podvolume.Backupper
|
||||
podVolumeContext context.Context
|
||||
podVolumeSnapshotTracker *podvolume.Tracker
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter
|
||||
kubernetesBackupper *kubernetesBackupper
|
||||
|
||||
itemHookHandler hook.ItemHookHandler
|
||||
snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter
|
||||
hookTracker *hook.HookTracker
|
||||
volumeHelperImpl volumehelper.VolumeHelper
|
||||
volumeSnapshotterCache *VolumeSnapshotterCache
|
||||
itemHookHandler hook.ItemHookHandler
|
||||
hookTracker *hook.HookTracker
|
||||
volumeHelperImpl volumehelper.VolumeHelper
|
||||
}
|
||||
|
||||
type FileForArchive struct {
|
||||
@@ -502,30 +501,6 @@ func (ib *itemBackupper) executeActions(
|
||||
return obj, itemFiles, nil
|
||||
}
|
||||
|
||||
// volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation,
|
||||
// or returns an existing one if one's already been initialized for the location.
|
||||
func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
|
||||
if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok {
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
bs, err := ib.volumeSnapshotterGetter.GetVolumeSnapshotter(snapshotLocation.Spec.Provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := bs.Init(snapshotLocation.Spec.Config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ib.snapshotLocationVolumeSnapshotters == nil {
|
||||
ib.snapshotLocationVolumeSnapshotters = make(map[string]vsv1.VolumeSnapshotter)
|
||||
}
|
||||
ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name] = bs
|
||||
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
// zoneLabelDeprecated is the label that stores availability-zone info
|
||||
// on PVs this is deprecated on Kubernetes >= 1.17.0
|
||||
// zoneLabel is the label that stores availability-zone info
|
||||
@@ -641,7 +616,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
for _, snapshotLocation := range ib.backupRequest.SnapshotLocations {
|
||||
log := log.WithField("volumeSnapshotLocation", snapshotLocation.Name)
|
||||
|
||||
bs, err := ib.volumeSnapshotter(snapshotLocation)
|
||||
bs, err := ib.volumeSnapshotterCache.SetNX(snapshotLocation)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error getting volume snapshotter for volume snapshot location")
|
||||
continue
|
||||
@@ -699,7 +674,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
snapshot.Status.Phase = volume.SnapshotPhaseCompleted
|
||||
snapshot.Status.ProviderSnapshotID = snapshotID
|
||||
}
|
||||
ib.backupRequest.VolumeSnapshots = append(ib.backupRequest.VolumeSnapshots, snapshot)
|
||||
ib.backupRequest.VolumeSnapshots.Add(snapshot)
|
||||
|
||||
// nil errors are automatically removed
|
||||
return kubeerrs.NewAggregate(errs)
|
||||
@@ -712,15 +687,14 @@ func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
pvName := pvc.Spec.VolumeName
|
||||
if pvName == "" {
|
||||
return nil, errors.Errorf("PVC has no volume backing this claim")
|
||||
}
|
||||
|
||||
pv := &corev1api.PersistentVolume{}
|
||||
if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
var pv *corev1api.PersistentVolume
|
||||
if pvName := pvc.Spec.VolumeName; pvName != "" {
|
||||
pv = &corev1api.PersistentVolume{}
|
||||
if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
// If pv is nil for unbound PVCs - policy matching will use PVC-only conditions
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
return ib.backupRequest.ResPolicies.GetMatchAction(vfd)
|
||||
}
|
||||
@@ -734,7 +708,10 @@ func (ib *itemBackupper) trackSkippedPV(obj runtime.Unstructured, groupResource
|
||||
if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
|
||||
ib.backupRequest.SkippedPVTracker.Track(name, approach, reason)
|
||||
} else if err != nil {
|
||||
log.WithError(err).Warnf("unable to get PV name, skip tracking.")
|
||||
// Log at info level for tracking purposes. This is not an error because
|
||||
// it's expected for some resources (e.g., PVCs in Pending or Lost phase)
|
||||
// to not have a PV name. This occurs when volume policy skips unbound PVCs.
|
||||
log.WithError(err).Infof("unable to get PV name, skip tracking.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -744,6 +721,17 @@ func (ib *itemBackupper) unTrackSkippedPV(obj runtime.Unstructured, groupResourc
|
||||
if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
|
||||
ib.backupRequest.SkippedPVTracker.Untrack(name)
|
||||
} else if err != nil {
|
||||
// For PVCs in Pending or Lost phase, it's expected that there's no PV name.
|
||||
// Log at debug level instead of warning to reduce noise.
|
||||
if groupResource == kuberesource.PersistentVolumeClaims {
|
||||
pvc := new(corev1api.PersistentVolumeClaim)
|
||||
if convErr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); convErr == nil {
|
||||
if pvc.Status.Phase == corev1api.ClaimPending || pvc.Status.Phase == corev1api.ClaimLost {
|
||||
log.WithError(err).Debugf("unable to get PV name for %s PVC, skip untracking.", pvc.Status.Phase)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
log.WithError(err).Warnf("unable to get PV name, skip untracking.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,12 +17,15 @@ limitations under the License.
|
||||
package backup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -269,3 +272,225 @@ func TestAddVolumeInfo(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMatchAction_PendingLostPVC(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
require.NoError(t, corev1api.AddToScheme(scheme))
|
||||
|
||||
// Create resource policies that skip Pending/Lost PVCs
|
||||
resPolicies := &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcPhase": []string{"Pending", "Lost"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
Type: resourcepolicies.Skip,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
policies := &resourcepolicies.Policies{}
|
||||
err := policies.BuildPolicy(resPolicies)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
pv *corev1api.PersistentVolume
|
||||
expectedAction *resourcepolicies.Action
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC with no VolumeName should match pvcPhase policy",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
|
||||
StorageClass("test-sc").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
pv: nil,
|
||||
expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Lost PVC with no VolumeName should match pvcPhase policy",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
|
||||
StorageClass("test-sc").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
pv: nil,
|
||||
expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Bound PVC with VolumeName and matching PV should not match pvcPhase policy",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
|
||||
StorageClass("test-sc").
|
||||
VolumeName("test-pv").
|
||||
Phase(corev1api.ClaimBound).
|
||||
Result(),
|
||||
pv: builder.ForPersistentVolume("test-pv").StorageClass("test-sc").Result(),
|
||||
expectedAction: nil,
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Build fake client with PV if present
|
||||
clientBuilder := ctrlfake.NewClientBuilder().WithScheme(scheme)
|
||||
if tc.pv != nil {
|
||||
clientBuilder = clientBuilder.WithObjects(tc.pv)
|
||||
}
|
||||
fakeClient := clientBuilder.Build()
|
||||
|
||||
ib := &itemBackupper{
|
||||
kbClient: fakeClient,
|
||||
backupRequest: &Request{
|
||||
ResPolicies: policies,
|
||||
},
|
||||
}
|
||||
|
||||
// Convert PVC to unstructured
|
||||
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
|
||||
require.NoError(t, err)
|
||||
obj := &unstructured.Unstructured{Object: pvcData}
|
||||
|
||||
action, err := ib.getMatchAction(obj, kuberesource.PersistentVolumeClaims, csiBIAPluginName)
|
||||
if tc.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if tc.expectedAction == nil {
|
||||
assert.Nil(t, action)
|
||||
} else {
|
||||
require.NotNil(t, action)
|
||||
assert.Equal(t, tc.expectedAction.Type, action.Type)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrackSkippedPV_PendingLostPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC should log at info level",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "Lost PVC should log at info level",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
},
|
||||
{
|
||||
name: "Bound PVC without VolumeName should log at info level",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
|
||||
Phase(corev1api.ClaimBound).
|
||||
Result(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ib := &itemBackupper{
|
||||
backupRequest: &Request{
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
},
|
||||
}
|
||||
|
||||
// Set up log capture
|
||||
logOutput := &bytes.Buffer{}
|
||||
logger := logrus.New()
|
||||
logger.SetOutput(logOutput)
|
||||
logger.SetLevel(logrus.DebugLevel)
|
||||
|
||||
// Convert PVC to unstructured
|
||||
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
|
||||
require.NoError(t, err)
|
||||
obj := &unstructured.Unstructured{Object: pvcData}
|
||||
|
||||
ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, "", "test reason", logger)
|
||||
|
||||
logStr := logOutput.String()
|
||||
assert.Contains(t, logStr, "level=info")
|
||||
assert.Contains(t, logStr, "unable to get PV name, skip tracking.")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnTrackSkippedPV_PendingLostPVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
expectWarningLog bool
|
||||
expectDebugMessage string
|
||||
}{
|
||||
{
|
||||
name: "Pending PVC should log at debug level, not warning",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
|
||||
Phase(corev1api.ClaimPending).
|
||||
Result(),
|
||||
expectWarningLog: false,
|
||||
expectDebugMessage: "unable to get PV name for Pending PVC, skip untracking.",
|
||||
},
|
||||
{
|
||||
name: "Lost PVC should log at debug level, not warning",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
|
||||
Phase(corev1api.ClaimLost).
|
||||
Result(),
|
||||
expectWarningLog: false,
|
||||
expectDebugMessage: "unable to get PV name for Lost PVC, skip untracking.",
|
||||
},
|
||||
{
|
||||
name: "Bound PVC without VolumeName should log warning",
|
||||
pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
|
||||
Phase(corev1api.ClaimBound).
|
||||
Result(),
|
||||
expectWarningLog: true,
|
||||
expectDebugMessage: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ib := &itemBackupper{
|
||||
backupRequest: &Request{
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
},
|
||||
}
|
||||
|
||||
// Set up log capture
|
||||
logOutput := &bytes.Buffer{}
|
||||
logger := logrus.New()
|
||||
logger.SetOutput(logOutput)
|
||||
logger.SetLevel(logrus.DebugLevel)
|
||||
|
||||
// Convert PVC to unstructured
|
||||
pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
|
||||
require.NoError(t, err)
|
||||
obj := &unstructured.Unstructured{Object: pvcData}
|
||||
|
||||
ib.unTrackSkippedPV(obj, kuberesource.PersistentVolumeClaims, logger)
|
||||
|
||||
logStr := logOutput.String()
|
||||
if tc.expectWarningLog {
|
||||
assert.Contains(t, logStr, "level=warning")
|
||||
assert.Contains(t, logStr, "unable to get PV name, skip untracking.")
|
||||
} else {
|
||||
assert.NotContains(t, logStr, "level=warning")
|
||||
if tc.expectDebugMessage != "" {
|
||||
assert.Contains(t, logStr, "level=debug")
|
||||
assert.Contains(t, logStr, tc.expectDebugMessage)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ type itemCollector struct {
|
||||
type nsTracker struct {
|
||||
singleLabelSelector labels.Selector
|
||||
orLabelSelector []labels.Selector
|
||||
namespaceFilter *collections.IncludesExcludes
|
||||
namespaceFilter *collections.NamespaceIncludesExcludes
|
||||
logger logrus.FieldLogger
|
||||
|
||||
namespaceMap map[string]bool
|
||||
@@ -103,7 +103,7 @@ func (nt *nsTracker) init(
|
||||
unstructuredNSs []unstructured.Unstructured,
|
||||
singleLabelSelector labels.Selector,
|
||||
orLabelSelector []labels.Selector,
|
||||
namespaceFilter *collections.IncludesExcludes,
|
||||
namespaceFilter *collections.NamespaceIncludesExcludes,
|
||||
logger logrus.FieldLogger,
|
||||
) {
|
||||
if nt.namespaceMap == nil {
|
||||
@@ -635,7 +635,7 @@ func coreGroupResourcePriority(resource string) int {
|
||||
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
|
||||
// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
|
||||
// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
|
||||
func getNamespacesToList(ie *collections.IncludesExcludes) []string {
|
||||
func getNamespacesToList(ie *collections.NamespaceIncludesExcludes) []string {
|
||||
if ie == nil {
|
||||
return []string{""}
|
||||
}
|
||||
@@ -753,21 +753,28 @@ func (r *itemCollector) collectNamespaces(
|
||||
}
|
||||
|
||||
unstructuredList, err := resourceClient.List(metav1.ListOptions{})
|
||||
|
||||
activeNamespacesHashSet := make(map[string]bool)
|
||||
for _, namespace := range unstructuredList.Items {
|
||||
activeNamespacesHashSet[namespace.GetName()] = true
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("error list namespaces")
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
for _, includedNSName := range r.backupRequest.Backup.Spec.IncludedNamespaces {
|
||||
// Change to look at the struct includes/excludes
|
||||
// In case wildcards are expanded, we need to look at the struct includes/excludes
|
||||
for _, includedNSName := range r.backupRequest.NamespaceIncludesExcludes.GetIncludes() {
|
||||
nsExists := false
|
||||
// Skip checking the namespace existing when it's "*".
|
||||
if includedNSName == "*" {
|
||||
continue
|
||||
}
|
||||
for _, unstructuredNS := range unstructuredList.Items {
|
||||
if unstructuredNS.GetName() == includedNSName {
|
||||
nsExists = true
|
||||
}
|
||||
|
||||
if _, ok := activeNamespacesHashSet[includedNSName]; ok {
|
||||
nsExists = true
|
||||
}
|
||||
|
||||
if !nsExists {
|
||||
@@ -809,17 +816,18 @@ func (r *itemCollector) collectNamespaces(
|
||||
var items []*kubernetesResource
|
||||
|
||||
for index := range unstructuredList.Items {
|
||||
nsName := unstructuredList.Items[index].GetName()
|
||||
|
||||
path, err := r.writeToFile(&unstructuredList.Items[index])
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Error writing item %s to file",
|
||||
unstructuredList.Items[index].GetName())
|
||||
log.WithError(err).Errorf("Error writing item %s to file", nsName)
|
||||
continue
|
||||
}
|
||||
|
||||
items = append(items, &kubernetesResource{
|
||||
groupResource: gr,
|
||||
preferredGVR: preferredGVR,
|
||||
name: unstructuredList.Items[index].GetName(),
|
||||
name: nsName,
|
||||
path: path,
|
||||
kind: resource.Kind,
|
||||
})
|
||||
|
||||
@@ -153,7 +153,7 @@ func TestFilterNamespaces(t *testing.T) {
|
||||
func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ie *collections.IncludesExcludes
|
||||
ie *collections.NamespaceIncludesExcludes
|
||||
namespaces []*corev1api.Namespace
|
||||
backup *velerov1api.Backup
|
||||
expectedTrackedNS []string
|
||||
@@ -162,7 +162,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
{
|
||||
name: "ns filter by namespace IE filter",
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("ns1"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -174,7 +174,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"name": "ns1"},
|
||||
}).Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("*"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -186,7 +186,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
|
||||
{MatchLabels: map[string]string{"name": "ns1"}},
|
||||
}).Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("*"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -198,7 +198,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"name": "ns1"},
|
||||
}).Result(),
|
||||
ie: collections.NewIncludesExcludes().Excludes("ns1"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -210,7 +210,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
|
||||
{MatchLabels: map[string]string{"name": "ns1"}},
|
||||
}).Result(),
|
||||
ie: collections.NewIncludesExcludes().Excludes("ns1", "ns2"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1", "ns2"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -221,7 +221,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
{
|
||||
name: "No ns filters",
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("*"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -231,7 +231,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
{
|
||||
name: "ns specified by the IncludeNamespaces cannot be found",
|
||||
backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1", "invalid", "*"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
@@ -242,7 +242,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
|
||||
{
|
||||
name: "terminating ns should not tracked",
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
ie: collections.NewIncludesExcludes().Includes("ns1", "ns2"),
|
||||
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1", "ns2"),
|
||||
namespaces: []*corev1api.Namespace{
|
||||
builder.ForNamespace("ns1").Phase(corev1api.NamespaceTerminating).Result(),
|
||||
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
|
||||
|
||||
@@ -17,6 +17,8 @@ limitations under the License.
package backup

import (
	"sync"

	"github.com/vmware-tanzu/velero/internal/hook"
	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
	"github.com/vmware-tanzu/velero/internal/volume"
@@ -32,26 +34,42 @@ type itemKey struct {
	name      string
}

type SynchronizedVSList struct {
	sync.Mutex
	VolumeSnapshotList []*volume.Snapshot
}

func (s *SynchronizedVSList) Add(vs *volume.Snapshot) {
	s.Lock()
	defer s.Unlock()
	s.VolumeSnapshotList = append(s.VolumeSnapshotList, vs)
}

func (s *SynchronizedVSList) Get() []*volume.Snapshot {
	s.Lock()
	defer s.Unlock()
	return s.VolumeSnapshotList
}

// Request is a request for a backup, with all references to other objects
// materialized (e.g. backup/snapshot locations, includes/excludes, etc.)
type Request struct {
	*velerov1api.Backup

	StorageLocation           *velerov1api.BackupStorageLocation
	SnapshotLocations         []*velerov1api.VolumeSnapshotLocation
	NamespaceIncludesExcludes *collections.IncludesExcludes
	NamespaceIncludesExcludes *collections.NamespaceIncludesExcludes
	ResourceIncludesExcludes  collections.IncludesExcludesInterface
	ResourceHooks             []hook.ResourceHook
	ResolvedActions           []framework.BackupItemResolvedActionV2
	ResolvedItemBlockActions  []framework.ItemBlockResolvedAction
	VolumeSnapshots           []*volume.Snapshot
	VolumeSnapshots           SynchronizedVSList
	PodVolumeBackups          []*velerov1api.PodVolumeBackup
	BackedUpItems             *backedUpItemsMap
	itemOperationsList        *[]*itemoperation.BackupOperation
	ResPolicies               *resourcepolicies.Policies
	SkippedPVTracker          *skipPVTracker
	VolumesInformation        volume.BackupVolumesInformation
	ItemBlockChannel          chan ItemBlockInput
	WorkerPool                *ItemBlockWorkerPool
}

// BackupVolumesInformation contains the information needs by generating
@@ -80,8 +98,12 @@ func (r *Request) FillVolumesInformation() {
	}

	r.VolumesInformation.SkippedPVs = skippedPVMap
	r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots
	r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots.Get()
	r.VolumesInformation.PodVolumeBackups = r.PodVolumeBackups
	r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
	r.VolumesInformation.BackupName = r.Backup.Name
}

func (r *Request) StopWorkerPool() {
	r.WorkerPool.Stop()
}
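
SynchronizedVSList exists so that concurrent item-block workers can append native snapshots without a data race. The minimal, self-contained sketch below demonstrates that usage with a simplified Snapshot type standing in for volume.Snapshot; the mutex-guarded Add/Get methods mirror the ones added in this change.

package main

import (
	"fmt"
	"sync"
)

// Snapshot stands in for volume.Snapshot; the real type carries much more state.
type Snapshot struct{ Name string }

// SynchronizedVSList mirrors the mutex-guarded list added in this change.
type SynchronizedVSList struct {
	sync.Mutex
	VolumeSnapshotList []*Snapshot
}

func (s *SynchronizedVSList) Add(vs *Snapshot) {
	s.Lock()
	defer s.Unlock()
	s.VolumeSnapshotList = append(s.VolumeSnapshotList, vs)
}

func (s *SynchronizedVSList) Get() []*Snapshot {
	s.Lock()
	defer s.Unlock()
	return s.VolumeSnapshotList
}

func main() {
	var list SynchronizedVSList
	var wg sync.WaitGroup
	// Several workers append concurrently, as item-block workers would.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			list.Add(&Snapshot{Name: fmt.Sprintf("snap-%d", n)})
		}(i)
	}
	wg.Wait()
	fmt.Println(len(list.Get())) // 4
}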

pkg/backup/volume_snapshotter_cache.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package backup

import (
	"sync"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
)

type VolumeSnapshotterCache struct {
	cache  map[string]vsv1.VolumeSnapshotter
	mutex  sync.Mutex
	getter VolumeSnapshotterGetter
}

func NewVolumeSnapshotterCache(getter VolumeSnapshotterGetter) *VolumeSnapshotterCache {
	return &VolumeSnapshotterCache{
		cache:  make(map[string]vsv1.VolumeSnapshotter),
		getter: getter,
	}
}

func (c *VolumeSnapshotterCache) SetNX(location *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if snapshotter, exists := c.cache[location.Name]; exists {
		return snapshotter, nil
	}

	snapshotter, err := c.getter.GetVolumeSnapshotter(location.Spec.Provider)
	if err != nil {
		return nil, err
	}

	if err := snapshotter.Init(location.Spec.Config); err != nil {
		return nil, err
	}

	c.cache[location.Name] = snapshotter
	return snapshotter, nil
}
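
SetNX ("set if not exists") makes plugin lookup and Init run at most once per snapshot location, even when many goroutines take snapshots concurrently. The simplified, self-contained sketch below demonstrates that property with a stub snapshotter and an init counter; it deliberately does not use the real vsv1.VolumeSnapshotter interface, and the stub names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

// stubSnapshotter is a hypothetical stand-in for a volume snapshotter plugin.
type stubSnapshotter struct{ provider string }

// setNXCache mirrors the locking get-or-create behavior of VolumeSnapshotterCache.
type setNXCache struct {
	mu        sync.Mutex
	cache     map[string]*stubSnapshotter
	initCalls int
}

func (c *setNXCache) SetNX(location string) *stubSnapshotter {
	c.mu.Lock()
	defer c.mu.Unlock()
	if s, ok := c.cache[location]; ok {
		return s // already initialized for this location
	}
	c.initCalls++ // stands in for GetVolumeSnapshotter + Init
	s := &stubSnapshotter{provider: "aws"}
	c.cache[location] = s
	return s
}

func main() {
	c := &setNXCache{cache: map[string]*stubSnapshotter{}}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.SetNX("default") // all goroutines share one location
		}()
	}
	wg.Wait()
	fmt.Println(c.initCalls) // 1: initialization happened exactly once
}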
@@ -222,6 +222,12 @@ func (b *BackupBuilder) Phase(phase velerov1api.BackupPhase) *BackupBuilder {
|
||||
return b
|
||||
}
|
||||
|
||||
// QueuePosition sets the Backup's queue position.
|
||||
func (b *BackupBuilder) QueuePosition(queuePos int) *BackupBuilder {
|
||||
b.object.Status.QueuePosition = queuePos
|
||||
return b
|
||||
}
|
||||
|
||||
// StorageLocation sets the Backup's storage location.
|
||||
func (b *BackupBuilder) StorageLocation(location string) *BackupBuilder {
|
||||
b.object.Spec.StorageLocation = location
|
||||
|
||||
@@ -93,6 +93,15 @@ func (b *BackupStorageLocationBuilder) CACert(val []byte) *BackupStorageLocation
    return b
}

// CACertRef sets the BackupStorageLocation's object storage CACertRef (Secret reference).
func (b *BackupStorageLocationBuilder) CACertRef(selector *corev1api.SecretKeySelector) *BackupStorageLocationBuilder {
    if b.object.Spec.StorageType.ObjectStorage == nil {
        b.object.Spec.StorageType.ObjectStorage = new(velerov1api.ObjectStorageLocation)
    }
    b.object.Spec.ObjectStorage.CACertRef = selector
    return b
}

// Default sets the BackupStorageLocation's is default or not
func (b *BackupStorageLocationBuilder) Default(isDefault bool) *BackupStorageLocationBuilder {
    b.object.Spec.Default = isDefault

@@ -22,6 +22,8 @@ import (

    corev1api "k8s.io/api/core/v1"
    apimachineryRuntime "k8s.io/apimachinery/pkg/runtime"

+   "github.com/vmware-tanzu/velero/pkg/label"
)

// ContainerBuilder builds Container objects
@@ -45,9 +47,9 @@ func ForPluginContainer(image string, pullPolicy corev1api.PullPolicy) *Containe
    return ForContainer(getName(image), image).PullPolicy(pullPolicy).VolumeMounts(volumeMount)
}

-// getName returns the 'name' component of a docker
-// image that includes the entire string except the registry name, and transforms the combined
-// string into a RFC-1123 compatible name.
+// getName returns the 'name' component of a docker image that includes the entire string
+// except the registry name, and transforms the combined string into a DNS-1123 compatible name
+// that fits within the 63-character limit for Kubernetes container names.
func getName(image string) string {
    slashIndex := strings.Index(image, "/")
    slashCount := 0
@@ -83,7 +85,10 @@ func getName(image string) string {
    re := strings.NewReplacer("/", "-",
        "_", "-",
        ".", "-")
-   return re.Replace(image[start:end])
+   name := re.Replace(image[start:end])
+
+   // Ensure the name doesn't exceed Kubernetes container name length limit
+   return label.GetValidName(name)
}

// Result returns the built Container.

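Note: getName now funnels the sanitized image name through label.GetValidName so the result stays within the 63-character DNS-1123 label limit while remaining deterministic. The standalone sketch below illustrates the general truncate-plus-hash idea only; it is not the exact algorithm used by label.GetValidName:

package main

import (
    "crypto/sha256"
    "fmt"
)

// validName shortens names longer than 63 characters while keeping them
// deterministic by appending a short digest of the full string.
func validName(s string) string {
    const limit = 63
    if len(s) <= limit {
        return s
    }
    suffix := fmt.Sprintf("%x", sha256.Sum256([]byte(s)))[:6]
    return s[:limit-len(suffix)] + suffix
}

func main() {
    long := "registry.example.com-org-team-project-subproject-component-service-application-name"
    fmt.Println(len(validName(long)))                // 63
    fmt.Println(validName(long) == validName(long))  // true: same input, same name
}
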
@@ -100,3 +100,50 @@ func TestGetName(t *testing.T) {
        })
    }
}

func TestGetNameWithLongPaths(t *testing.T) {
    tests := []struct {
        name     string
        image    string
        validate func(t *testing.T, result string)
    }{
        {
            name:  "plugin with deeply nested repository path exceeding 63 characters",
            image: "arohcpsvcdev.azurecr.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-main@sha256:adb840bf3890b4904a8cdda1a74c82cf8d96c52eba9944ac10e795335d6fd450",
            validate: func(t *testing.T, result string) {
                t.Helper()
                // Should not exceed DNS-1123 label limit of 63 characters
                assert.LessOrEqual(t, len(result), 63, "Container name must satisfy DNS-1123 label constraints (max 63 chars)")
                // Should be exactly 63 characters (truncated with hash)
                assert.Len(t, result, 63)
                // Should be deterministic
                result2 := getName("arohcpsvcdev.azurecr.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-main@sha256:adb840bf3890b4904a8cdda1a74c82cf8d96c52eba9944ac10e795335d6fd450")
                assert.Equal(t, result, result2)
            },
        },
        {
            name:  "plugin with normal path length (should remain unchanged)",
            image: "arohcpsvcdev.azurecr.io/konveyor/velero-plugin-for-microsoft-azure@sha256:b2db5f09da514e817a74c992dcca5f90b77c2ab0b2797eba947d224271d6070e",
            validate: func(t *testing.T, result string) {
                t.Helper()
                assert.Equal(t, "konveyor-velero-plugin-for-microsoft-azure", result)
                assert.LessOrEqual(t, len(result), 63)
            },
        },
        {
            name:  "very long nested path",
            image: "registry.example.com/org/team/project/subproject/component/service/application-name-with-many-words:v1.2.3",
            validate: func(t *testing.T, result string) {
                t.Helper()
                assert.LessOrEqual(t, len(result), 63)
            },
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            result := getName(test.image)
            test.validate(t, result)
        })
    }
}

@@ -145,6 +145,12 @@ func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress)
    return d
}

// IncrementalBytes sets the DataUpload's IncrementalBytes.
func (d *DataUploadBuilder) IncrementalBytes(incrementalBytes int64) *DataUploadBuilder {
    d.object.Status.IncrementalBytes = incrementalBytes
    return d
}

// Node sets the DataUpload's Node.
func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
    d.object.Status.Node = node
@@ -180,3 +186,9 @@ func (d *DataUploadBuilder) Message(msg string) *DataUploadBuilder {
    d.object.Status.Message = msg
    return d
}

// TotalBytes sets the DataUpload's TotalBytes.
func (d *DataUploadBuilder) TotalBytes(size int64) *DataUploadBuilder {
    d.object.Status.Progress.TotalBytes = size
    return d
}

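Note: a hypothetical test snippet chaining the new DataUploadBuilder setters; ForDataUpload is assumed to be the package's existing constructor and velerov2alpha1api its existing alias for the DataUpload API types (neither is part of this diff):

func newTestDataUpload() *velerov2alpha1api.DataUpload {
    return ForDataUpload("velero", "du-1").
        Node("worker-1").
        TotalBytes(10 << 20).      // written to Status.Progress.TotalBytes
        IncrementalBytes(2 << 20). // written to Status.IncrementalBytes
        Result()
}
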
pkg/builder/priority_class_builder.go (new file, 52 lines)
@@ -0,0 +1,52 @@
/*
Copyright 2019 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package builder

import (
    corev1api "k8s.io/api/core/v1"
    schedulingv1api "k8s.io/api/scheduling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type PriorityClassBuilder struct {
    object *schedulingv1api.PriorityClass
}

func ForPriorityClass(name string) *PriorityClassBuilder {
    return &PriorityClassBuilder{
        object: &schedulingv1api.PriorityClass{
            ObjectMeta: metav1.ObjectMeta{
                Name: name,
            },
        },
    }
}

func (p *PriorityClassBuilder) Value(value int) *PriorityClassBuilder {
    p.object.Value = int32(value)
    return p
}

func (p *PriorityClassBuilder) PreemptionPolicy(policy string) *PriorityClassBuilder {
    preemptionPolicy := corev1api.PreemptionPolicy(policy)
    p.object.PreemptionPolicy = &preemptionPolicy
    return p
}

func (p *PriorityClassBuilder) Result() *schedulingv1api.PriorityClass {
    return p.object
}
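Note: a sketch of how a test might exercise the new builder (illustrative values; assumes the pkg/builder test files' usual testify assert import):

func TestForPriorityClass(t *testing.T) {
    // "PreemptLowerPriority" and "Never" are the two preemption policies
    // Kubernetes accepts; the value here is arbitrary.
    pc := ForPriorityClass("velero-data-mover").
        Value(1000000).
        PreemptionPolicy("PreemptLowerPriority").
        Result()

    assert.Equal(t, "velero-data-mover", pc.Name)
    assert.Equal(t, int32(1000000), pc.Value)
}
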
@@ -17,6 +17,7 @@ limitations under the License.
package builder

import (
+   corev1api "k8s.io/api/core/v1"
    storagev1api "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -87,3 +88,9 @@ func (b *StorageClassBuilder) Provisioner(provisioner string) *StorageClassBuild
    b.object.Provisioner = provisioner
    return b
}

// ReclaimPolicy sets StorageClass's reclaimPolicy.
func (b *StorageClassBuilder) ReclaimPolicy(policy corev1api.PersistentVolumeReclaimPolicy) *StorageClassBuilder {
    b.object.ReclaimPolicy = &policy
    return b
}

@@ -102,6 +102,11 @@ type StatusUpdater interface {
    UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error)
}

// Applier applies changes to an object using server-side apply
type Applier interface {
    Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error)
}

// Dynamic contains client methods that Velero needs for backing up and restoring resources.
type Dynamic interface {
    Creator
@@ -111,6 +116,7 @@ type Dynamic interface {
    Patcher
    Deletor
    StatusUpdater
+   Applier
}

// dynamicResourceClient implements Dynamic.
@@ -136,6 +142,10 @@ func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstr
    return d.resourceClient.Get(context.TODO(), name, opts)
}

func (d *dynamicResourceClient) Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) {
    return d.resourceClient.Apply(context.TODO(), name, obj, opts)
}

func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) {
    return d.resourceClient.Patch(context.TODO(), name, types.MergePatchType, data, metav1.PatchOptions{})
}

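Note: server-side apply needs a field manager, and Force lets that manager take over fields previously owned by another applier. A hypothetical caller of the new Applier method could look like this (the function and manager name are illustrative, not part of this changeset; assumes an import of the velero pkg/client package and metav1):

func applyResource(dynamicClient client.Dynamic, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
    return dynamicClient.Apply(obj.GetName(), obj, metav1.ApplyOptions{
        FieldManager: "velero-install", // any stable, non-empty manager name
        Force:        true,
    })
}
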
@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
        return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)
    if err != nil {

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
    if os.Getenv(cmdtest.CaptureFlag) == "1" {
        return
    }
-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)

@@ -91,7 +91,7 @@ func TestNewDownloadCommand(t *testing.T) {
    assert.NoError(t, e)
    return
    }
-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    _, stderr, err := veleroexec.RunCommand(cmd)

@@ -63,7 +63,7 @@ func TestNewGetCommand(t *testing.T) {
    return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)
    require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestNewGetCommand(t *testing.T) {
    e = d.Execute()
    require.NoError(t, e)

-   cmd = exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+   cmd = exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err = veleroexec.RunCommand(cmd)
    require.NoError(t, err)

@@ -66,7 +66,7 @@ func TestNewDeleteCommand(t *testing.T) {
    return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)

@@ -50,7 +50,7 @@ func TestNewGetCommand(t *testing.T) {
    c.Execute()
    return
    }
-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    _, stderr, err := veleroexec.RunCommand(cmd)

@@ -99,7 +99,7 @@ func TestSetCommand_Execute(t *testing.T) {
    return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    _, stderr, err := veleroexec.RunCommand(cmd)

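Note: these CLI test hunks all make the same change: the re-exec'd test helper process is now created with exec.CommandContext bound to t.Context() (added in Go 1.24), so the child is killed when the test ends or times out instead of outliving it. A minimal standalone illustration of the pattern, assuming the usual os, os/exec, and testing imports:

func TestReexecPattern(t *testing.T) {
    if os.Getenv("CAPTURE_OUTPUT") == "1" {
        // Child invocation: run the code under test, then return.
        return
    }
    // The child process is tied to the test's context.
    cmd := exec.CommandContext(t.Context(), os.Args[0], "-test.run=TestReexecPattern")
    cmd.Env = append(os.Environ(), "CAPTURE_OUTPUT=1")
    out, err := cmd.CombinedOutput()
    if err != nil {
        t.Fatalf("child run failed: %v\n%s", err, out)
    }
}
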
@@ -18,6 +18,7 @@ package bug

import (
    "bytes"
+   "context"
    "errors"
    "fmt"
    "net/url"
@@ -147,7 +148,7 @@ func getKubectlVersion() (string, error) {
    return "", errors.New("kubectl not found on PATH")
    }

-   kubectlCmd := exec.Command("kubectl", "version")
+   kubectlCmd := exec.CommandContext(context.Background(), "kubectl", "version")
    var outbuf bytes.Buffer
    kubectlCmd.Stdout = &outbuf
    if err := kubectlCmd.Start(); err != nil {
@@ -207,16 +208,17 @@ func renderToString(bugInfo *VeleroBugInfo) (string, error) {
// a platform specific binary.
func showIssueInBrowser(body string) error {
    url := issueURL + "?body=" + url.QueryEscape(body)
+   ctx := context.Background()
    switch runtime.GOOS {
    case "darwin":
-       return exec.Command("open", url).Start()
+       return exec.CommandContext(ctx, "open", url).Start()
    case "linux":
        if cmdExistsOnPath("xdg-open") {
-           return exec.Command("xdg-open", url).Start()
+           return exec.CommandContext(ctx, "xdg-open", url).Start()
        }
        return fmt.Errorf("velero can't open a browser window using the command '%s'", "xdg-open")
    case "windows":
-       return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
+       return exec.CommandContext(ctx, "rundll32", "url.dll,FileProtocolHandler", url).Start()
    default:
        return fmt.Errorf("velero can't open a browser window on platform %s", runtime.GOOS)
    }

@@ -53,6 +53,7 @@ type dataMoverRestoreConfig struct {
    volumePath      string
    volumeMode      string
    ddName          string
+   cacheDir        string
    resourceTimeout time.Duration
}

@@ -89,6 +90,7 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
    command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
    command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be restored")
    command.Flags().StringVar(&config.ddName, "data-download", config.ddName, "The data download name")
+   command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
    command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")

    _ = command.MarkFlagRequired("volume-path")
@@ -288,5 +290,5 @@ func (s *dataMoverRestore) createDataPathService() (dataPathService, error) {
    return datamover.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.ddName, s.namespace, s.nodeName, datapath.AccessPoint{
        ByPath:  s.config.volumePath,
        VolMode: uploader.PersistentVolumeMode(s.config.volumeMode),
-   }, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.logger), nil
+   }, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.config.cacheDir, s.logger), nil
}

@@ -89,8 +89,10 @@ type Options struct {
    RepoMaintenanceJobConfigMap string
    NodeAgentConfigMap          string
    ItemBlockWorkerCount        int
+   ConcurrentBackups           int
    NodeAgentDisableHostPath    bool
    kubeletRootDir              string
+   Apply                       bool
    ServerPriorityClassName     string
    NodeAgentPriorityClassName  string
}
@@ -101,6 +103,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
    flags.StringVar(&o.BucketName, "bucket", o.BucketName, "Name of the object storage bucket where backups should be stored")
    flags.StringVar(&o.SecretFile, "secret-file", o.SecretFile, "File containing credentials for backup and volume provider. If not specified, --no-secret must be used for confirmation. Optional.")
    flags.BoolVar(&o.NoSecret, "no-secret", o.NoSecret, "Flag indicating if a secret should be created. Must be used as confirmation if --secret-file is not provided. Optional.")
+   flags.BoolVar(&o.Apply, "apply", o.Apply, "Flag indicating if resources should be applied instead of created. This can be used for updating existing resources.")
    flags.BoolVar(&o.NoDefaultBackupLocation, "no-default-backup-location", o.NoDefaultBackupLocation, "Flag indicating if a default backup location should be created. Must be used as confirmation if --bucket or --provider are not provided. Optional.")
    flags.StringVar(&o.Image, "image", o.Image, "Image to use for the Velero and node agent pods. Optional.")
    flags.StringVar(&o.Prefix, "prefix", o.Prefix, "Prefix under which all Velero data should be stored within the bucket. Optional.")
@@ -196,6 +199,12 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
        o.ItemBlockWorkerCount,
        "Number of worker threads to process ItemBlocks. Default is one. Optional.",
    )
+   flags.IntVar(
+       &o.ConcurrentBackups,
+       "concurrent-backups",
+       o.ConcurrentBackups,
+       "Number of backups to process concurrently. Default is one. Optional.",
+   )
    flags.StringVar(
        &o.ServerPriorityClassName,
        "server-priority-class-name",
@@ -313,6 +322,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
    RepoMaintenanceJobConfigMap: o.RepoMaintenanceJobConfigMap,
    NodeAgentConfigMap:          o.NodeAgentConfigMap,
    ItemBlockWorkerCount:        o.ItemBlockWorkerCount,
+   ConcurrentBackups:           o.ConcurrentBackups,
    KubeletRootDir:              o.kubeletRootDir,
    NodeAgentDisableHostPath:    o.NodeAgentDisableHostPath,
    ServerPriorityClassName:     o.ServerPriorityClassName,
@@ -408,7 +418,7 @@ func (o *Options) Run(c *cobra.Command, f client.Factory) error {

    errorMsg := fmt.Sprintf("\n\nError installing Velero. Use `kubectl logs deploy/velero -n %s` to check the deploy logs", o.Namespace)

-   err = install.Install(dynamicFactory, kbClient, resources, os.Stdout)
+   err = install.Install(dynamicFactory, kbClient, resources, os.Stdout, o.Apply)
    if err != nil {
        return errors.Wrap(err, errorMsg)
    }
@@ -545,24 +555,22 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
        return fmt.Errorf("fail to create go-client %w", err)
    }

-   // If either Linux or Windows node-agent is installed, and the node-agent-configmap
-   // is specified, need to validate the ConfigMap.
-   if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 {
+   if len(o.NodeAgentConfigMap) > 0 {
        if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil {
-           return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap)
+           return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", o.NodeAgentConfigMap, err)
        }
    }

    if len(o.RepoMaintenanceJobConfigMap) > 0 {
        if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil {
-           return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap)
+           return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid: %w", o.RepoMaintenanceJobConfigMap, err)
        }
    }

    if len(o.BackupRepoConfigMap) > 0 {
        config := make(map[string]any)
        if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil {
-           return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap)
+           return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid: %w", o.BackupRepoConfigMap, err)
        }
    }

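Note: the Validate changes above switch the ConfigMap error messages to %w wrapping, so the underlying verification error stays inspectable instead of being swallowed. A small self-contained illustration of the difference:

package main

import (
    "errors"
    "fmt"
)

var errInvalidJSON = errors.New("fake-get-error")

func validate(name string) error {
    // Wrapping with %w (as the updated install validation does) keeps the
    // underlying cause available to errors.Is/errors.As and in the message.
    return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", name, errInvalidJSON)
}

func main() {
    err := validate("node-agent-config")
    fmt.Println(err)                            // ...is invalid: fake-get-error
    fmt.Println(errors.Is(err, errInvalidJSON)) // true
}
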
@@ -60,6 +60,7 @@ import (
    "github.com/vmware-tanzu/velero/pkg/exposer"
    "github.com/vmware-tanzu/velero/pkg/metrics"
    "github.com/vmware-tanzu/velero/pkg/nodeagent"
+   repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
    velerotypes "github.com/vmware-tanzu/velero/pkg/types"
    "github.com/vmware-tanzu/velero/pkg/util/filesystem"
    "github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -84,6 +85,7 @@ type nodeAgentServerConfig struct {
    resourceTimeout         time.Duration
    dataMoverPrepareTimeout time.Duration
    nodeAgentConfig         string
+   backupRepoConfig        string
}

func NewServerCommand(f client.Factory) *cobra.Command {
@@ -121,6 +123,7 @@ func NewServerCommand(f client.Factory) *cobra.Command {
    command.Flags().DurationVar(&config.dataMoverPrepareTimeout, "data-mover-prepare-timeout", config.dataMoverPrepareTimeout, "How long to wait for preparing a DataUpload/DataDownload. Default is 30 minutes.")
    command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics")
    command.Flags().StringVar(&config.nodeAgentConfig, "node-agent-configmap", config.nodeAgentConfig, "The name of ConfigMap containing node-agent configurations.")
+   command.Flags().StringVar(&config.backupRepoConfig, "backup-repository-configmap", config.backupRepoConfig, "The name of ConfigMap containing backup repository configurations.")

    return command
}
@@ -140,7 +143,9 @@ type nodeAgentServer struct {
    csiSnapshotClient *snapshotv1client.Clientset
    dataPathMgr       *datapath.Manager
    dataPathConfigs   *velerotypes.NodeAgentConfigs
+   backupRepoConfigs map[string]string
    vgdpCounter       *exposer.VgdpCounter
+   repoConfigMgr     repository.ConfigManager
}

func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, config nodeAgentServerConfig) (*nodeAgentServer, error) {
@@ -234,6 +239,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
    namespace:      factory.Namespace(),
    nodeName:       nodeName,
    metricsAddress: config.metricsAddress,
+   repoConfigMgr:  repository.NewConfigManager(logger),
    }

    // the cache isn't initialized yet when "validatePodVolumesHostPath" is called, the client returned by the manager cannot
@@ -254,6 +260,11 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
    if err := s.getDataPathConfigs(); err != nil {
        return nil, err
    }

+   if err := s.getBackupRepoConfigs(); err != nil {
+       return nil, err
+   }
+
    s.dataPathMgr = datapath.NewManager(s.getDataPathConcurrentNum(defaultDataPathConcurrentNum))

    return s, nil
@@ -308,6 +319,8 @@ func (s *nodeAgentServer) run() {
    s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
    }

    privilegedFsBackup := s.dataPathConfigs != nil && s.dataPathConfigs.PrivilegedFsBackup

    podResources := corev1api.ResourceRequirements{}
    if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
        if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
@@ -327,12 +340,74 @@ func (s *nodeAgentServer) run() {
    }
    }

-   pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass)
    var cachePVCConfig *velerotypes.CachePVC
    if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
        if err := s.validateCachePVCConfig(*s.dataPathConfigs.CachePVCConfig); err != nil {
            s.logger.WithError(err).Warnf("Ignore cache config %v", s.dataPathConfigs.CachePVCConfig)
        } else {
            cachePVCConfig = s.dataPathConfigs.CachePVCConfig
            s.logger.Infof("Using cache volume configs %v", s.dataPathConfigs.CachePVCConfig)
        }
    }

    var podLabels map[string]string
    if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodLabels) > 0 {
        podLabels = s.dataPathConfigs.PodLabels
        s.logger.Infof("Using customized pod labels %+v", podLabels)
    }

    var podAnnotations map[string]string
    if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodAnnotations) > 0 {
        podAnnotations = s.dataPathConfigs.PodAnnotations
        s.logger.Infof("Using customized pod annotations %+v", podAnnotations)
    }

    if s.backupRepoConfigs != nil {
        s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
    } else if cachePVCConfig != nil {
        s.logger.Info("Backup repo config is not provided, using default values for cache volume configs")
    }

    pvbReconciler := controller.NewPodVolumeBackupReconciler(
        s.mgr.GetClient(),
        s.mgr,
        s.kubeClient,
        s.dataPathMgr,
        s.vgdpCounter,
        s.nodeName,
        s.config.dataMoverPrepareTimeout,
        s.config.resourceTimeout,
        podResources,
        s.metrics,
        s.logger,
        dataMovePriorityClass,
        privilegedFsBackup,
        podLabels,
        podAnnotations,
    )
    if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
    }

-   pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass)
    pvrReconciler := controller.NewPodVolumeRestoreReconciler(
        s.mgr.GetClient(),
        s.mgr,
        s.kubeClient,
        s.dataPathMgr,
        s.vgdpCounter,
        s.nodeName,
        s.config.dataMoverPrepareTimeout,
        s.config.resourceTimeout,
        s.backupRepoConfigs,
        cachePVCConfig,
        podResources,
        s.logger,
        dataMovePriorityClass,
        privilegedFsBackup,
        s.repoConfigMgr,
        podLabels,
        podAnnotations,
    )
    if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
    }
@@ -357,6 +432,8 @@ func (s *nodeAgentServer) run() {
    s.logger,
    s.metrics,
    dataMovePriorityClass,
+   podLabels,
+   podAnnotations,
    )
    if err := dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.WithError(err).Fatal("Unable to create the data upload controller")
@@ -376,12 +453,17 @@ func (s *nodeAgentServer) run() {
    s.vgdpCounter,
    loadAffinity,
    restorePVCConfig,
+   s.backupRepoConfigs,
+   cachePVCConfig,
    podResources,
    s.nodeName,
    s.config.dataMoverPrepareTimeout,
    s.logger,
    s.metrics,
    dataMovePriorityClass,
+   s.repoConfigMgr,
+   podLabels,
+   podAnnotations,
    )

    if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
@@ -555,14 +637,32 @@ func (s *nodeAgentServer) getDataPathConfigs() error {

    configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient, s.config.nodeAgentConfig)
    if err != nil {
-       s.logger.WithError(err).Errorf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig)
-       return err
+       return errors.Wrapf(err, "error getting node agent configs from configMap %s", s.config.nodeAgentConfig)
    }

    s.dataPathConfigs = configs
    return nil
}

func (s *nodeAgentServer) getBackupRepoConfigs() error {
    if s.config.backupRepoConfig == "" {
        s.logger.Info("No backup repo configMap is specified")
        return nil
    }

    cm, err := s.kubeClient.CoreV1().ConfigMaps(s.namespace).Get(s.ctx, s.config.backupRepoConfig, metav1.GetOptions{})
    if err != nil {
        return errors.Wrapf(err, "error getting backup repo configs from configMap %s", s.config.backupRepoConfig)
    }

    if cm.Data == nil {
        return errors.Errorf("no data is in the backup repo configMap %s", s.config.backupRepoConfig)
    }

    s.backupRepoConfigs = cm.Data
    return nil
}

func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
    configs := s.dataPathConfigs

@@ -618,3 +718,20 @@ func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {

    return concurrentNum
}

func (s *nodeAgentServer) validateCachePVCConfig(config velerotypes.CachePVC) error {
    if config.StorageClass == "" {
        return errors.New("storage class is absent")
    }

    sc, err := s.kubeClient.StorageV1().StorageClasses().Get(s.ctx, config.StorageClass, metav1.GetOptions{})
    if err != nil {
        return errors.Wrapf(err, "error getting storage class %s", config.StorageClass)
    }

    if sc.ReclaimPolicy != nil && *sc.ReclaimPolicy != corev1api.PersistentVolumeReclaimDelete {
        return errors.Errorf("unexpected storage class reclaim policy %v", *sc.ReclaimPolicy)
    }

    return nil
}

@@ -24,6 +24,7 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    corev1api "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
@@ -34,6 +35,8 @@ import (
    "github.com/vmware-tanzu/velero/pkg/nodeagent"
    testutil "github.com/vmware-tanzu/velero/pkg/test"
    velerotypes "github.com/vmware-tanzu/velero/pkg/types"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func Test_validatePodVolumesHostPath(t *testing.T) {
@@ -142,11 +145,10 @@ func Test_getDataPathConfigs(t *testing.T) {
    getFunc func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error)
    configMapName string
    expectConfigs *velerotypes.NodeAgentConfigs
-   expectLog     string
+   expectedErr   string
    }{
    {
-       name:      "no config specified",
-       expectLog: "No node-agent configMap is specified",
+       name: "no config specified",
    },
    {
        name: "failed to get configs",
@@ -154,7 +156,7 @@ func Test_getDataPathConfigs(t *testing.T) {
    getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
        return nil, errors.New("fake-get-error")
    },
-   expectLog:   "Failed to get node agent configs from configMap node-agent-config, ignore it",
+   expectedErr: "error getting node agent configs from configMap node-agent-config: fake-get-error",
    },
    {
        name: "configs cm not found",
@@ -162,7 +164,7 @@ func Test_getDataPathConfigs(t *testing.T) {
    getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
        return nil, errors.New("fake-not-found-error")
    },
-   expectLog:   "Failed to get node agent configs from configMap node-agent-config, ignore it",
+   expectedErr: "error getting node agent configs from configMap node-agent-config: fake-not-found-error",
    },

    {
@@ -177,23 +179,21 @@ func Test_getDataPathConfigs(t *testing.T) {

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
-           logBuffer := ""
-
            s := &nodeAgentServer{
                config: nodeAgentServerConfig{
                    nodeAgentConfig: test.configMapName,
                },
-               logger: testutil.NewSingleLogger(&logBuffer),
+               logger: testutil.NewLogger(),
            }

            getConfigsFunc = test.getFunc

-           s.getDataPathConfigs()
-           assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
-           if test.expectLog == "" {
-               assert.Empty(t, logBuffer)
+           err := s.getDataPathConfigs()
+           if test.expectedErr == "" {
+               require.NoError(t, err)
+               assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
            } else {
-               assert.Contains(t, logBuffer, test.expectLog)
+               require.EqualError(t, err, test.expectedErr)
            }
        })
    }
@@ -416,3 +416,117 @@ func Test_getDataPathConcurrentNum(t *testing.T) {
        })
    }
}

func TestGetBackupRepoConfigs(t *testing.T) {
    cmNoData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Result()
    cmWithData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Data("cacheLimit", "100").Result()

    tests := []struct {
        name          string
        configMapName string
        kubeClientObj []runtime.Object
        expectConfigs map[string]string
        expectedErr   string
    }{
        {
            name: "no config specified",
        },
        {
            name:          "failed to get configs",
            configMapName: "backup-repo-config",
            expectedErr:   "error getting backup repo configs from configMap backup-repo-config: configmaps \"backup-repo-config\" not found",
        },
        {
            name:          "configs data not found",
            kubeClientObj: []runtime.Object{cmNoData},
            configMapName: "backup-repo-config",
            expectedErr:   "no data is in the backup repo configMap backup-repo-config",
        },
        {
            name:          "succeed",
            configMapName: "backup-repo-config",
            kubeClientObj: []runtime.Object{cmWithData},
            expectConfigs: map[string]string{"cacheLimit": "100"},
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)

            s := &nodeAgentServer{
                namespace:  velerov1api.DefaultNamespace,
                kubeClient: fakeKubeClient,
                config: nodeAgentServerConfig{
                    backupRepoConfig: test.configMapName,
                },
                logger: testutil.NewLogger(),
            }

            err := s.getBackupRepoConfigs()
            if test.expectedErr == "" {
                require.NoError(t, err)
                require.Equal(t, test.expectConfigs, s.backupRepoConfigs)
            } else {
                require.EqualError(t, err, test.expectedErr)
            }
        })
    }
}

func TestValidateCachePVCConfig(t *testing.T) {
    scWithRetainPolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()
    scWithDeletePolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result()
    scWithNoPolicy := builder.ForStorageClass("fake-storage-class").Result()

    tests := []struct {
        name          string
        config        velerotypes.CachePVC
        kubeClientObj []runtime.Object
        expectedErr   string
    }{
        {
            name:        "no storage class",
            expectedErr: "storage class is absent",
        },
        {
            name:        "failed to get storage class",
            config:      velerotypes.CachePVC{StorageClass: "fake-storage-class"},
            expectedErr: "error getting storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
        },
        {
            name:          "storage class reclaim policy is not expected",
            config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
            kubeClientObj: []runtime.Object{scWithRetainPolicy},
            expectedErr:   "unexpected storage class reclaim policy Retain",
        },
        {
            name:          "storage class reclaim policy is delete",
            config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
            kubeClientObj: []runtime.Object{scWithDeletePolicy},
        },
        {
            name:          "storage class with no reclaim policy",
            config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
            kubeClientObj: []runtime.Object{scWithNoPolicy},
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)

            s := &nodeAgentServer{
                kubeClient: fakeKubeClient,
            }

            err := s.validateCachePVCConfig(test.config)

            if test.expectedErr == "" {
                require.NoError(t, err)
            } else {
                require.EqualError(t, err, test.expectedErr)
            }
        })
    }
}

@@ -51,6 +51,7 @@ import (
type podVolumeRestoreConfig struct {
    volumePath      string
    pvrName         string
+   cacheDir        string
    resourceTimeout time.Duration
}

@@ -86,6 +87,7 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
    command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
    command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
    command.Flags().StringVar(&config.pvrName, "pod-volume-restore", config.pvrName, "The PVR name")
+   command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
    command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")

    _ = command.MarkFlagRequired("volume-path")
@@ -294,5 +296,5 @@ func (s *podVolumeRestore) createDataPathService() (dataPathService, error) {
    return podvolume.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.pvrName, s.namespace, s.nodeName, datapath.AccessPoint{
        ByPath:  s.config.volumePath,
        VolMode: uploader.PersistentVolumeFilesystem,
-   }, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.logger), nil
+   }, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.config.cacheDir, s.logger), nil
}

@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
        return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)
    if err != nil {

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
    if os.Getenv(cmdtest.CaptureFlag) == "1" {
        return
    }
-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)

@@ -62,7 +62,7 @@ func TestNewGetCommand(t *testing.T) {
    return
    }

-   cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+   cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
    cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
    stdout, _, err := veleroexec.RunCommand(cmd)
    require.NoError(t, err)

@@ -47,11 +47,13 @@ const (
    defaultDisableInformerCache = false

    DefaultItemBlockWorkerCount = 1
+   DefaultConcurrentBackups    = 1
)

var (
    // DisableableControllers is a list of controllers that can be disabled
    DisableableControllers = []string{
+       constant.ControllerBackupQueue,
        constant.ControllerBackup,
        constant.ControllerBackupOperations,
        constant.ControllerBackupDeletion,
@@ -113,7 +115,11 @@ var (
    "datauploads.velero.io",
    "persistentvolumes",
    "persistentvolumeclaims",
+   "clusterroles",
+   "roles",
    "serviceaccounts",
+   "clusterrolebindings",
+   "rolebindings",
    "secrets",
    "configmaps",
    "limitranges",
@@ -174,6 +180,7 @@ type Config struct {
    BackupRepoConfig         string
    RepoMaintenanceJobConfig string
    ItemBlockWorkerCount     int
+   ConcurrentBackups        int
}

func GetDefaultConfig() *Config {
@@ -206,6 +213,7 @@ func GetDefaultConfig() *Config {
    ScheduleSkipImmediately: false,
    CredentialsDirectory:    credentials.DefaultStoreDirectory(),
    ItemBlockWorkerCount:    DefaultItemBlockWorkerCount,
+   ConcurrentBackups:       DefaultConcurrentBackups,
    }

    return config
@@ -261,4 +269,10 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) {
    c.ItemBlockWorkerCount,
    "Number of worker threads to process ItemBlocks. Default is one. Optional.",
    )
+   flags.IntVar(
+       &c.ConcurrentBackups,
+       "concurrent-backups",
+       c.ConcurrentBackups,
+       "Number of backups to process concurrently. Default is one. Optional.",
+   )
}

@@ -558,7 +558,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
    return clientmgmt.NewManager(logger, s.logLevel, s.pluginRegistry)
    }

-   backupStoreGetter := persistence.NewObjectBackupStoreGetter(s.credentialFileStore)
+   backupStoreGetter := persistence.NewObjectBackupStoreGetterWithSecretStore(s.credentialFileStore, s.credentialSecretStore)

    backupTracker := controller.NewBackupTracker()

@@ -581,6 +581,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
    constant.ControllerSchedule:            {},
    constant.ControllerServerStatusRequest: {},
    constant.ControllerRestoreFinalizer:    {},
+   constant.ControllerBackupQueue:         {},
    }

    if s.config.RestoreOnly {
@@ -668,6 +669,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
    s.config.MaxConcurrentK8SConnections,
    s.config.DefaultSnapshotMoveData,
    s.config.ItemBlockWorkerCount,
+   s.config.ConcurrentBackups,
    s.crClient,
    ).SetupWithManager(s.mgr); err != nil {
        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackup)
@@ -756,6 +758,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
    s.config.RepoMaintenanceJobConfig,
    s.logLevel,
    s.config.LogFormat,
+   s.metrics,
    ).SetupWithManager(s.mgr); err != nil {
        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackupRepo)
    }
@@ -909,6 +912,18 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
    }
    }

    if _, ok := enabledRuntimeControllers[constant.ControllerBackupQueue]; ok {
        if err := controller.NewBackupQueueReconciler(
            s.mgr.GetClient(),
            s.mgr.GetScheme(),
            s.logger,
            s.config.ConcurrentBackups,
            backupTracker,
        ).SetupWithManager(s.mgr); err != nil {
            s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackupQueue)
        }
    }

    s.logger.Info("Server starting...")

    if err := s.mgr.Start(s.ctx); err != nil {

Some files were not shown because too many files have changed in this diff.