Compare commits

..

131 Commits

Author SHA1 Message Date
Xun Jiang
78578d51ef Remove PVC node selection E2E test case.
According to #7904, remove the corresponding E2E case.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-11-11 10:45:49 +08:00
lyndon-li
df07c39014 Merge pull request #9368 from shubham-pampattiwar/fix-volume-info-generatename
Track actual resource names for GenerateName in restore status
2025-11-10 14:48:15 +08:00
Wenkai Yin(尹文开)
f2538207f3 Merge pull request #9390 from kaovilai/fix-artifact-upload-conflict
Update debug bundle artifact name to include Kubernetes version and job index
2025-11-10 14:19:18 +08:00
lyndon-li
d1e5d6b13a Merge pull request #9367 from shubham-pampattiwar/fix-managed-fields-generatename
Fix managed fields patch for resources using GenerateName
2025-11-10 13:50:42 +08:00
lyndon-li
99145bee70 Merge pull request #9391 from Lyndon-Li/cache-volume-for-dd
Cache volume support for DataDownload
2025-11-10 11:00:36 +08:00
Xun Jiang/Bruce Jiang
9e5769c304 Merge pull request #9057 from Joeavaikath/feat/wildcard-namespaces
Design: Feat/wildcard namespaces
2025-11-07 13:34:41 +08:00
Lyndon-Li
21b998e2c5 cache volume for data download
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-11-06 18:52:41 +08:00
Lyndon-Li
fbeab7291e Merge branch 'main' into cache-volume-for-dd 2025-11-06 15:12:31 +08:00
lyndon-li
d4a966481b Merge pull request #9379 from Lyndon-Li/repo-static-info-provider
Repo provider interface refactor for repo static configuration
2025-11-06 15:11:43 +08:00
Tiger Kaovilai
6e54879f4d update debug bundle artifact name to include Kubernetes version and job index
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2025-11-05 14:25:39 -05:00
lyndon-li
e485258d25 Merge pull request #9389 from sseago/fs-backup-secctx
don't copy securitycontext from first container if configmap found
2025-11-05 12:41:30 +08:00
Scott Seago
bd7d28f004 don't copy securitycontext from first container if configmap found
Signed-off-by: Scott Seago <sseago@redhat.com>
2025-11-04 16:56:35 -05:00
Lyndon-Li
7dbe2b4358 cache volume for data download
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-11-04 16:59:52 +08:00
Lyndon-Li
597cee545a repo provider interface refactor for repo static configuration
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-11-04 16:32:55 +08:00
Lyndon-Li
9556a39a89 repo provider interface refactor for repo static configuration
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-11-03 16:16:05 +08:00
lyndon-li
45755e14ee Merge pull request #9369 from vmware-tanzu/dependabot/github_actions/actions/upload-artifact-5
Bump actions/upload-artifact from 4 to 5
2025-11-03 14:38:01 +08:00
lyndon-li
c907b316a5 Merge pull request #9370 from Lyndon-Li/cache-volume-configuration
Add cache volume configuration
2025-11-01 10:38:54 +08:00
lyndon-li
75d69e1a04 Merge pull request #9362 from Lyndon-Li/cache-volume-for-exposer
Exposer supports cache volume
2025-10-31 17:06:40 +08:00
lyndon-li
a95c90411c Merge branch 'main' into cache-volume-configuration 2025-10-29 15:50:07 +08:00
Lyndon-Li
7178946deb cache volume configuration
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-29 15:05:52 +08:00
lyndon-li
14d2f0b30b Merge pull request #9375 from Lyndon-Li/issue-fix-9365
issue 9365: prevent multiple update of PVR
2025-10-29 14:58:39 +08:00
Lyndon-Li
31a7236c7d issue 9365: prevent multiple update of PVR
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-29 14:32:48 +08:00
lyndon-li
4111eb3940 Merge branch 'main' into cache-volume-for-exposer 2025-10-28 14:44:30 +08:00
Lyndon-Li
b1e5e4408f exposer supports cache volume
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-28 14:43:51 +08:00
lyndon-li
0d7ef85f98 Merge pull request #9353 from Lyndon-Li/cache-dir-for-udmprepo
Cache dir for udmrepo
2025-10-28 13:58:47 +08:00
lyndon-li
94cf7b39a8 Merge pull request #9357 from sseago/incremental-bytes2
Add incrementalBytes to DU/PVB for reporting new/changed size
2025-10-28 13:18:49 +08:00
dependabot[bot]
5e9605131b Bump actions/upload-artifact from 4 to 5
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-27 20:02:29 +00:00
Shubham Pampattiwar
c2840f1c74 Fix tests: populate createdName for all created resources
Update test expectations to include createdName field for resources
with action 'created'. Also ensure namespaces track their created
names when created via EnsureNamespaceExistsAndIsReady.

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-10-27 12:44:31 -07:00
Scott Seago
5fc76db8c0 Add incrementalSize to DU/PVB for reporting new/changed size
Signed-off-by: Scott Seago <sseago@redhat.com>
2025-10-27 15:38:31 -04:00
Shubham Pampattiwar
f9f0e48e04 Add changelog for PR 9367
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-10-27 12:11:06 -07:00
Shubham Pampattiwar
2e9998b20e Add changelog for PR 9368
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-10-27 12:07:55 -07:00
Shubham Pampattiwar
07f30d06b9 Track actual resource names for GenerateName in restore status
When restoring resources with GenerateName, Kubernetes assigns the actual name
after creation, but Velero only tracked the original name from the backup in
itemKey. This caused volume information collection to fail when trying to fetch
PVCs using the original name instead of the actual created name.

Example:
- Original PVC name from backup: "test-vm-disk-1"
- Actual created PVC name: "test-vm-backup-2025-10-27-test-vm-disk-1-mdjkd"
- Volume info tried to fetch: "test-vm-disk-1" → Failed with "not found"

This affects any plugin or workflow using GenerateName during restore:
- kubevirt-velero-plugin (VMFR use case with PVC collision avoidance)
- Custom restore item actions using generateName
- Secrets/ConfigMaps restored with generateName

Changes:
1. Add createdName field to restoredItemStatus struct (pkg/restore/request.go)
2. Capture actual name from createdObj.GetName() (pkg/restore/restore.go:1520)
3. Use createdName in RestoredResourceList() when available (pkg/restore/request.go:93-95)

This fix is backwards compatible:
- createdName defaults to empty string
- When empty, falls back to itemKey.name (original behavior)
- Only populated for GenerateName resources where needed

Fixes volume information collection errors like:
"Failed to get PVC" error="persistentvolumeclaims \"<original-name>\" not found"

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-10-27 12:04:46 -07:00
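The change described above reduces to a name fallback when listing restored resources. A minimal Go sketch under assumed types; itemKey, restoredItemStatus, and reportedName are illustrative names inspired by the commit message, not Velero's exact code:

```go
package restore

// Hypothetical, trimmed-down types mirroring the commit message.
type itemKey struct {
	resource, namespace, name string // name as recorded in the backup
}

type restoredItemStatus struct {
	action      string // e.g. "created", "updated", "skipped"
	createdName string // actual name assigned by the API server for generateName resources
}

// reportedName returns the name to report for a restored item: the
// server-assigned name when it was captured, otherwise the name from the backup.
func reportedName(key itemKey, status restoredItemStatus) string {
	if status.createdName != "" {
		return status.createdName
	}
	return key.name // original behavior when createdName is empty
}
```

With this fallback, resources created through generateName are reported (and later looked up, e.g. for volume info collection) under the server-assigned name, while all other resources keep the original behavior.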
Shubham Pampattiwar
898fa13ed7 Fix managed fields patch for resources using GenerateName
When restoring resources with GenerateName (where the name is empty and Kubernetes
assigns the actual name), the managed fields patch was failing with the error
"name is required" because it used obj.GetName(), which returns an empty string
for GenerateName resources.

The fix uses createdObj.GetName() instead, which contains the actual name
assigned by Kubernetes after resource creation.

This affects any resource using GenerateName for restore, including:
- PersistentVolumeClaims restored by kubevirt-velero-plugin
- Secrets and ConfigMaps created with generateName
- Any custom resources using generateName

Changes:
- Line 1707: Use createdObj.GetName() instead of obj.GetName() in Patch call
- Lines 1702, 1709, 1713, 1716: Use createdObj in error/info messages for accuracy

This is a backwards-compatible fix since:
- For resources WITHOUT generateName: obj.GetName() == createdObj.GetName()
- For resources WITH generateName: createdObj.GetName() has the actual name

The managed fields patch was already correctly using createdObj (lines 1698-1700),
only the Patch() call was incorrectly using obj.

Fixes restore status showing FinalizingPartiallyFailed with "name is required"
error when restoring resources with GenerateName.

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-10-27 11:57:55 -07:00
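A hedged sketch of the fix described above: pass the server-assigned name from createdObj into the Patch call instead of the possibly empty name from the backup object. The dynamic-client Patch signature is standard client-go, but the function, patch type, and call site here are illustrative, not Velero's exact code:

```go
package restore

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// patchManagedFields applies a managed-fields patch to a restored object.
// obj is the object from the backup (its name may be empty when generateName
// is set); createdObj is what the API server returned after creation and
// always carries the actual assigned name.
func patchManagedFields(
	ctx context.Context,
	client dynamic.ResourceInterface,
	obj, createdObj *unstructured.Unstructured,
	patchBytes []byte,
) (*unstructured.Unstructured, error) {
	// Before the fix: client.Patch(ctx, obj.GetName(), ...) failed with "name is required".
	// After the fix: use the name Kubernetes actually assigned.
	return client.Patch(ctx, createdObj.GetName(), types.MergePatchType, patchBytes, metav1.PatchOptions{})
}
```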
lyndon-li
f4517f131b Merge branch 'main' into cache-dir-for-udmprepo 2025-10-27 16:34:21 +08:00
lyndon-li
f4af6156a1 Merge pull request #9354 from Lyndon-Li/snapshot-size-support
Snapshot size support
2025-10-27 13:50:06 +08:00
Lyndon-Li
d4147e406b snapshot size in restore CRs
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-27 04:41:04 +00:00
lyndon-li
aafb616c12 Merge branch 'main' into cache-dir-for-udmprepo 2025-10-24 17:34:25 +08:00
Lyndon-Li
44f3166f0f Merge branch 'main' into cache-volume-for-exposer 2025-10-24 17:27:02 +08:00
lyndon-li
fb38727b9c Merge branch 'main' into snapshot-size-support 2025-10-24 16:55:50 +08:00
lyndon-li
c29ed91442 Merge pull request #9148 from Lyndon-Li/backup-repo-cache-design
Backup repo cache design
2025-10-24 14:30:59 +08:00
lyndon-li
6280ffddaa Merge branch 'main' into backup-repo-cache-design 2025-10-23 15:03:17 +08:00
Lyndon-Li
2e3f41be22 backup repo cache design
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-23 15:02:17 +08:00
Lyndon-Li
8d29051bbe expose supports cache volume
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-22 19:11:56 +08:00
Lyndon-Li
6dbe772590 snapshot size in restore CRs only
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-22 12:41:41 +08:00
Lyndon-Li
2aa319aa30 add snapshot size to data mover CRs
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-21 16:48:48 +08:00
Lyndon-Li
903ee21f31 Merge branch 'main' into snapshot-size-support 2025-10-21 16:47:16 +08:00
Lyndon-Li
a4e3dccdce Merge branch 'main' into cache-dir-for-udmprepo 2025-10-21 16:42:28 +08:00
Wenkai Yin(尹文开)
53e99556ad Merge pull request #9291 from Lyndon-Li/dont-connect-repo-in-repo-controller
Don't connect repo in repo controller
2025-10-21 16:10:20 +08:00
Lyndon-Li
166f50d776 add snapshot size to data mover CRs
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-21 15:14:38 +08:00
Lyndon-Li
6c9699a06d udmrepo support cache dir
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-21 13:39:37 +08:00
Lyndon-Li
79b8cc40b1 Merge branch 'main' into cache-dir-for-udmprepo 2025-10-21 13:37:11 +08:00
Lyndon-Li
3e39cb4b0f udmrepo support cache dir
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-21 13:36:35 +08:00
lyndon-li
9b02402631 Merge pull request #9342 from Lyndon-Li/cache-dir-to-vgdp
Add cache configuration to VGDP
2025-10-21 13:19:00 +08:00
Lyndon-Li
6bd8033d24 add cache dir to VGDP
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-17 18:00:03 +08:00
Lyndon-Li
7cec76e445 Merge branch 'main' into cache-dir-to-vgdp 2025-10-17 17:58:01 +08:00
lyndon-li
420a65a116 Merge pull request #9269 from Lyndon-Li/deprecate-pvc-node-selection
Deprecate pvc node selection feature
2025-10-17 15:48:34 +08:00
Wenkai Yin(尹文开)
3bf4a7dced Merge pull request #9312 from vmware-tanzu/dependabot/github_actions/actions/stale-10.1.0
Bump actions/stale from 10.0.0 to 10.1.0
2025-10-17 14:09:23 +08:00
lyndon-li
2a5804b595 Merge branch 'main' into deprecate-pvc-node-selection 2025-10-17 13:31:21 +08:00
lyndon-li
b9af3a1947 Merge branch 'main' into dont-connect-repo-in-repo-controller 2025-10-17 13:31:11 +08:00
lyndon-li
9a3fabbc55 issue 9332: make bytesDone correct for incremental backup (#9333)
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-16 12:41:21 -04:00
lyndon-li
99a46ed818 Merge pull request #9329 from T4iFooN-IX/9327
Fix typos (#9327)
2025-10-15 10:02:58 +08:00
Daniel Wituschek
93e8379530 Add changelog file
Signed-off-by: Daniel Wituschek <daniel.wituschek@intrexx.com>
2025-10-14 14:58:05 +02:00
lyndon-li
72ddfd7d78 Merge branch 'main' into deprecate-pvc-node-selection 2025-10-14 14:40:40 +08:00
lyndon-li
18260d88ca Merge branch 'main' into dont-connect-repo-in-repo-controller 2025-10-14 14:40:21 +08:00
Daniel Wituschek
8b3ba78c8c Fix typos
Signed-off-by: Daniel Wituschek <daniel.wituschek@intrexx.com>
2025-10-13 16:00:23 +02:00
Daniel Wituschek
b34f2deff2 Fix typos
Signed-off-by: Daniel Wituschek <daniel.wituschek@intrexx.com>
2025-10-13 15:58:20 +02:00
dependabot[bot]
e9666f9aea Bump actions/stale from 10.0.0 to 10.1.0
Bumps [actions/stale](https://github.com/actions/stale) from 10.0.0 to 10.1.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/v10.0.0...v10.1.0)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: 10.1.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-10 11:17:07 +08:00
lyndon-li
e6aab8ca93 add events to diagnose (#9296)
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-10-09 14:13:43 -04:00
Xun Jiang/Bruce Jiang
99f12b85ba Merge pull request #9301 from vmware-tanzu/fix_action_isse
Fix the push action invalid variable ref issue.
2025-09-30 16:40:02 +08:00
Xun Jiang/Bruce Jiang
f8938e7fed VerifyJSONConfigs verifies every element in Data. (#9302)
Add an error message to the velero install CLI output if VerifyJSONConfigs fails.
Only allow one element in node-agent-configmap's Data.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-09-29 15:08:05 -04:00
Xun Jiang
cabb04575e Fix the push action invalid variable ref issue.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-09-27 23:33:37 +08:00
lyndon-li
60dbcbc60d Merge pull request #9295 from sseago/privileged-fs-backup-pods
Privileged fs backup pods
2025-09-26 10:31:59 +08:00
Scott Seago
4ade8cf8a2 Add option for privileged fs-backup pod
Signed-off-by: Scott Seago <sseago@redhat.com>
2025-09-25 15:38:39 -04:00
Joseph
75f1817cba Simplify
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-09-25 09:28:20 -04:00
Lyndon-Li
78ddeef96c add cache dir to VGDP
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-25 18:19:23 +08:00
lyndon-li
814db6541f Merge branch 'main' into backup-repo-cache-design 2025-09-25 16:50:22 +08:00
Joseph
cf7a9495c5 Leaner design
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-09-25 04:40:13 -04:00
Joseph
14a6315667 update
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-09-25 04:09:59 -04:00
Daniel Jiang
826c73131e Merge pull request #9233 from Lyndon-Li/backup-pvc-to-different-node
backupPVC to different node
2025-09-24 22:16:36 +08:00
lyndon-li
c90856de65 Merge branch 'main' into dont-connect-repo-in-repo-controller 2025-09-23 16:12:09 +08:00
Lyndon-Li
a0aac09f0a don't connect repo in repo controller
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-23 16:11:24 +08:00
lyndon-li
903caa9c02 Merge branch 'main' into backup-repo-cache-design 2025-09-23 11:47:22 +08:00
lyndon-li
031df8d5e0 Merge branch 'main' into deprecate-pvc-node-selection 2025-09-23 11:46:00 +08:00
lyndon-li
21691451e9 Merge branch 'main' into backup-pvc-to-different-node 2025-09-23 11:43:24 +08:00
lyndon-li
50d7b1cff1 Merge pull request #9248 from 0xLeo258/main
Issue #9247: Protect VolumeSnapshot field from race condition
2025-09-23 11:39:57 +08:00
lyndon-li
37df853a99 Merge branch 'main' into deprecate-pvc-node-selection 2025-09-23 11:39:05 +08:00
lyndon-li
d545ad49ba Merge branch 'main' into main 2025-09-23 11:10:38 +08:00
lyndon-li
7831bf25b9 Merge pull request #9281 from 0xLeo258/issue9234
Issue #9234: Fix plugin reentry with safe VolumeSnapshotterCache
2025-09-23 11:06:21 +08:00
Joseph
0b40702900 Updates
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-09-22 17:31:22 -04:00
Xun Jiang/Bruce Jiang
2abe91e08c Merge pull request #9250 from blackpiglet/distinguish_go_version_for_main_and_other_release
Use different go version check logic for main and other branches.
2025-09-22 22:48:21 +08:00
0xLeo258
1ebe357d18 Add built-in mutex for SynchronizedVSList && Update unit tests
Signed-off-by: 0xLeo258 <noixe0312@gmail.com>
2025-09-20 09:13:07 +08:00
0xLeo258
9df17eb02b add changelog
Signed-off-by: 0xLeo258 <noixe0312@gmail.com>
2025-09-20 09:13:07 +08:00
0xLeo258
f2a27c3864 fix9247: Protect VolumeSnapshot field
Signed-off-by: 0xLeo258 <noixe0312@gmail.com>
2025-09-20 09:13:07 +08:00
lyndon-li
8ee3436f5c Merge branch 'main' into deprecate-pvc-node-selection 2025-09-19 17:24:38 +08:00
Xun Jiang
4847eeaf62 Use different go version check logic for main and other branches.
The main branch reads the Go version from go.mod's go directive and keeps only
the major and minor version, because we want the actions to use the latest patch
version automatically, even when go.mod specifies a version like 1.24.0.
Release branches can read the Go version from the go.mod file via the setup-go
action's own logic.
Refactor the Go version retrieval into a reusable workflow.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-09-19 16:58:18 +08:00
dependabot[bot]
1ec281a64e Bump actions/setup-go from 5 to 6 (#9231)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5 to 6.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-18 12:29:45 -04:00
0xLeo258
25de1bb3b6 add changelog
Signed-off-by: 0xLeo258 <noixe0312@gmail.com>
2025-09-18 17:36:07 +08:00
0xLeo258
e21b21c19e fix 9234: Add safe VolumeSnapshotterCache
Signed-off-by: 0xLeo258 <noixe0312@gmail.com>
2025-09-18 17:21:25 +08:00
Xun Jiang/Bruce Jiang
b19cad9d01 Merge pull request #9280 from kaovilai/bitnamiminio
Fix E2E tests: Build MinIO from Bitnami Dockerfile to replace deprecated image
2025-09-18 14:25:41 +08:00
Tiger Kaovilai
9b6c4b1d47 Fix E2E tests: Build MinIO from Bitnami Dockerfile to replace deprecated image
The Bitnami MinIO image bitnami/minio:2021.6.17-debian-10-r7 is no longer
available on Docker Hub, causing E2E tests to fail.

This change implements a solution to build the MinIO image locally from
Bitnami's public Dockerfile and cache it for subsequent runs:
- Fetches the latest commit hash of the Bitnami MinIO Dockerfile
- Uses GitHub Actions cache to store/retrieve built images
- Only rebuilds when the upstream Dockerfile changes
- Maintains compatibility with existing environment variables

Fixes #9279

🤖 Generated with [Claude Code](https://claude.ai/code)

Update .github/workflows/e2e-test-kind.yaml

Signed-off-by: Tiger Kaovilai <passawit.kaovilai@gmail.com>
Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2025-09-17 19:08:07 -04:00
Lyndon-Li
b9159c22ca deprecate pvc node selection
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-17 15:22:42 +08:00
lyndon-li
112bea520e Merge branch 'main' into deprecate-pvc-node-selection 2025-09-17 15:19:40 +08:00
Lyndon-Li
7e15e9ba05 deprecate pvc node selection
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-17 15:09:54 +08:00
Wenkai Yin(尹文开)
f50cafa472 Merge pull request #9264 from shubham-pampattiwar/fix-backup-q-accum
Fix Schedule Backup Queue Accumulation During Extended Blocking Scenarios
2025-09-17 14:07:32 +08:00
Shubham Pampattiwar
a7b2985c83 add changelog file
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-09-15 16:07:40 -07:00
Shubham Pampattiwar
59289fba76 Fix Schedule Backup Queue Accumulation During Extended Blocking Scenarios
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-09-15 16:01:33 -07:00
Wenkai Yin(尹文开)
925479553a Merge pull request #9256 from shubham-pampattiwar/inhrerit-tolr-jobs
Fix maintenance jobs toleration inheritance from Velero deployment
2025-09-15 14:49:21 +08:00
lyndon-li
47340e67af Merge branch 'main' into backup-pvc-to-different-node 2025-09-12 13:30:34 +08:00
Lyndon-Li
25a7ef0e87 backupPVC to different node
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-12 13:27:58 +08:00
lyndon-li
799d596d5c Merge pull request #9226 from sseago/iba-perf
Get pod list once per namespace in pvc IBA
2025-09-12 10:55:43 +08:00
Shubham Pampattiwar
5ba00dfb09 Fix maintenance jobs toleration inheritance from Velero deployment
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

fix codespell and add changelog file

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2025-09-11 16:04:26 -07:00
Priyansh Choudhary
f1476defde Update AzureAD Microsoft Authentication Library to v1.5.0 (#9244)
* Update AzureAD Microsoft Authentication Library to v1.5.0
Signed-off-by: Priyansh Choudhary <im1706@gmail.com>

* Added Changelog
Signed-off-by: Priyansh Choudhary <im1706@gmail.com>

---------

Signed-off-by: Priyansh Choudhary <im1706@gmail.com>
2025-09-11 14:07:46 -04:00
Xun Jiang/Bruce Jiang
67ff0dcbe0 Merge pull request #9240 from vmware-tanzu/update_velero_supported_k8s_versions
Add v1.34.0 for v1.17 compatible k8s versions.
2025-09-11 15:20:13 +08:00
lyndon-li
aad9dd9068 Merge branch 'main' into backup-pvc-to-different-node 2025-09-11 14:47:35 +08:00
lyndon-li
b636334079 Merge pull request #9241 from blackpiglet/bump_k8s_lib_to_1.33_for_main
Bump k8s library to v1.33.
2025-09-11 14:20:40 +08:00
lyndon-li
4d44705ed8 Merge branch 'main' into backup-pvc-to-different-node 2025-09-11 13:08:22 +08:00
Lyndon-Li
81c5b6692d backupPVC to different node
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-09-11 13:04:24 +08:00
Xun Jiang
e8208097ba Bump k8s library to v1.33.
Replace deprecated EventExpansion method with WithContext methods.
Modify UTs.
Align the E2E ginkgo CLI version with go.mod

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-09-10 17:58:38 +08:00
Xun Jiang
4c30499340 Add v1.34.0 for v1.17 compatible k8s versions.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-09-10 17:28:23 +08:00
Scott Seago
2a9203f1b2 Get pod list once per namespace in pvc IBA
Signed-off-by: Scott Seago <sseago@redhat.com>
2025-09-09 13:19:06 -04:00
Joseph
528392ac5b Added struct change
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
56df64b625 Status fields are part of a struct
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
eb8b382816 Update design
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
571c9bd3ef Enhance wildcard namespace support design document
- Expanded the design to include detailed implementation steps for wildcard expansion in both backup and restore operations.
- Added new status fields to the backup and restore CRDs to track expanded wildcard namespaces.
- Clarified the approach to ensure backward compatibility with existing `*` behavior.
- Addressed limitations and provided insights on restore operations handling wildcard-expanded backups.

This update aims to provide a comprehensive and clear framework for implementing wildcard namespace support in Velero.

Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
037db22afe Refine wildcard namespace support design document
- Clarified the use of the standalone `*` character in namespace specifications.
- Ensured consistent formatting for `*` throughout the document.
- Maintained focus on backward compatibility and limitations regarding wildcard usage.

This update enhances the clarity and consistency of the design document for implementing wildcard namespace support in Velero.

Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
4c1457c318 Enhance wildcard namespace support in backup and restore design document
- Updated the abstract to clarify the current limitations of namespace specifications in Velero.
- Expanded the goals section to include specific objectives for implementing wildcard patterns in `--include-namespaces` and `--exclude-namespaces`.
- Detailed the high-level design and implementation steps, including the addition of new status fields in the backup CRD and the creation of a utility package for wildcard expansion.
- Addressed backward compatibility and known limitations regarding the use of wildcards alongside the existing "*" character.

This update aims to provide a comprehensive overview of the proposed changes for improved namespace selection flexibility.

Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
c0699c443b New design doc
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
69e307918b Update design
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
Joseph
571a816a61 Add design doc
Signed-off-by: Joseph <jvaikath@redhat.com>
2025-08-19 17:44:45 -07:00
lyndon-li
2de5a5c1a7 Merge branch 'main' into backup-repo-cache-design 2025-08-15 18:00:11 +08:00
Lyndon-Li
133db854b2 backup repo cache design
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-08-15 17:55:50 +08:00
159 changed files with 3704 additions and 844 deletions

View File

@@ -8,16 +8,26 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}
# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build:
runs-on: ubuntu-latest
needs: get-go-version
outputs:
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}
# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cli-cache
@@ -44,6 +54,26 @@ jobs:
run: |
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
docker save velero:pr-test-linux-amd64 -o ./velero.tar
# Check and build MinIO image once for all e2e tests
- name: Check Bitnami MinIO Dockerfile version
id: minio-version
run: |
DOCKERFILE_SHA=$(curl -s https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile\&per_page=1 | jq -r '.[0].sha')
echo "dockerfile_sha=${DOCKERFILE_SHA}" >> $GITHUB_OUTPUT
- name: Cache MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ steps.minio-version.outputs.dockerfile_sha }}
- name: Build MinIO Image from Bitnami Dockerfile
if: steps.minio-cache.outputs.cache-hit != 'true'
run: |
echo "Building MinIO image from Bitnami Dockerfile..."
git clone --depth 1 https://github.com/bitnami/containers.git /tmp/bitnami-containers
cd /tmp/bitnami-containers/bitnami/minio/2025/debian-12
docker build -t bitnami/minio:local .
docker save bitnami/minio:local > ${{ github.workspace }}/minio-image.tar
# Create json of k8s versions to test
# from guide: https://stackoverflow.com/a/65094398/4590470
setup-test-matrix:
@@ -75,6 +105,7 @@ jobs:
needs:
- build
- setup-test-matrix
- get-go-version
runs-on: ubuntu-latest
strategy:
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
@@ -82,13 +113,26 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}
# Fetch the pre-built MinIO image from the build job
- name: Fetch built MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ needs.build.outputs.minio-dockerfile-sha }}
- name: Load MinIO Image
run: |
echo "Loading MinIO image..."
docker load < ./minio-image.tar
- name: Install MinIO
run:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
run: |
docker run -d --rm -p 9000:9000 -e "MINIO_ROOT_USER=minio" -e "MINIO_ROOT_PASSWORD=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:local
- uses: engineerd/setup-kind@v0.6.2
with:
skipClusterLogsExport: true
@@ -141,7 +185,7 @@ jobs:
timeout-minutes: 30
- name: Upload debug bundle
if: ${{ failure() }}
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: DebugBundle
name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*

33
.github/workflows/get-go-version.yaml vendored Normal file
View File

@@ -0,0 +1,33 @@
on:
workflow_call:
inputs:
ref:
description: "The target branch's ref"
required: true
type: string
outputs:
version:
description: "The expected Go version"
value: ${{ jobs.extract.outputs.version }}
jobs:
extract:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.pick-version.outputs.version }}
steps:
- name: Check out the code
uses: actions/checkout@v5
- id: pick-version
run: |
if [ "${{ inputs.ref }}" == "main" ]; then
version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
else
goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
fi
echo "version=$version"
echo "version=$version" >> $GITHUB_OUTPUT

View File

@@ -1,18 +1,26 @@
name: Pull Request CI Check
on: [pull_request]
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}
build:
name: Run CI
needs: get-go-version
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}
- name: Make ci
run: make ci
- name: Upload test coverage

View File

@@ -7,16 +7,24 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}
build:
name: Run Linter Check
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}
- name: Linter check
uses: golangci/golangci-lint-action@v8
with:

View File

@@ -9,17 +9,24 @@ on:
- '*'
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.ref }}
build:
name: Build
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
- name: Set up Go version
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: ${{ needs.get-go-version.outputs.version }}
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v3

View File

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10.0.0
- uses: actions/stale@v10.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."

View File

@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:0.2.73
FROM paketobuildpacks/run-jammy-tiny:latest
LABEL maintainer="Xun Jiang <jxun@vmware.com>"

View File

@@ -42,7 +42,7 @@ The following is a list of the supported Kubernetes versions for each Velero ver
| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|----------------|-------------------------------------------|-------------------------------------|
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, and 1.33.1 |
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |

View File

@@ -0,0 +1 @@
Fix issue #7725, add design for backup repo cache configuration

View File

@@ -0,0 +1 @@
Get pod list once per namespace in pvc IBA

View File

@@ -0,0 +1 @@
Fix issue #9229, don't attach backupPVC to the source node

View File

@@ -0,0 +1 @@
Update AzureAD Microsoft Authentication Library to v1.5.0

View File

@@ -0,0 +1 @@
Protect VolumeSnapshot field from race condition during multi-thread backup

View File

@@ -0,0 +1 @@
Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment

View File

@@ -0,0 +1 @@
Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases

View File

@@ -0,0 +1 @@
Fix issue #7904, remove the code and doc for PVC node selection

View File

@@ -0,0 +1 @@
Implement concurrency control for cache of native VolumeSnapshotter plugin.

View File

@@ -0,0 +1 @@
Fix issue #9193, don't connect repo in repo controller

View File

@@ -0,0 +1 @@
Add option for privileged fs-backup pod

View File

@@ -0,0 +1 @@
Fix issue #9267, add events to data mover prepare diagnostic

View File

@@ -0,0 +1 @@
VerifyJSONConfigs verifies every element in Data.

View File

@@ -0,0 +1 @@
Fix typos in documentation

View File

@@ -0,0 +1 @@
Fix issue #9332, add bytesDone for cache files

View File

@@ -0,0 +1 @@
Add cache configuration to VGDP

View File

@@ -0,0 +1 @@
Add cache dir configuration for udmrepo

View File

@@ -0,0 +1 @@
Add snapshotSize for DataDownload, PodVolumeRestore

View File

@@ -0,0 +1 @@
Add incrementalSize to DU/PVB for reporting new/changed size

View File

@@ -0,0 +1 @@
Support cache volume for generic restore exposer and pod volume exposer

View File

@@ -0,0 +1 @@
Fix managed fields patch for resources using GenerateName

View File

@@ -0,0 +1 @@
Track actual resource names for GenerateName in restore status

View File

@@ -0,0 +1 @@
Add cache volume configuration

View File

@@ -0,0 +1 @@
Fix issue #9365, prevent fake completion notification due to multiple update of single PVR

View File

@@ -0,0 +1 @@
Refactor repo provider interface for static configuration

View File

@@ -0,0 +1 @@
don't copy securitycontext from first container if configmap found

View File

@@ -0,0 +1 @@
Cache volume support for DataDownload

View File

@@ -33,6 +33,12 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -189,6 +195,11 @@ spec:
format: date-time
nullable: true
type: string
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the pod volume backup's status.
type: string

View File

@@ -133,6 +133,10 @@ spec:
snapshotID:
description: SnapshotID is the ID of the volume snapshot to be restored.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: SourceNamespace is the original namespace for namaspace
mapping.

File diff suppressed because one or more lines are too long

View File

@@ -108,6 +108,10 @@ spec:
description: SnapshotID is the ID of the Velero backup snapshot to
be restored from.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: |-
SourceNamespace is the original namespace where the volume is backed up from.

View File

@@ -33,6 +33,12 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -173,6 +179,11 @@ spec:
as a result of the DataUpload.
nullable: true
type: object
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the DataUpload's status.
type: string

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,231 @@
# Backup Repository Cache Volume Design
## Glossary & Abbreviation
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
**Backup Repository**: The backup repository is layered between BR data movers and Backup Storage to provide the BR related features that are introduced in [Unified Repository design][1].
**Velero Generic Data Path (VGDP)**: VGDP is the collection of modules introduced in [Unified Repository design][1]. Velero uses these modules to perform data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include the uploaders and the backup repository.
**Data Mover Pods**: Intermediate pods which hold VGDP and complete the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3] for details.
**Repository Maintenance Pods**: Pods for [Repository Maintenance Jobs][4], which hold VGDP to run repository maintenance.
## Background
According to the [Unified Repository design][1], Velero uses selectable backup repositories for various backup/restore methods, e.g., fs-backup, volume snapshot data movement, etc. Some backup repositories may need to cache data on the client side for various repository operations, so as to accelerate execution.
The existing [Backup Repository Configuration][5] already lets users configure the cache data size (`cacheLimitMB`). However, the cache data is still stored in the root file system of the data mover pods/repository maintenance pods, and therefore in the root file system of the node. This is not good enough, for the following reasons:
- In many distributions, the node's system disk size is predefined, non-configurable and limited, e.g., the system disk may be 20G or less
- Velero supports concurrent data movements on each node. The cache of the concurrent data mover pods could quickly exhaust the system disk and cause problems like pod eviction, failure of pod creation, degradation of Kubernetes QoS, etc.
We need to allow users to prepare a dedicated location, e.g., a dedicated volume, for the cache.
Not all backup repositories, and not all backup repository operations, require a cache, so we need to define when and how the cache is used.
## Goals
- Create a mechanism for users to configure cache volumes for various pods running VGDP
- Design the workflow to assign the cache volume pod path to backup repositories
- Describe when and how the cache volume is used
## Non-Goals
- The solution is based on [Unified Repository design][1], [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3]; legacy data paths are not supported. E.g., when a pod volume restore (PVR) runs with the legacy Restic path, any cached data still resides in the root file system.
## Solution
### Cache Data
Depending on the backup repository, cache data may include payload data or repository metadata, e.g., indexes of the payload data chunks.
Payload data is highly related to the backup data, and normally takes the majority of the repository data as well as the cache data.
Repository metadata is related to the backup repository's chunking algorithm, data chunk mapping method, etc., so its size is not proportional to the backup data size.
On the other hand, for some backup repositories the repository metadata may be significantly large in extreme cases. E.g., Kopia's indexes are per chunk; if there is a huge number of small files in the repository, Kopia's index data may be on the same level as, or even larger than, the payload data.
However, in the cases where repository metadata becomes the majority, other bottlenecks may emerge and the concurrency of data movers may be significantly constrained, so the requirement for cache volumes may go away.
Therefore, for now we only consider the cache volume requirement for payload data, and leave the consideration for metadata as a future enhancement.
### Scenarios
The backup repository cache varies with the backup repository and the repository operation performed during VGDP runs. Below are the scenarios in which VGDP runs:
- Data Upload for Backup: this is the process to upload/write the backup data into the backup repository, e.g., DataUpload or PodVolumeBackup. The pieces of data are written to the repository almost directly, sometimes with a small group staying briefly in the local place. That is to say, no large-scale data should be cached in this scenario, so we don't prepare a dedicated cache for it.
- Repository Maintenance: Repository maintenance mostly visits the backup repository's metadata and sometimes it needs to visit the file system directories from the backed-up data. On the other hand, it is not practical to run concurrent maintenance jobs on one node. So the cache data is neither large nor does it affect the root file system too much. Therefore, we don't need to prepare a dedicated cache for this scenario.
- Data Download for Restore: this is the process to download/read the backup data from the backup repository during restore, e.g., DataDownload or PodVolumeRestore. For backup repositories whose data is stored in remote backup storage (e.g., the Kopia repository stores data in remote object stores), large amounts of data are cached locally to accelerate the restore. Therefore, we need dedicated cache volumes for this scenario.
- Backup Deletion: During this scenario, the backup repository is connected and metadata is enumerated to find the repository snapshot representing the backup data. That is to say, only metadata is cached, if anything. Therefore, dedicated cache volumes are not required in this scenario.
The above analyses are based on the common behavior of backup repositories and do not consider the case where backup repository metadata takes the majority or a significant proportion of the cache data.
As a conclusion of the analyses, we will create dedicated cache volumes for restore scenarios.
For other scenarios, we can add cache volumes later according to future changes/requirements. The mechanism to expose and connect the cache volumes should work for all scenarios. E.g., if we need to consider the backup repository metadata case, we may need cache volumes for backup and repository maintenance as well; then we can just reuse the same cache volume provisioning and connection mechanism for those scenarios.
### Cache Data and Lifecycle
If available, one cache volume is exclusively assigned to one data mover pod. That is, the cached data is destroyed when the data mover pod completes, at which point the backup repository instance also closes.
Cache data is fully managed by the specific backup repository, so the backup repository may also have its own way to GC the cache data.
That is to say, cache data GC may be launched by the backup repository instance while the data mover pod is running; the remaining data is then automatically destroyed when the data mover pod and the cache PVC are destroyed (the cache PVC's `reclaimPolicy` is always `Delete`, so once the cache PVC is destroyed, the volume is destroyed as well). So no special logic is needed for cache data GC.
### Data Size
Cache volumes take storage space and cluster resources (PVC, PV); therefore, cache volumes should be created only when necessary and should be reasonably sized based on the cache data size:
- It is not a good bargain to have cache volumes for small backups; small backups will use the resident cache location (the cache location in the root file system)
- The cache data size has a limit; the existing `cacheLimitMB` is used for this purpose. E.g., it could be set to 1024 for a 1TB backup, which means 1GB of data is cached and old cache data exceeding this size will be cleared. Therefore, it is meaningless to set the cache volume size much larger than `cacheLimitMB`
### Cache Volume Size
The cache volume size is calculated from the factors below (for restore scenarios):
- **Limit**: The limit of the cache data, represented by `cacheLimitMB`; the default value is 5GB
- **backupSize**: The size of the backup, used as a reference to evaluate whether to create a cache volume. It doesn't mean the backup data really decides the cache data all the time; it is just a reference to evaluate the scale of the backup, since small-scale backups may need little cache data. Sometimes backupSize is irrelevant to the size of the cache data; in this case, ResidentThreshold should not be set and Limit will be used directly. It is unlikely that backupSize is unavailable, but once that happens, ResidentThreshold is ignored and Limit will be used directly.
- **ResidentThreshold**: The minimum backup size for which a cache volume is created
- **InflationPercentage**: Considering the overhead of the file system and the possible delay of the cache cleanup, there should be an inflation of the final volume size vs. the logical size; otherwise, the cache volume may be overrun. This inflation percentage is hardcoded, e.g., 20%.
A formula is as below:
```
cacheVolumeSize = ((backupSize != 0 ? (backupSize > residentThreshold ? limit : 0) : limit) * (100 + inflationPercentage)) / 100
```
Finally, the `cacheVolumeSize` is rounded up to whole GiB for UX, storage and management friendliness.
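To make the calculation concrete, below is a minimal Go sketch of the sizing logic, assuming sizes in bytes and a hardcoded 20% inflation; the function name is illustrative and not the actual Velero implementation.
```go
package main

import "fmt"

const (
	inflationPercentage = 20 // assumed hardcoded inflation, e.g., 20%
	gib                 = int64(1) << 30
)

// cacheVolumeSize follows the formula above; all sizes are in bytes.
// A return value of 0 means no cache volume is created.
func cacheVolumeSize(backupSize, residentThreshold, limit int64) int64 {
	size := limit
	if backupSize != 0 && backupSize <= residentThreshold {
		// small backups keep using the resident cache location
		size = 0
	}
	// inflate for file system overhead and delayed cache cleanup
	size = size * (100 + inflationPercentage) / 100
	// round up to whole GiB
	if r := size % gib; r != 0 {
		size += gib - r
	}
	return size
}

func main() {
	// e.g., a 1TB backup with a 1GB resident threshold and a 5GB cache limit
	fmt.Println(cacheVolumeSize(1<<40, 1<<30, 5<<30)) // 6442450944 bytes = 6GiB
}
```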
### PVC/PV
The PVC for a cache volume is created in the Velero namespace and a storage class is required for the cache PVC. The PVC's accessMode is `ReadWriteOnce` and its volumeMode is `Filesystem`, so the provided storage class should support this specification. Otherwise, if the storage class doesn't support either of the specifications, the data mover pod may hang in the `Pending` state until a timeout associated with the data movement (e.g. `prepareTimeout`) expires and the data movement finally fails.
The cache volume is not expected to be retained after the data mover pod is deleted, so the `reclaimPolicy` of the storage class must be `Delete`.
To detect problems in the storage class and fail earlier, a validation is applied to the storage class; once the validation fails, the cache configuration is ignored and the data mover pod is created without a cache volume.
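As an illustration of what such a validation could look like (the function name and the exact set of checks are assumptions, not the actual implementation), the sketch below verifies that the storage class exists and uses the `Delete` reclaim policy.
```go
package repocache

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// validateCacheStorageClass is an illustrative pre-check; when it fails, the
// cache configuration is ignored and the data mover pod is created without a
// cache volume.
func validateCacheStorageClass(ctx context.Context, cli client.Client, name string) error {
	sc := &storagev1.StorageClass{}
	if err := cli.Get(ctx, client.ObjectKey{Name: name}, sc); err != nil {
		return fmt.Errorf("getting cache storage class %q: %w", name, err)
	}
	if sc.ReclaimPolicy != nil && *sc.ReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
		return fmt.Errorf("cache storage class %q must use reclaimPolicy Delete", name)
	}
	return nil
}
```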
### Cache Volume Configurations
Below configurations are introduced:
- **residentThresholdMB**: the minimum data size (in MB) to be processed (if available) for which a cache volume is created
- **cacheStorageClass**: the name of the storage class to provision the cache PVC
Unlike `cacheLimitMB`, which is set on and affects the backup repository, the above two configurations are actually data mover configurations describing how to create cache volumes for data mover pods; and they don't need to be per backup repository. So we add them to the node-agent configuration.
### Sample
Below are some examples of the node-agent configMap with the configurations:
Sample-1:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1",
        "residentThresholdMB": 1024
    }
}
```
Sample-2:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1"
    }
}
```
Sample-3:
```json
{
    "cacheVolume": {
        "residentThresholdMB": 1024
    }
}
```
**sample-1**: This is a valid configuration. Restores with backup data size larger than 1G will be assigned a cache volume using storage class `sc-1`.
**sample-2**: This is a valid configuration. Data mover pods are always assigned a cache volume using storage class `sc-1`.
**sample-3**: This is not a valid configuration because the storage class is absent. Velero gives up creating a cache volume.
To create the configMap, users need to save a configuration like the samples above to a JSON file and then run the command below:
```
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
```
The cache volume configurations are read by the node-agent server, so users also need to specify the `--node-agent-configmap` parameter to `velero node-agent`.
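For illustration, below is a minimal sketch of how the node-agent could parse this section of the configMap; only the JSON keys come from the samples above, the Go type names are hypothetical.
```go
package main

import (
	"encoding/json"
	"fmt"
)

// cacheVolumeConfig mirrors the "cacheVolume" section of the node-agent configMap.
type cacheVolumeConfig struct {
	StorageClass        string `json:"storageClass,omitempty"`
	ResidentThresholdMB int64  `json:"residentThresholdMB,omitempty"`
}

type nodeAgentConfig struct {
	CacheVolume *cacheVolumeConfig `json:"cacheVolume,omitempty"`
}

func main() {
	raw := []byte(`{"cacheVolume": {"storageClass": "sc-1", "residentThresholdMB": 1024}}`)

	cfg := nodeAgentConfig{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// a sample-3 style config (no storage class) would be rejected here
	if cfg.CacheVolume != nil && cfg.CacheVolume.StorageClass == "" {
		fmt.Println("cache volume configuration ignored: storage class is absent")
		return
	}
	fmt.Printf("cache volume config: %+v\n", *cfg.CacheVolume)
}
```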
## Detailed Design
### Backup and Restore
The restore needs to know the backup size in order to calculate the cache volume size, so new fields are added to the DataDownload and PodVolumeRestore CRDs.
A `snapshotSize` field is added to the `spec` of DataDownload and PodVolumeRestore:
```yaml
spec:
  snapshotID:
    description: SnapshotID is the ID of the Velero backup snapshot to
      be restored from.
    type: string
  snapshotSize:
    description: SnapshotSize is the logical size of the snapshot.
    format: int64
    type: integer
```
`snapshotSize` represents the total size of the backup; during restore, the value is transferred from DataUpload/PodVolumeBackup's `Status.Progress.TotalBytes` to DataDownload/PodVolumeRestore.
It is unlikely that `Status.Progress.TotalBytes` from DataUpload/PodVolumeBackup is unavailable, but once that happens, according to the above formula, `residentThresholdMB` is ignored and the cache volume size is calculated directly from the cache limit of the corresponding backup repository.
### Exposer
Cache volume configurations are retrieved by node-agent and passed through DataDownload/PodVolumeRestore to GenericRestore exposer/PodVolume exposer.
The exposers are responsible for calculating the cache volume size, creating the cache PVCs and mounting them to the restorePods.
If the calculated cache volume size is 0, or any of the critical parameters is missing (e.g., the cache volume storage class), the exposers ignore the cache volume configuration and continue creating the restorePods without cache volumes, so there is no impact on the result of the restore.
Exposers mount the cache volume to a predefined directory and pass the directory to the data mover pods through the `cache-volume-path` parameter.
Below data structure is added to the exposers' expose parameters:
```go
type GenericRestoreExposeParam struct {
	// RestoreSize specifies the data size for the volume to be restored
	RestoreSize int64
	// CacheVolume specifies the info for cache volumes
	CacheVolume *CacheVolumeInfo
}

type PodVolumeExposeParam struct {
	// RestoreSize specifies the data size for the volume to be restored
	RestoreSize int64
	// CacheVolume specifies the info for cache volumes
	CacheVolume *repocache.CacheConfigs
}

type CacheConfigs struct {
	// StorageClass specifies the storage class for cache volumes
	StorageClass string
	// Limit specifies the maximum size of the cache data
	Limit int64
	// ResidentThreshold specifies the minimum size of the cache data to create a cache volume
	ResidentThreshold int64
}
```
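Below is a minimal sketch of the cache PVC an exposer could create from these parameters, following the PVC requirements described above (Velero namespace, `ReadWriteOnce`, `Filesystem` volume mode); the function name and the `GenerateName` prefix are illustrative assumptions.
```go
package exposer

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildCachePVC assembles the cache PVC for one data mover pod; sizeBytes is
// the result of the cache volume size calculation above.
func buildCachePVC(veleroNamespace, ownerName, storageClass string, sizeBytes int64) *corev1.PersistentVolumeClaim {
	fsMode := corev1.PersistentVolumeFilesystem
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    veleroNamespace,
			GenerateName: ownerName + "-cache-",
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			VolumeMode:       &fsMode,
			StorageClassName: &storageClass,
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: *resource.NewQuantity(sizeBytes, resource.BinarySI),
				},
			},
		},
	}
}
```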
### Data Mover Pods
Data mover pods retrieve the cache volume directory from the `cache-volume-path` parameter and pass it to the Unified Repository.
If the directory is empty, the Unified Repository uses the resident location for the data cache, that is, the root file system.
### Kopia Repository
Kopia repository supports cache directory configuration for both metadata and data. The existing `SetupConnectOptions` is modified to customize the `CacheDirectory`:
```go
func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions {
	...
	return repo.ConnectOptions{
		CachingOptions: content.CachingOptions{
			CacheDirectory: cacheDir,
			...
		},
		...
	}
}
```
[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
[2]: Implemented/vgdp-micro-service/vgdp-micro-service.md
[3]: Implemented/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md
[4]: Implemented/repo_maintenance_job_config.md
[5]: Implemented/backup-repo-config.md

View File

@@ -0,0 +1,115 @@
# Wildcard Namespace Support
## Abstract
Velero currently treats namespace patterns with glob characters as literal strings. This design adds wildcard expansion to support flexible namespace selection using patterns like `app-*` or `test-{dev,staging}`.
## Background
Requested in [#1874](https://github.com/vmware-tanzu/velero/issues/1874) for more flexible namespace selection.
## Goals
- Support glob pattern expansion in namespace includes/excludes
- Maintain backward compatibility with existing `*` behavior
## Non-Goals
- Complex regex patterns beyond basic globs
## High-Level Design
Wildcard expansion occurs early in both backup and restore flows, converting patterns to literal namespace lists before normal processing.
### Backup Flow
Expansion happens in `getResourceItems()` before namespace collection:
1. Check if wildcards exist using `ShouldExpandWildcards()`
2. Expand patterns against active cluster namespaces
3. Replace includes/excludes with expanded literal namespaces
4. Continue with normal backup processing
### Restore Flow
Expansion occurs in `execute()` after parsing backup contents:
1. Extract available namespaces from backup tar
2. Expand patterns against backup namespaces (not cluster namespaces)
3. Update restore context with expanded namespaces
4. Continue with normal restore processing
This ensures restore wildcards match actual backup contents, not current cluster state.
## Detailed Design
### Status Fields
Add wildcard expansion tracking to backup and restore CRDs:
```go
type WildcardNamespaceStatus struct {
	// IncludeWildcardMatches records namespaces that matched include patterns
	// +optional
	IncludeWildcardMatches []string `json:"includeWildcardMatches,omitempty"`

	// ExcludeWildcardMatches records namespaces that matched exclude patterns
	// +optional
	ExcludeWildcardMatches []string `json:"excludeWildcardMatches,omitempty"`

	// WildcardResult records final namespaces after wildcard processing
	// +optional
	WildcardResult []string `json:"wildcardResult,omitempty"`
}

// Added to both BackupStatus and RestoreStatus
type BackupStatus struct {
	// WildcardNamespaces contains wildcard expansion results
	// +optional
	WildcardNamespaces *WildcardNamespaceStatus `json:"wildcardNamespaces,omitempty"`
}
```
### Wildcard Expansion Package
New `pkg/util/wildcard/expand.go` package provides:
- `ShouldExpandWildcards()` - Skip expansion for simple "*" case
- `ExpandWildcards()` - Main expansion function using `github.com/gobwas/glob`
- Pattern validation rejecting unsupported regex symbols
**Supported patterns**: `*`, `?`, `[abc]`, `{a,b,c}`
**Unsupported**: `|()`, `**`
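As a rough illustration of the expansion step (not the actual `pkg/util/wildcard` implementation), the sketch below matches include patterns against a namespace list using `github.com/gobwas/glob`; de-duplication and exclude handling are omitted.
```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// expandIncludes returns the namespaces matching any of the include patterns.
func expandIncludes(namespaces, includes []string) ([]string, error) {
	matched := []string{}
	for _, pattern := range includes {
		g, err := glob.Compile(pattern)
		if err != nil {
			return nil, fmt.Errorf("invalid pattern %q: %w", pattern, err)
		}
		for _, ns := range namespaces {
			if g.Match(ns) {
				matched = append(matched, ns)
			}
		}
	}
	return matched, nil
}

func main() {
	namespaces := []string{"app-frontend", "app-backend", "test-dev", "test-prod"}
	result, _ := expandIncludes(namespaces, []string{"app-*", "test-{dev,staging}"})
	fmt.Println(result) // [app-frontend app-backend test-dev]
}
```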
### Implementation Details
#### Backup Integration (`pkg/backup/item_collector.go`)
Expansion in `getResourceItems()`:
- Call `wildcard.ExpandWildcards()` with cluster namespaces
- Update `NamespaceIncludesExcludes` with expanded results
- Populate status fields with expansion results
#### Restore Integration (`pkg/restore/restore.go`)
Expansion in `execute()`:
```go
if wildcard.ShouldExpandWildcards(includes, excludes) {
	availableNamespaces := extractNamespacesFromBackup(backupResources)
	expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
		availableNamespaces, includes, excludes)
	// Update context and status
}
```
## Alternatives Considered
1. **Client-side expansion**: Rejected because it wouldn't work for scheduled backups
2. **Expansion in `collectNamespaces`**: Rejected because these functions expect literal namespaces
## Compatibility
Maintains full backward compatibility - existing "*" behavior unchanged.
## Implementation
Target: Velero 1.18

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.24
go 1.24.0
require (
cloud.google.com/go/storage v1.55.0
@@ -17,7 +17,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
github.com/bombsimon/logrusr/v3 v3.0.0
github.com/evanphx/json-patch/v5 v5.9.0
github.com/evanphx/json-patch/v5 v5.9.11
github.com/fatih/color v1.18.0
github.com/gobwas/glob v0.2.3
github.com/google/go-cmp v0.7.0
@@ -27,8 +27,8 @@ require (
github.com/joho/godotenv v1.3.0
github.com/kopia/kopia v0.16.0
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.1
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0
@@ -49,17 +49,17 @@ require (
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
k8s.io/apiextensions-apiserver v0.31.3
k8s.io/apimachinery v0.31.3
k8s.io/cli-runtime v0.31.3
k8s.io/client-go v0.31.3
k8s.io/api v0.33.3
k8s.io/apiextensions-apiserver v0.33.3
k8s.io/apimachinery v0.33.3
k8s.io/cli-runtime v0.33.3
k8s.io/client-go v0.33.3
k8s.io/klog/v2 v2.130.1
k8s.io/kube-aggregator v0.31.3
k8s.io/metrics v0.31.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.19.3
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
k8s.io/kube-aggregator v0.33.3
k8s.io/metrics v0.33.3
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.21.0
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
sigs.k8s.io/yaml v1.4.0
)
@@ -72,8 +72,8 @@ require (
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
@@ -91,6 +91,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
@@ -101,32 +102,31 @@ require (
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -144,7 +144,7 @@ require (
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.94 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -181,7 +181,7 @@ require (
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
@@ -193,9 +193,9 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
)
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777

go.sum
View File

@@ -84,8 +84,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
@@ -95,8 +95,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI=
@@ -170,6 +170,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bombsimon/logrusr/v3 v3.0.0 h1:tcAoLfuAhKP9npBxWzSdpsvKPQt1XV02nSf2lZA82TQ=
github.com/bombsimon/logrusr/v3 v3.0.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHAnFF5E+g8Ixco=
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
@@ -239,8 +241,8 @@ github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2T
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
@@ -282,8 +284,9 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -291,8 +294,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -318,7 +321,6 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -350,8 +352,10 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -389,8 +393,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
@@ -413,8 +417,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -455,8 +459,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -550,8 +552,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -584,13 +586,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
@@ -804,8 +806,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1206,7 +1208,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1217,47 +1218,50 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE=
k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4=
k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs=
k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI=
k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8=
k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA=
k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-aggregator v0.31.3 h1:DqHPdTglJHgOfB884AaroyxrML/aL82ASYOh65m7MSk=
k8s.io/kube-aggregator v0.31.3/go.mod h1:Kx59Xjnf0SnY47qf9Or++4y3XCHQ3kR0xk1Di6KFiFU=
k8s.io/kube-aggregator v0.33.3 h1:Pa6hQpKJMX0p0D2wwcxXJgu02++gYcGWXoW1z1ZJDfo=
k8s.io/kube-aggregator v0.33.3/go.mod h1:hwvkUoQ8q6gv0+SgNnlmQ3eUue1zHhJKTHsX7BwxwSE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/metrics v0.31.3 h1:DkT9I3gFlb2/z+/4BMY7WrQ/PnbukuV4Yli82v/KBCM=
k8s.io/metrics v0.31.3/go.mod h1:2w9gpd8z+13oJmaPR6p3kDyrDqnxSyoKpnOw2qLIdhI=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/metrics v0.33.3 h1:9CcqBz15JZfISqwca33gdHS8I6XfsK1vA8WUdEnG70g=
k8s.io/metrics v0.33.3/go.mod h1:Aw+cdg4AYHw0HvUY+lCyq40FOO84awrqvJRTw0cmXDs=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@@ -170,6 +170,9 @@ type SnapshotDataMovementInfo struct {
// Moved snapshot data size.
Size int64 `json:"size"`
// Moved snapshot incremental size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The DataUpload's Status.Phase value
Phase velerov2alpha1.DataUploadPhase
}
@@ -217,6 +220,9 @@ type PodVolumeInfo struct {
// The snapshot corresponding volume size.
Size int64 `json:"size,omitempty"`
// The incremental snapshot size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
UploaderType string `json:"uploaderType"`
@@ -240,14 +246,15 @@ type PodVolumeInfo struct {
func newPodVolumeInfoFromPVB(pvb *velerov1api.PodVolumeBackup) *PodVolumeInfo {
return &PodVolumeInfo{
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
IncrementalSize: pvb.Status.IncrementalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
}
}

View File

@@ -118,6 +118,10 @@ type PodVolumeBackupStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// AcceptedTimestamp records the time the pod volume backup is to be prepared.
// The server's time is used for AcceptedTimestamp
// +optional
@@ -134,6 +138,7 @@ type PodVolumeBackupStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"

View File

@@ -58,6 +58,10 @@ type PodVolumeRestoreSpec struct {
// Cancel indicates request to cancel the ongoing PodVolumeRestore. It can be set
// when the PodVolumeRestore is in InProgress phase
Cancel bool `json:"cancel,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.

View File

@@ -58,6 +58,10 @@ type DataDownloadSpec struct {
// NodeOS is OS of the node where the DataDownload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// TargetVolumeSpec is the specification for a target PVC.

View File

@@ -155,6 +155,10 @@ type DataUploadStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// Node is name of the node where the DataUpload is processed.
// +optional
Node string `json:"node,omitempty"`
@@ -185,6 +189,7 @@ type DataUploadStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"
@@ -244,4 +249,8 @@ type DataUploadResult struct {
// NodeOS is OS of the node where the DataUpload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}

View File

@@ -366,7 +366,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
discoveryHelper: kb.discoveryHelper,
podVolumeBackupper: podVolumeBackupper,
podVolumeSnapshotTracker: podvolume.NewTracker(),
volumeSnapshotterGetter: volumeSnapshotterGetter,
volumeSnapshotterCache: NewVolumeSnapshotterCache(volumeSnapshotterGetter),
itemHookHandler: &hook.DefaultItemHookHandler{
PodCommandExecutor: kb.podCommandExecutor,
},
@@ -1198,6 +1198,7 @@ func updateVolumeInfos(
volumeInfos[index].SnapshotDataMovementInfo.SnapshotHandle = dataUpload.Status.SnapshotID
volumeInfos[index].SnapshotDataMovementInfo.RetainedSnapshot = dataUpload.Spec.CSISnapshot.VolumeSnapshot
volumeInfos[index].SnapshotDataMovementInfo.Size = dataUpload.Status.Progress.TotalBytes
volumeInfos[index].SnapshotDataMovementInfo.IncrementalSize = dataUpload.Status.IncrementalBytes
volumeInfos[index].SnapshotDataMovementInfo.Phase = dataUpload.Status.Phase
if dataUpload.Status.Phase == velerov2alpha1.DataUploadPhaseCompleted {

View File

@@ -3269,7 +3269,7 @@ func TestBackupWithSnapshots(t *testing.T) {
err := h.backupper.Backup(h.log, tc.req, backupFile, nil, nil, tc.snapshotterGetter)
require.NoError(t, err)
assert.Equal(t, tc.want, tc.req.VolumeSnapshots)
assert.Equal(t, tc.want, tc.req.VolumeSnapshots.Get())
})
}
}
@@ -4213,7 +4213,7 @@ func TestBackupWithPodVolume(t *testing.T) {
assert.Equal(t, tc.want, req.PodVolumeBackups)
// this assumes that we don't have any test cases where some PVs should be snapshotted using a VolumeSnapshotter
assert.Nil(t, req.VolumeSnapshots)
assert.Nil(t, req.VolumeSnapshots.Get())
})
}
}
@@ -5578,6 +5578,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseFailed).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5603,6 +5604,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseFailed,
},
},
@@ -5616,6 +5618,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5641,6 +5644,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseCompleted,
},
},
@@ -5655,6 +5659,7 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").

View File

@@ -70,13 +70,11 @@ type itemBackupper struct {
discoveryHelper discovery.Helper
podVolumeBackupper podvolume.Backupper
podVolumeSnapshotTracker *podvolume.Tracker
volumeSnapshotterGetter VolumeSnapshotterGetter
kubernetesBackupper *kubernetesBackupper
itemHookHandler hook.ItemHookHandler
snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter
hookTracker *hook.HookTracker
volumeHelperImpl volumehelper.VolumeHelper
volumeSnapshotterCache *VolumeSnapshotterCache
itemHookHandler hook.ItemHookHandler
hookTracker *hook.HookTracker
volumeHelperImpl volumehelper.VolumeHelper
}
type FileForArchive struct {
@@ -502,30 +500,6 @@ func (ib *itemBackupper) executeActions(
return obj, itemFiles, nil
}
// volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation,
// or returns an existing one if one's already been initialized for the location.
func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok {
return bs, nil
}
bs, err := ib.volumeSnapshotterGetter.GetVolumeSnapshotter(snapshotLocation.Spec.Provider)
if err != nil {
return nil, err
}
if err := bs.Init(snapshotLocation.Spec.Config); err != nil {
return nil, err
}
if ib.snapshotLocationVolumeSnapshotters == nil {
ib.snapshotLocationVolumeSnapshotters = make(map[string]vsv1.VolumeSnapshotter)
}
ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name] = bs
return bs, nil
}
// zoneLabelDeprecated is the label that stores availability-zone info
// on PVs this is deprecated on Kubernetes >= 1.17.0
// zoneLabel is the label that stores availability-zone info
@@ -641,7 +615,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
for _, snapshotLocation := range ib.backupRequest.SnapshotLocations {
log := log.WithField("volumeSnapshotLocation", snapshotLocation.Name)
bs, err := ib.volumeSnapshotter(snapshotLocation)
bs, err := ib.volumeSnapshotterCache.SetNX(snapshotLocation)
if err != nil {
log.WithError(err).Error("Error getting volume snapshotter for volume snapshot location")
continue
@@ -699,7 +673,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
snapshot.Status.Phase = volume.SnapshotPhaseCompleted
snapshot.Status.ProviderSnapshotID = snapshotID
}
ib.backupRequest.VolumeSnapshots = append(ib.backupRequest.VolumeSnapshots, snapshot)
ib.backupRequest.VolumeSnapshots.Add(snapshot)
// nil errors are automatically removed
return kubeerrs.NewAggregate(errs)

View File

@@ -17,6 +17,8 @@ limitations under the License.
package backup
import (
"sync"
"github.com/vmware-tanzu/velero/internal/hook"
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
"github.com/vmware-tanzu/velero/internal/volume"
@@ -32,11 +34,27 @@ type itemKey struct {
name string
}
type SynchronizedVSList struct {
sync.Mutex
VolumeSnapshotList []*volume.Snapshot
}
func (s *SynchronizedVSList) Add(vs *volume.Snapshot) {
s.Lock()
defer s.Unlock()
s.VolumeSnapshotList = append(s.VolumeSnapshotList, vs)
}
func (s *SynchronizedVSList) Get() []*volume.Snapshot {
s.Lock()
defer s.Unlock()
return s.VolumeSnapshotList
}
// Request is a request for a backup, with all references to other objects
// materialized (e.g. backup/snapshot locations, includes/excludes, etc.)
type Request struct {
*velerov1api.Backup
StorageLocation *velerov1api.BackupStorageLocation
SnapshotLocations []*velerov1api.VolumeSnapshotLocation
NamespaceIncludesExcludes *collections.IncludesExcludes
@@ -44,7 +62,7 @@ type Request struct {
ResourceHooks []hook.ResourceHook
ResolvedActions []framework.BackupItemResolvedActionV2
ResolvedItemBlockActions []framework.ItemBlockResolvedAction
VolumeSnapshots []*volume.Snapshot
VolumeSnapshots SynchronizedVSList
PodVolumeBackups []*velerov1api.PodVolumeBackup
BackedUpItems *backedUpItemsMap
itemOperationsList *[]*itemoperation.BackupOperation
@@ -80,7 +98,7 @@ func (r *Request) FillVolumesInformation() {
}
r.VolumesInformation.SkippedPVs = skippedPVMap
r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots
r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots.Get()
r.VolumesInformation.PodVolumeBackups = r.PodVolumeBackups
r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
r.VolumesInformation.BackupName = r.Backup.Name
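Note: the SynchronizedVSList above replaces the plain []*volume.Snapshot slice so that item backuppers running on concurrent goroutines can record native snapshots without a data race. A minimal test-style sketch of the intended usage, assuming it lives in the backup package (the goroutine count and the empty Snapshot values are illustrative only):

package backup

import (
	"sync"
	"testing"

	"github.com/vmware-tanzu/velero/internal/volume"
)

func TestSynchronizedVSListConcurrentAdd(t *testing.T) {
	req := &Request{}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// each concurrent item backupper appends under the embedded mutex
			req.VolumeSnapshots.Add(&volume.Snapshot{})
		}()
	}
	wg.Wait()

	// readers obtain the accumulated slice under the same lock
	if got := len(req.VolumeSnapshots.Get()); got != 4 {
		t.Fatalf("expected 4 snapshots, got %d", got)
	}
}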

View File

@@ -0,0 +1,42 @@
package backup
import (
"sync"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
)
type VolumeSnapshotterCache struct {
cache map[string]vsv1.VolumeSnapshotter
mutex sync.Mutex
getter VolumeSnapshotterGetter
}
func NewVolumeSnapshotterCache(getter VolumeSnapshotterGetter) *VolumeSnapshotterCache {
return &VolumeSnapshotterCache{
cache: make(map[string]vsv1.VolumeSnapshotter),
getter: getter,
}
}
func (c *VolumeSnapshotterCache) SetNX(location *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
if snapshotter, exists := c.cache[location.Name]; exists {
return snapshotter, nil
}
snapshotter, err := c.getter.GetVolumeSnapshotter(location.Spec.Provider)
if err != nil {
return nil, err
}
if err := snapshotter.Init(location.Spec.Config); err != nil {
return nil, err
}
c.cache[location.Name] = snapshotter
return snapshotter, nil
}
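The cache above consolidates the per-itemBackupper snapshotter map into a single mutex-protected, request-scoped cache: SetNX initializes a plugin snapshotter the first time a VolumeSnapshotLocation is seen and returns the cached instance afterwards. A minimal sketch of the call pattern, assuming it sits in the backup package where velerov1api is already imported (warmSnapshotters is a hypothetical helper, not part of this change):

// warmSnapshotters is a hypothetical helper: the first SetNX call per location runs
// GetVolumeSnapshotter and Init; subsequent calls for the same location hit the cache.
func warmSnapshotters(getter VolumeSnapshotterGetter, locations []*velerov1api.VolumeSnapshotLocation) *VolumeSnapshotterCache {
	cache := NewVolumeSnapshotterCache(getter)
	for _, location := range locations {
		if _, err := cache.SetNX(location); err != nil {
			// initialization failed for this location; callers handle the fallback per location
			continue
		}
	}
	return cache
}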

View File

@@ -145,6 +145,12 @@ func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress)
return d
}
// IncrementalBytes sets the DataUpload's IncrementalBytes.
func (d *DataUploadBuilder) IncrementalBytes(incrementalBytes int64) *DataUploadBuilder {
d.object.Status.IncrementalBytes = incrementalBytes
return d
}
// Node sets the DataUpload's Node.
func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
d.object.Status.Node = node
@@ -180,3 +186,9 @@ func (d *DataUploadBuilder) Message(msg string) *DataUploadBuilder {
d.object.Status.Message = msg
return d
}
// TotalBytes sets the DataUpload's TotalBytes.
func (d *DataUploadBuilder) TotalBytes(size int64) *DataUploadBuilder {
d.object.Status.Progress.TotalBytes = size
return d
}
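A test-style usage sketch for the new builder methods (the ForDataUpload constructor, the namespace/name, and the byte counts are assumptions for illustration):

du := builder.ForDataUpload("velero", "du-1").
	Progress(shared.DataMoveOperationProgress{TotalBytes: 1000, BytesDone: 1000}).
	IncrementalBytes(500). // bytes new or changed relative to the parent snapshot
	Result()
// du.Status.Progress.TotalBytes == 1000, du.Status.IncrementalBytes == 500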

View File

@@ -17,6 +17,7 @@ limitations under the License.
package builder
import (
corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -87,3 +88,9 @@ func (b *StorageClassBuilder) Provisioner(provisioner string) *StorageClassBuild
b.object.Provisioner = provisioner
return b
}
// ReclaimPolicy sets StorageClass's reclaimPolicy.
func (b *StorageClassBuilder) ReclaimPolicy(policy corev1api.PersistentVolumeReclaimPolicy) *StorageClassBuilder {
b.object.ReclaimPolicy = &policy
return b
}

View File

@@ -53,6 +53,7 @@ type dataMoverRestoreConfig struct {
volumePath string
volumeMode string
ddName string
cacheDir string
resourceTimeout time.Duration
}
@@ -89,6 +90,7 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be restored")
command.Flags().StringVar(&config.ddName, "data-download", config.ddName, "The data download name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -288,5 +290,5 @@ func (s *dataMoverRestore) createDataPathService() (dataPathService, error) {
return datamover.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.ddName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeMode(s.config.volumeMode),
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.config.cacheDir, s.logger), nil
}

View File

@@ -545,24 +545,22 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
return fmt.Errorf("fail to create go-client %w", err)
}
// If either Linux or Windows node-agent is installed, and the node-agent-configmap
// is specified, need to validate the ConfigMap.
if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 {
if len(o.NodeAgentConfigMap) > 0 {
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil {
return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap)
return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", o.NodeAgentConfigMap, err)
}
}
if len(o.RepoMaintenanceJobConfigMap) > 0 {
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil {
return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap)
return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid: %w", o.RepoMaintenanceJobConfigMap, err)
}
}
if len(o.BackupRepoConfigMap) > 0 {
config := make(map[string]any)
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil {
return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap)
return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid: %w", o.BackupRepoConfigMap, err)
}
}

View File

@@ -60,6 +60,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -84,6 +85,7 @@ type nodeAgentServerConfig struct {
resourceTimeout time.Duration
dataMoverPrepareTimeout time.Duration
nodeAgentConfig string
backupRepoConfig string
}
func NewServerCommand(f client.Factory) *cobra.Command {
@@ -121,6 +123,7 @@ func NewServerCommand(f client.Factory) *cobra.Command {
command.Flags().DurationVar(&config.dataMoverPrepareTimeout, "data-mover-prepare-timeout", config.dataMoverPrepareTimeout, "How long to wait for preparing a DataUpload/DataDownload. Default is 30 minutes.")
command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics")
command.Flags().StringVar(&config.nodeAgentConfig, "node-agent-configmap", config.nodeAgentConfig, "The name of ConfigMap containing node-agent configurations.")
command.Flags().StringVar(&config.backupRepoConfig, "backup-repository-configmap", config.backupRepoConfig, "The name of ConfigMap containing backup repository configurations.")
return command
}
@@ -140,7 +143,9 @@ type nodeAgentServer struct {
csiSnapshotClient *snapshotv1client.Clientset
dataPathMgr *datapath.Manager
dataPathConfigs *velerotypes.NodeAgentConfigs
backupRepoConfigs map[string]string
vgdpCounter *exposer.VgdpCounter
repoConfigMgr repository.ConfigManager
}
func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, config nodeAgentServerConfig) (*nodeAgentServer, error) {
@@ -234,6 +239,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
namespace: factory.Namespace(),
nodeName: nodeName,
metricsAddress: config.metricsAddress,
repoConfigMgr: repository.NewConfigManager(logger),
}
// the cache isn't initialized yet when "validatePodVolumesHostPath" is called, the client returned by the manager cannot
@@ -254,6 +260,11 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
if err := s.getDataPathConfigs(); err != nil {
return nil, err
}
if err := s.getBackupRepoConfigs(); err != nil {
return nil, err
}
s.dataPathMgr = datapath.NewManager(s.getDataPathConcurrentNum(defaultDataPathConcurrentNum))
return s, nil
@@ -308,6 +319,8 @@ func (s *nodeAgentServer) run() {
s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
}
privilegedFsBackup := s.dataPathConfigs != nil && s.dataPathConfigs.PrivilegedFsBackup
podResources := corev1api.ResourceRequirements{}
if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
@@ -327,12 +340,30 @@ func (s *nodeAgentServer) run() {
}
}
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass)
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
if err := s.validateCachePVCConfig(*s.dataPathConfigs.CachePVCConfig); err != nil {
s.logger.WithError(err).Warnf("Ignore cache config %v", s.dataPathConfigs.CachePVCConfig)
} else {
s.logger.Infof("Using cache volume configs %v", s.dataPathConfigs.CachePVCConfig)
}
}
var cachePVCConfig *velerotypes.CachePVC
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
cachePVCConfig = s.dataPathConfigs.CachePVCConfig
s.logger.Infof("Using customized cachePVC config %v", cachePVCConfig)
}
if s.backupRepoConfigs != nil {
s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
}
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass)
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -376,12 +407,15 @@ func (s *nodeAgentServer) run() {
s.vgdpCounter,
loadAffinity,
restorePVCConfig,
s.backupRepoConfigs,
cachePVCConfig,
podResources,
s.nodeName,
s.config.dataMoverPrepareTimeout,
s.logger,
s.metrics,
dataMovePriorityClass,
s.repoConfigMgr,
)
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
@@ -555,14 +589,32 @@ func (s *nodeAgentServer) getDataPathConfigs() error {
configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient, s.config.nodeAgentConfig)
if err != nil {
s.logger.WithError(err).Errorf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig)
return err
return errors.Wrapf(err, "error getting node agent configs from configMap %s", s.config.nodeAgentConfig)
}
s.dataPathConfigs = configs
return nil
}
func (s *nodeAgentServer) getBackupRepoConfigs() error {
if s.config.backupRepoConfig == "" {
s.logger.Info("No backup repo configMap is specified")
return nil
}
cm, err := s.kubeClient.CoreV1().ConfigMaps(s.namespace).Get(s.ctx, s.config.backupRepoConfig, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "error getting backup repo configs from configMap %s", s.config.backupRepoConfig)
}
if cm.Data == nil {
return errors.Errorf("no data is in the backup repo configMap %s", s.config.backupRepoConfig)
}
s.backupRepoConfigs = cm.Data
return nil
}
func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
configs := s.dataPathConfigs
@@ -618,3 +670,20 @@ func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
return concurrentNum
}
func (s *nodeAgentServer) validateCachePVCConfig(config velerotypes.CachePVC) error {
if config.StorageClass == "" {
return errors.New("storage class is absent")
}
sc, err := s.kubeClient.StorageV1().StorageClasses().Get(s.ctx, config.StorageClass, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "error getting storage class %s", config.StorageClass)
}
if sc.ReclaimPolicy != nil && *sc.ReclaimPolicy != corev1api.PersistentVolumeReclaimDelete {
return errors.Errorf("unexpected storage class reclaim policy %v", *sc.ReclaimPolicy)
}
return nil
}
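validateCachePVCConfig only accepts a cache StorageClass whose reclaim policy is Delete or unset; anything else, or a missing class, causes the cache config to be ignored with a warning in run() above. A minimal test-style sketch mirroring the fake-clientset setup used in the unit tests below (the class name is illustrative):

sc := builder.ForStorageClass("cache-sc").
	ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).
	Result()

s := &nodeAgentServer{kubeClient: fake.NewSimpleClientset(sc)}

// accepted: the class exists and its reclaim policy is Delete
err := s.validateCachePVCConfig(velerotypes.CachePVC{StorageClass: "cache-sc"})
// a Retain policy or a missing StorageClass would return an error instead
_ = err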

View File

@@ -24,6 +24,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -34,6 +35,8 @@ import (
"github.com/vmware-tanzu/velero/pkg/nodeagent"
testutil "github.com/vmware-tanzu/velero/pkg/test"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
func Test_validatePodVolumesHostPath(t *testing.T) {
@@ -142,11 +145,10 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error)
configMapName string
expectConfigs *velerotypes.NodeAgentConfigs
expectLog string
expectedErr string
}{
{
name: "no config specified",
expectLog: "No node-agent configMap is specified",
name: "no config specified",
},
{
name: "failed to get configs",
@@ -154,7 +156,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-get-error")
},
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it",
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-get-error",
},
{
name: "configs cm not found",
@@ -162,7 +164,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-not-found-error")
},
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it",
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-not-found-error",
},
{
@@ -177,23 +179,21 @@ func Test_getDataPathConfigs(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logBuffer := ""
s := &nodeAgentServer{
config: nodeAgentServerConfig{
nodeAgentConfig: test.configMapName,
},
logger: testutil.NewSingleLogger(&logBuffer),
logger: testutil.NewLogger(),
}
getConfigsFunc = test.getFunc
s.getDataPathConfigs()
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
if test.expectLog == "" {
assert.Empty(t, logBuffer)
err := s.getDataPathConfigs()
if test.expectedErr == "" {
require.NoError(t, err)
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
} else {
assert.Contains(t, logBuffer, test.expectLog)
require.EqualError(t, err, test.expectedErr)
}
})
}
@@ -416,3 +416,117 @@ func Test_getDataPathConcurrentNum(t *testing.T) {
})
}
}
func TestGetBackupRepoConfigs(t *testing.T) {
cmNoData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Result()
cmWithData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Data("cacheLimit", "100").Result()
tests := []struct {
name string
configMapName string
kubeClientObj []runtime.Object
expectConfigs map[string]string
expectedErr string
}{
{
name: "no config specified",
},
{
name: "failed to get configs",
configMapName: "backup-repo-config",
expectedErr: "error getting backup repo configs from configMap backup-repo-config: configmaps \"backup-repo-config\" not found",
},
{
name: "configs data not found",
kubeClientObj: []runtime.Object{cmNoData},
configMapName: "backup-repo-config",
expectedErr: "no data is in the backup repo configMap backup-repo-config",
},
{
name: "succeed",
configMapName: "backup-repo-config",
kubeClientObj: []runtime.Object{cmWithData},
expectConfigs: map[string]string{"cacheLimit": "100"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
s := &nodeAgentServer{
namespace: velerov1api.DefaultNamespace,
kubeClient: fakeKubeClient,
config: nodeAgentServerConfig{
backupRepoConfig: test.configMapName,
},
logger: testutil.NewLogger(),
}
err := s.getBackupRepoConfigs()
if test.expectedErr == "" {
require.NoError(t, err)
require.Equal(t, test.expectConfigs, s.backupRepoConfigs)
} else {
require.EqualError(t, err, test.expectedErr)
}
})
}
}
func TestValidateCachePVCConfig(t *testing.T) {
scWithRetainPolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()
scWithDeletePolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result()
scWithNoPolicy := builder.ForStorageClass("fake-storage-class").Result()
tests := []struct {
name string
config velerotypes.CachePVC
kubeClientObj []runtime.Object
expectedErr string
}{
{
name: "no storage class",
expectedErr: "storage class is absent",
},
{
name: "failed to get storage class",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
expectedErr: "error getting storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
},
{
name: "storage class reclaim policy is not expected",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithRetainPolicy},
expectedErr: "unexpected storage class reclaim policy Retain",
},
{
name: "storage class reclaim policy is delete",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithDeletePolicy},
},
{
name: "storage class with no reclaim policy",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithNoPolicy},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
s := &nodeAgentServer{
kubeClient: fakeKubeClient,
}
err := s.validateCachePVCConfig(test.config)
if test.expectedErr == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, test.expectedErr)
}
})
}
}

View File

@@ -51,6 +51,7 @@ import (
type podVolumeRestoreConfig struct {
volumePath string
pvrName string
cacheDir string
resourceTimeout time.Duration
}
@@ -86,6 +87,7 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.pvrName, "pod-volume-restore", config.pvrName, "The PVR name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -294,5 +296,5 @@ func (s *podVolumeRestore) createDataPathService() (dataPathService, error) {
return podvolume.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.pvrName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeFilesystem,
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.config.cacheDir, s.logger), nil
}

View File

@@ -713,6 +713,9 @@ func describeDataMovement(d *Describer, details bool, info *volume.BackupVolumeI
d.Printf("\t\t\t\tData Mover: %s\n", dataMover)
d.Printf("\t\t\t\tUploader Type: %s\n", info.SnapshotDataMovementInfo.UploaderType)
d.Printf("\t\t\t\tMoved data Size (bytes): %d\n", info.SnapshotDataMovementInfo.Size)
if info.SnapshotDataMovementInfo.IncrementalSize > 0 {
d.Printf("\t\t\t\tIncremental data Size (bytes): %d\n", info.SnapshotDataMovementInfo.IncrementalSize)
}
d.Printf("\t\t\t\tResult: %s\n", info.Result)
} else {
d.Printf("\t\t\tData Movement: %s\n", "included, specify --details for more information")
@@ -835,7 +838,7 @@ func describePodVolumeBackups(d *Describer, details bool, podVolumeBackups []vel
backupsByPod := new(volumesByPod)
for _, backup := range backupsByPhase[phase] {
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress)
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress, backup.Status.IncrementalBytes)
}
d.Printf("\t\t%s:\n", phase)
@@ -885,7 +888,8 @@ type volumesByPod struct {
// Add adds a pod volume with the specified pod namespace, name
// and volume to the appropriate group.
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress) {
// Used for both backup and restore
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress, incrementalBytes int64) {
if v.volumesByPodMap == nil {
v.volumesByPodMap = make(map[string]*podVolumeGroup)
}
@@ -895,6 +899,12 @@ func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veler
// append backup progress percentage if backup is in progress
if phase == "In Progress" && progress.TotalBytes != 0 {
volume = fmt.Sprintf("%s (%.2f%%)", volume, float64(progress.BytesDone)/float64(progress.TotalBytes)*100)
} else if phase == string(velerov1api.PodVolumeBackupPhaseCompleted) && incrementalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v, incremental size: %v)", volume, progress.TotalBytes, incrementalBytes)
} else if (phase == string(velerov1api.PodVolumeBackupPhaseCompleted) ||
phase == string(velerov1api.PodVolumeRestorePhaseCompleted)) &&
progress.TotalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v)", volume, progress.TotalBytes)
}
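	// With the extra incrementalBytes argument, the rendered volume line now varies by
	// phase roughly as follows (illustrative values):
	//   "data (57.35%)"                            // In Progress: BytesDone/TotalBytes percentage
	//   "data (size: 1000, incremental size: 500)" // Completed backup with IncrementalBytes recorded
	//   "data (size: 1000)"                        // Completed backup/restore without incremental bytes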
if group, ok := v.volumesByPodMap[key]; !ok {

View File

@@ -597,11 +597,12 @@ func TestCSISnapshots(t *testing.T) {
Result: volume.VolumeResultFailed,
SnapshotDataMoved: true,
SnapshotDataMovementInfo: &volume.SnapshotDataMovementInfo{
UploaderType: "fake-uploader",
SnapshotHandle: "fake-repo-id-5",
OperationID: "fake-operation-5",
Size: 100,
Phase: velerov2alpha1.DataUploadPhaseFailed,
UploaderType: "fake-uploader",
SnapshotHandle: "fake-repo-id-5",
OperationID: "fake-operation-5",
Size: 100,
IncrementalSize: 50,
Phase: velerov2alpha1.DataUploadPhaseFailed,
},
},
},
@@ -613,6 +614,7 @@ func TestCSISnapshots(t *testing.T) {
Data Mover: velero
Uploader Type: fake-uploader
Moved data Size (bytes): 100
Incremental data Size (bytes): 50
Result: failed
`,
},

View File

@@ -464,6 +464,10 @@ func describeDataMovementInSF(details bool, info *volume.BackupVolumeInfo, snaps
dataMovement["uploaderType"] = info.SnapshotDataMovementInfo.UploaderType
dataMovement["result"] = string(info.Result)
if info.SnapshotDataMovementInfo.Size > 0 || info.SnapshotDataMovementInfo.IncrementalSize > 0 {
dataMovement["size"] = info.SnapshotDataMovementInfo.Size
dataMovement["incrementalSize"] = info.SnapshotDataMovementInfo.IncrementalSize
}
snapshotDetail["dataMovement"] = dataMovement
} else {
@@ -534,7 +538,7 @@ func describePodVolumeBackupsInSF(backups []velerov1api.PodVolumeBackup, details
// group the backups in the current phase by pod (i.e. "ns/name")
backupsByPod := new(volumesByPod)
for _, backup := range backupsByPhase[phase] {
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress)
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress, backup.Status.IncrementalBytes)
}
backupsByPods := make([]map[string]string, 0)

View File

@@ -408,7 +408,7 @@ func describePodVolumeRestores(d *Describer, restores []velerov1api.PodVolumeRes
restoresByPod := new(volumesByPod)
for _, restore := range restoresByPhase[phase] {
restoresByPod.Add(restore.Spec.Pod.Namespace, restore.Spec.Pod.Name, restore.Spec.Volume, phase, restore.Status.Progress)
restoresByPod.Add(restore.Spec.Pod.Namespace, restore.Spec.Pod.Name, restore.Spec.Volume, phase, restore.Status.Progress, 0)
}
d.Printf("\t%s:\n", phase)

View File

@@ -734,8 +734,8 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error {
// native snapshots phase will either be failed or completed right away
// https://github.com/vmware-tanzu/velero/blob/de3ea52f0cc478e99efa7b9524c7f353514261a4/pkg/backup/item_backupper.go#L632-L639
backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots)
for _, snap := range backup.VolumeSnapshots {
backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots.Get())
for _, snap := range backup.VolumeSnapshots.Get() {
if snap.Status.Phase == volume.SnapshotPhaseCompleted {
backup.Status.VolumeSnapshotsCompleted++
}
@@ -882,7 +882,7 @@ func persistBackup(backup *pkgbackup.Request,
}
// Velero-native volume snapshots (as opposed to CSI ones)
nativeVolumeSnapshots, errs := encode.ToJSONGzip(backup.VolumeSnapshots, "native volumesnapshots list")
nativeVolumeSnapshots, errs := encode.ToJSONGzip(backup.VolumeSnapshots.Get(), "native volumesnapshots list")
if errs != nil {
persistErrs = append(persistErrs, errs...)
}

View File

@@ -608,7 +608,7 @@ func getBackupRepositoryConfig(ctx context.Context, ctrlClient client.Client, co
jsonData, found := loc.Data[repoType]
if !found {
log.Info("No data for repo type %s in config map %s", repoType, configName)
log.Infof("No data for repo type %s in config map %s", repoType, configName)
return nil, nil
}

View File

@@ -1047,7 +1047,7 @@ func TestRecallMaintenance(t *testing.T) {
{
name: "wait completion error",
runtimeScheme: schemeFail,
expectedErr: "error waiting incomplete repo maintenance job for repo repo: error listing maintenance job for repo repo: no kind is registered for the type v1.JobList in scheme \"pkg/runtime/scheme.go:100\"",
expectedErr: "error waiting incomplete repo maintenance job for repo repo: error listing maintenance job for repo repo: no kind is registered for the type v1.JobList in scheme",
},
{
name: "no consolidate result",
@@ -1105,7 +1105,7 @@ func TestRecallMaintenance(t *testing.T) {
err := r.recallMaintenance(t.Context(), backupRepo, velerotest.NewLogger())
if test.expectedErr != "" {
assert.EqualError(t, err, test.expectedErr)
assert.ErrorContains(t, err, test.expectedErr)
} else {
assert.NoError(t, err)

View File

@@ -49,6 +49,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
@@ -68,11 +69,14 @@ type DataDownloadReconciler struct {
vgdpCounter *exposer.VgdpCounter
loadAffinity []*kube.LoadAffinity
restorePVCConfig velerotypes.RestorePVC
backupRepoConfigs map[string]string
cacheVolumeConfigs *velerotypes.CachePVC
podResources corev1api.ResourceRequirements
preparingTimeout time.Duration
metrics *metrics.ServerMetrics
cancelledDataDownload map[string]time.Time
dataMovePriorityClass string
repoConfigMgr repository.ConfigManager
}
func NewDataDownloadReconciler(
@@ -83,12 +87,15 @@ func NewDataDownloadReconciler(
counter *exposer.VgdpCounter,
loadAffinity []*kube.LoadAffinity,
restorePVCConfig velerotypes.RestorePVC,
backupRepoConfigs map[string]string,
cacheVolumeConfigs *velerotypes.CachePVC,
podResources corev1api.ResourceRequirements,
nodeName string,
preparingTimeout time.Duration,
logger logrus.FieldLogger,
metrics *metrics.ServerMetrics,
dataMovePriorityClass string,
repoConfigMgr repository.ConfigManager,
) *DataDownloadReconciler {
return &DataDownloadReconciler{
client: client,
@@ -99,6 +106,8 @@ func NewDataDownloadReconciler(
nodeName: nodeName,
restoreExposer: exposer.NewGenericRestoreExposer(kubeClient, logger),
restorePVCConfig: restorePVCConfig,
backupRepoConfigs: backupRepoConfigs,
cacheVolumeConfigs: cacheVolumeConfigs,
dataPathMgr: dataPathMgr,
vgdpCounter: counter,
loadAffinity: loadAffinity,
@@ -107,6 +116,7 @@ func NewDataDownloadReconciler(
metrics: metrics,
cancelledDataDownload: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
repoConfigMgr: repoConfigMgr,
}
}
@@ -882,6 +892,19 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
}
}
var cacheVolume *exposer.CacheConfigs
if r.cacheVolumeConfigs != nil {
if limit, err := r.repoConfigMgr.ClientSideCacheLimit(velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs); err != nil {
log.WithError(err).Warnf("Failed to get client side cache limit for repo type %s from configs %v", velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs)
} else {
cacheVolume = &exposer.CacheConfigs{
Limit: limit,
StorageClass: r.cacheVolumeConfigs.StorageClass,
ResidentThreshold: r.cacheVolumeConfigs.ResidentThreshold,
}
}
}
return exposer.GenericRestoreExposeParam{
TargetPVCName: dd.Spec.TargetVolume.PVC,
TargetNamespace: dd.Spec.TargetVolume.Namespace,
@@ -895,6 +918,8 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
RestorePVCConfig: r.restorePVCConfig,
LoadAffinity: r.loadAffinity,
PriorityClassName: r.dataMovePriorityClass,
RestoreSize: dd.Spec.SnapshotSize,
CacheVolume: cacheVolume,
}, nil
}

View File

@@ -129,7 +129,7 @@ func initDataDownloadReconcilerWithError(t *testing.T, objects []any, needError
dataPathMgr := datapath.NewManager(1)
return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), ""), nil
return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, nil, nil, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), "", nil), nil
}
func TestDataDownloadReconcile(t *testing.T) {

View File

@@ -493,6 +493,7 @@ func (r *DataUploadReconciler) OnDataUploadCompleted(ctx context.Context, namesp
du.Status.Path = result.Backup.Source.ByPath
du.Status.Phase = velerov2alpha1api.DataUploadPhaseCompleted
du.Status.SnapshotID = result.Backup.SnapshotID
du.Status.IncrementalBytes = result.Backup.IncrementalBytes
du.Status.CompletionTimestamp = &metav1.Time{Time: r.Clock.Now()}
if result.Backup.EmptySnapshot {
du.Status.Message = "volume was empty so no data was upload"
@@ -916,6 +917,13 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
}
pv := &corev1api.PersistentVolume{}
if err := r.client.Get(context.Background(), types.NamespacedName{
Name: pvc.Spec.VolumeName,
}, pv); err != nil {
return nil, errors.Wrapf(err, "failed to get source PV %s", pvc.Spec.VolumeName)
}
nodeOS := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)
if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
@@ -963,6 +971,8 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
return &exposer.CSISnapshotExposeParam{
SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
SourceNamespace: du.Spec.SourceNamespace,
SourcePVCName: pvc.Name,
SourcePVName: pv.Name,
StorageClass: du.Spec.CSISnapshot.StorageClass,
HostingPodLabels: hostingPodLabels,
HostingPodAnnotations: hostingPodAnnotation,

View File

@@ -850,11 +850,20 @@ func TestOnDataUploadCompleted(t *testing.T) {
// Add the DataUpload object to the fake client
require.NoError(t, r.client.Create(ctx, du))
r.snapshotExposerList = map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(r.kubeClient, r.csiSnapshotClient, velerotest.NewLogger())}
r.OnDataUploadCompleted(ctx, namespace, duName, datapath.Result{})
r.OnDataUploadCompleted(ctx, namespace, duName, datapath.Result{
Backup: datapath.BackupResult{
SnapshotID: "fake-id",
Source: datapath.AccessPoint{
ByPath: "fake-path",
},
},
})
updatedDu := &velerov2alpha1api.DataUpload{}
require.NoError(t, r.client.Get(ctx, types.NamespacedName{Name: duName, Namespace: namespace}, updatedDu))
assert.Equal(t, velerov2alpha1api.DataUploadPhaseCompleted, updatedDu.Status.Phase)
assert.False(t, updatedDu.Status.CompletionTimestamp.IsZero())
assert.Equal(t, "fake-id", updatedDu.Status.SnapshotID)
assert.Equal(t, "fake-path", updatedDu.Status.Path)
}
func TestFindDataUploadForPod(t *testing.T) {

View File

@@ -60,7 +60,7 @@ const (
// NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance
func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeBackupReconciler {
metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler {
return &PodVolumeBackupReconciler{
client: client,
mgr: mgr,
@@ -77,6 +77,7 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub
exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
cancelledPVB: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
}
}
@@ -97,6 +98,7 @@ type PodVolumeBackupReconciler struct {
resourceTimeout time.Duration
cancelledPVB map[string]time.Time
dataMovePriorityClass string
privileged bool
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
@@ -524,6 +526,7 @@ func (r *PodVolumeBackupReconciler) OnDataPathCompleted(ctx context.Context, nam
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted
pvb.Status.SnapshotID = result.Backup.SnapshotID
pvb.Status.CompletionTimestamp = &completionTime
pvb.Status.IncrementalBytes = result.Backup.IncrementalBytes
if result.Backup.EmptySnapshot {
pvb.Status.Message = "volume was empty so no snapshot was taken"
}
@@ -837,6 +840,7 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB
Resources: r.podResources,
// Priority class name for the data mover pod, retrieved from node-agent-configmap
PriorityClassName: r.dataMovePriorityClass,
Privileged: r.privileged,
}
}

View File

@@ -151,7 +151,8 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler,
corev1api.ResourceRequirements{},
metrics.NewServerMetrics(),
velerotest.NewLogger(),
"", // dataMovePriorityClass
"", // dataMovePriorityClass
false, // privileged
), nil
}

View File

@@ -56,7 +56,7 @@ import (
func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeRestoreReconciler {
logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler {
return &PodVolumeRestoreReconciler{
client: client,
mgr: mgr,
@@ -72,6 +72,7 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
cancelledPVR: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
}
}
@@ -90,6 +91,7 @@ type PodVolumeRestoreReconciler struct {
resourceTimeout time.Duration
cancelledPVR map[string]time.Time
dataMovePriorityClass string
privileged bool
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -896,6 +898,7 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
Resources: r.podResources,
// Priority class name for the data mover pod, retrieved from node-agent-configmap
PriorityClassName: r.dataMovePriorityClass,
Privileged: r.privileged,
}
}

View File

@@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []
dataPathMgr := datapath.NewManager(1)
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), ""), nil
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil
}
func TestPodVolumeRestoreReconcile(t *testing.T) {

View File

@@ -229,7 +229,7 @@ func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Sch
}
for _, backup := range backupList.Items {
if backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress {
if backup.Status.Phase == "" || backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress {
log.Debugf("%s/%s still has backups that are in InProgress or New...", schedule.Namespace, schedule.Name)
return true
}

View File

@@ -149,6 +149,13 @@ func TestReconcileOfSchedule(t *testing.T) {
expectedPhase: string(velerov1.SchedulePhaseEnabled),
backup: builder.ForBackup("ns", "name-20220905120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Phase(velerov1.BackupPhaseNew).Result(),
},
{
name: "schedule already has backup with empty phase (not yet reconciled).",
schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(),
fakeClockTime: "2017-01-01 12:00:00",
expectedPhase: string(velerov1.SchedulePhaseEnabled),
backup: builder.ForBackup("ns", "name-20220905120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Phase("").Result(),
},
}
for _, test := range tests {
@@ -215,10 +222,10 @@ func TestReconcileOfSchedule(t *testing.T) {
backups := &velerov1.BackupList{}
require.NoError(t, client.List(ctx, backups))
// If backup associated with schedule's status is in New or InProgress,
// If backup associated with schedule's status is in New or InProgress or empty phase,
// new backup shouldn't be submitted.
if test.backup != nil &&
(test.backup.Status.Phase == velerov1.BackupPhaseNew || test.backup.Status.Phase == velerov1.BackupPhaseInProgress) {
(test.backup.Status.Phase == "" || test.backup.Status.Phase == velerov1.BackupPhaseNew || test.backup.Status.Phase == velerov1.BackupPhaseInProgress) {
assert.Len(t, backups.Items, 1)
require.NoError(t, client.Delete(ctx, test.backup))
}
@@ -479,4 +486,19 @@ func TestCheckIfBackupInNewOrProgress(t *testing.T) {
reconciler = NewScheduleReconciler("namespace", logger, client, metrics.NewServerMetrics(), false)
result = reconciler.checkIfBackupInNewOrProgress(testSchedule)
assert.True(t, result)
// Clean backup in InProgress phase.
err = client.Delete(ctx, inProgressBackup)
require.NoError(t, err, "fail to delete backup in InProgress phase in TestCheckIfBackupInNewOrProgress: %v", err)
// Create backup with empty phase (not yet reconciled).
emptyPhaseBackup := builder.ForBackup("ns", "backup-3").
ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).
Phase("").Result()
err = client.Create(ctx, emptyPhaseBackup)
require.NoError(t, err, "fail to create backup with empty phase in TestCheckIfBackupInNewOrProgress: %v", err)
reconciler = NewScheduleReconciler("namespace", logger, client, metrics.NewServerMetrics(), false)
result = reconciler.checkIfBackupInNewOrProgress(testSchedule)
assert.True(t, result)
}

View File

@@ -156,7 +156,7 @@ func TestOnDataUploadCompleted(t *testing.T) {
{
name: "marshal fail",
marshalErr: errors.New("fake-marshal-error"),
expectedErr: "Failed to marshal backup result { false { } 0}: fake-marshal-error",
expectedErr: "Failed to marshal backup result { false { } 0 0}: fake-marshal-error",
},
{
name: "succeed",

View File

@@ -61,11 +61,12 @@ type RestoreMicroService struct {
ddInformer cache.Informer
ddHandler cachetool.ResourceEventHandlerRegistration
nodeName string
cacheDir string
}
func NewRestoreMicroService(ctx context.Context, client client.Client, kubeClient kubernetes.Interface, dataDownloadName string, namespace string, nodeName string,
sourceTargetPath datapath.AccessPoint, dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, cred *credentials.CredentialGetter,
ddInformer cache.Informer, log logrus.FieldLogger) *RestoreMicroService {
ddInformer cache.Informer, cacheDir string, log logrus.FieldLogger) *RestoreMicroService {
return &RestoreMicroService{
ctx: ctx,
client: client,
@@ -80,6 +81,7 @@ func NewRestoreMicroService(ctx context.Context, client client.Client, kubeClien
nodeName: nodeName,
resultSignal: make(chan dataPathResult),
ddInformer: ddInformer,
cacheDir: cacheDir,
}
}
@@ -172,6 +174,7 @@ func (r *RestoreMicroService) RunCancelableDataPath(ctx context.Context) (string
RepoIdentifier: "",
RepositoryEnsurer: r.repoEnsurer,
CredentialGetter: r.credentialGetter,
CacheDir: r.cacheDir,
}); err != nil {
return "", errors.Wrap(err, "error to initialize data path")
}

View File

@@ -44,6 +44,7 @@ type FSBRInitParam struct {
RepositoryEnsurer *repository.Ensurer
CredentialGetter *credentials.CredentialGetter
Filesystem filesystem.Interface
CacheDir string
}
// FSBRStartParam define the input param for FSBR start
@@ -112,7 +113,7 @@ func (fs *fileSystemBR) Init(ctx context.Context, param any) error {
return errors.Wrapf(err, "error to ensure backup repository %s-%s-%s", initParam.BSLName, initParam.SourceNamespace, initParam.RepositoryType)
}
err = fs.boostRepoConnect(ctx, initParam.RepositoryType, initParam.CredentialGetter)
err = fs.boostRepoConnect(ctx, initParam.RepositoryType, initParam.CredentialGetter, initParam.CacheDir)
if err != nil {
return errors.Wrapf(err, "error to boost backup repository connection %s-%s-%s", initParam.BSLName, initParam.SourceNamespace, initParam.RepositoryType)
}
@@ -181,7 +182,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, uploaderConfig map[strin
fs.wgDataPath.Done()
}()
snapshotID, emptySnapshot, totalBytes, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, backupParam.RealSource, backupParam.Tags, backupParam.ForceFull,
snapshotID, emptySnapshot, totalBytes, incrementalBytes, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, backupParam.RealSource, backupParam.Tags, backupParam.ForceFull,
backupParam.ParentSnapshot, source.VolMode, uploaderConfig, fs)
if err == provider.ErrorCanceled {
@@ -193,7 +194,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, uploaderConfig map[strin
}
fs.callbacks.OnFailed(context.Background(), fs.namespace, fs.jobName, dataPathErr)
} else {
fs.callbacks.OnCompleted(context.Background(), fs.namespace, fs.jobName, Result{Backup: BackupResult{snapshotID, emptySnapshot, source, totalBytes}})
fs.callbacks.OnCompleted(context.Background(), fs.namespace, fs.jobName, Result{Backup: BackupResult{snapshotID, emptySnapshot, source, totalBytes, incrementalBytes}})
}
}()
@@ -245,9 +246,9 @@ func (fs *fileSystemBR) Cancel() {
fs.log.WithField("user", fs.jobName).Info("FileSystemBR is canceled")
}
func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType string, credentialGetter *credentials.CredentialGetter) error {
func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType string, credentialGetter *credentials.CredentialGetter, cacheDir string) error {
if repositoryType == velerov1api.BackupRepositoryTypeKopia {
if err := repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil {
if err := repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo, CacheDir: cacheDir}); err != nil {
return err
}
} else {

View File

@@ -96,7 +96,7 @@ func TestAsyncBackup(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fs := newFileSystemBR("job-1", "test", nil, "velero", Callbacks{}, velerotest.NewLogger()).(*fileSystemBR)
mockProvider := providerMock.NewProvider(t)
mockProvider.On("RunBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(test.result.Backup.SnapshotID, test.result.Backup.EmptySnapshot, test.result.Backup.TotalBytes, test.err)
mockProvider.On("RunBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(test.result.Backup.SnapshotID, test.result.Backup.EmptySnapshot, test.result.Backup.TotalBytes, test.result.Backup.IncrementalBytes, test.err)
mockProvider.On("Close", mock.Anything).Return(nil)
fs.uploaderProv = mockProvider
fs.initialized = true

View File

@@ -30,10 +30,11 @@ type Result struct {
// BackupResult represents the result of a backup
type BackupResult struct {
SnapshotID string `json:"snapshotID"`
EmptySnapshot bool `json:"emptySnapshot"`
Source AccessPoint `json:"source,omitempty"`
TotalBytes int64 `json:"totalBytes,omitempty"`
SnapshotID string `json:"snapshotID"`
EmptySnapshot bool `json:"emptySnapshot"`
Source AccessPoint `json:"source,omitempty"`
TotalBytes int64 `json:"totalBytes,omitempty"`
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
}
// RestoreResult represents the result of a restore
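Given the json tags on BackupResult above, a completed result marshals roughly as shown below; this is a fragment assuming it runs inside the datapath package, with illustrative values:

res := BackupResult{SnapshotID: "snap-1", TotalBytes: 1000, IncrementalBytes: 500}
b, _ := json.Marshal(res)
fmt.Println(string(b))
// prints something like:
// {"snapshotID":"snap-1","emptySnapshot":false,"source":{...},"totalBytes":1000,"incrementalBytes":500}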

View File

@@ -0,0 +1,99 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exposer
import (
"context"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
type CacheConfigs struct {
Limit int64
StorageClass string
ResidentThreshold int64
}
const (
cacheVolumeName = "cachedir"
cacheVolumeDirSuffix = "-cache"
)
func createCachePVC(ctx context.Context, pvcClient corev1client.CoreV1Interface, ownerObject corev1api.ObjectReference, sc string, size int64, selectedNode string) (*corev1api.PersistentVolumeClaim, error) {
cachePVCName := getCachePVCName(ownerObject)
volumeMode := corev1api.PersistentVolumeFilesystem
pvcObj := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: ownerObject.Namespace,
Name: cachePVCName,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ownerObject.APIVersion,
Kind: ownerObject.Kind,
Name: ownerObject.Name,
UID: ownerObject.UID,
Controller: boolptr.True(),
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
AccessModes: []corev1api.PersistentVolumeAccessMode{corev1api.ReadWriteOnce},
StorageClassName: &sc,
VolumeMode: &volumeMode,
Resources: corev1api.VolumeResourceRequirements{
Requests: corev1api.ResourceList{
corev1api.ResourceStorage: *resource.NewQuantity(size, resource.BinarySI),
},
},
},
}
if selectedNode != "" {
pvcObj.Annotations = map[string]string{
kube.KubeAnnSelectedNode: selectedNode,
}
}
return pvcClient.PersistentVolumeClaims(pvcObj.Namespace).Create(ctx, pvcObj, metav1.CreateOptions{})
}
func getCachePVCName(ownerObject corev1api.ObjectReference) string {
return ownerObject.Name + cacheVolumeDirSuffix
}
func getCacheVolumeSize(dataSize int64, info *CacheConfigs) int64 {
if info == nil {
return 0
}
if dataSize != 0 && dataSize < info.ResidentThreshold {
return 0
}
// 20% inflate and round up to GB
volumeSize := (info.Limit*12/10 + (1 << 30) - 1) / (1 << 30) * (1 << 30)
return volumeSize
}
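The sizing arithmetic above inflates the configured limit by 20% and rounds up to whole GiB; a zero result means no dedicated cache PVC is created. A test-style fragment inside the exposer package, using the same values as the tests that follow:

fmt.Println(getCacheVolumeSize(0, &CacheConfigs{Limit: 1 << 30}))                             // 2147483648: 1 GiB * 1.2, rounded up to 2 GiB
fmt.Println(getCacheVolumeSize(2048, &CacheConfigs{Limit: 5120, ResidentThreshold: 1024}))    // 1073741824: small limits still round up to 1 GiB
fmt.Println(getCacheVolumeSize(2048, &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120})) // 0: data size below the resident threshold, no cache PVC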

View File

@@ -0,0 +1,80 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exposer
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetCacheVolumeSize(t *testing.T) {
tests := []struct {
name string
dataSize int64
info *CacheConfigs
expected int64
}{
{
name: "nil info",
dataSize: 1024,
expected: 0,
},
{
name: "0 data size",
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
expected: 2 << 30,
},
{
name: "0 threshold",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30},
expected: 2 << 30,
},
{
name: "data size is smaller",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
expected: 0,
},
{
name: "data size is larger",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 1024},
expected: 2 << 30,
},
{
name: "limit smaller than 1G",
dataSize: 2048,
info: &CacheConfigs{Limit: 5120, ResidentThreshold: 1024},
expected: 1 << 30,
},
{
name: "larger than 1G after inflate",
dataSize: 2048,
info: &CacheConfigs{Limit: (1 << 30) - 1024, ResidentThreshold: 1024},
expected: 2 << 30,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
size := getCacheVolumeSize(test.dataSize, test.info)
require.Equal(t, test.expected, size)
})
}
}

View File

@@ -35,6 +35,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/nodeagent"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/csi"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -48,6 +49,12 @@ type CSISnapshotExposeParam struct {
// SourceNamespace is the original namespace of the volume that the snapshot is taken for
SourceNamespace string
// SourcePVCName is the original name of the PVC that the snapshot is taken for
SourcePVCName string
// SourcePVName is the name of the PV bound to SourcePVCName
SourcePVName string
// AccessMode defines the mode to access the snapshot
AccessMode string
@@ -189,6 +196,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
backupPVCReadOnly := false
spcNoRelabeling := false
backupPVCAnnotations := map[string]string{}
intoleratableNodes := []string{}
if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists {
if value.StorageClass != "" {
backupPVCStorageClass = value.StorageClass
@@ -206,6 +214,15 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
if len(value.Annotations) > 0 {
backupPVCAnnotations = value.Annotations
}
if _, found := backupPVCAnnotations[util.VSphereCNSFastCloneAnno]; found {
if n, err := kube.GetPVAttachedNodes(ctx, csiExposeParam.SourcePVName, e.kubeClient.StorageV1()); err != nil {
curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warnf("Failed to get attached nodes for source PV, ignoring %s annotation", util.VSphereCNSFastCloneAnno)
delete(backupPVCAnnotations, util.VSphereCNSFastCloneAnno)
} else {
intoleratableNodes = n
}
}
}
backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly, backupPVCAnnotations)
@@ -236,6 +253,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
spcNoRelabeling,
csiExposeParam.NodeOS,
csiExposeParam.PriorityClassName,
intoleratableNodes,
)
if err != nil {
return errors.Wrap(err, "error to create backup pod")
@@ -363,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
}
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
diag += fmt.Sprintf("error listing events, err: %v\n", err)
}
if pod != nil {
diag += kube.DiagnosePod(pod)
diag += kube.DiagnosePod(pod, events)
if pod.Spec.NodeName != "" {
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -374,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
}
if pvc != nil {
diag += kube.DiagnosePVC(pvc)
diag += kube.DiagnosePVC(pvc, events)
if pvc.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -386,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
}
if vs != nil {
diag += csi.DiagnoseVS(vs)
diag += csi.DiagnoseVS(vs, events)
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
@@ -564,6 +587,7 @@ func (e *csiSnapshotExposer) createBackupPod(
spcNoRelabeling bool,
nodeOS string,
priorityClassName string,
intoleratableNodes []string,
) (*corev1api.Pod, error) {
podName := ownerObject.Name
@@ -664,6 +688,18 @@ func (e *csiSnapshotExposer) createBackupPod(
}
var podAffinity *corev1api.Affinity
if len(intoleratableNodes) > 0 {
if affinity == nil {
affinity = &kube.LoadAffinity{}
}
affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
Key: "kubernetes.io/hostname",
Values: intoleratableNodes,
Operator: metav1.LabelSelectorOpNotIn,
})
}
if affinity != nil {
podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity})
}
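For source PVs attached to node-1 and node-2, the appended NotIn requirement ends up (after kube.ToSystemAffinity) as a required node affinity on the backup pod. A self-contained sketch of that shape, mirroring the expectation asserted in the Expose test further down in this diff; the node names are the test fixture values, not anything computed here:

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

func main() {
	// The real values come from kube.GetPVAttachedNodes on the source PV.
	affinity := &corev1api.Affinity{
		NodeAffinity: &corev1api.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
				NodeSelectorTerms: []corev1api.NodeSelectorTerm{{
					MatchExpressions: []corev1api.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: corev1api.NodeSelectorOpNotIn,
						Values:   []string{"node-1", "node-2"},
					}},
				}},
			},
		},
	}
	fmt.Println(affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil)
}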

View File

@@ -153,6 +153,7 @@ func TestCreateBackupPodWithPriorityClass(t *testing.T) {
false, // spcNoRelabeling
kube.NodeOSLinux,
tc.expectedPriorityClass,
nil,
)
require.NoError(t, err, tc.description)
@@ -237,6 +238,7 @@ func TestCreateBackupPodWithMissingConfigMap(t *testing.T) {
false, // spcNoRelabeling
kube.NodeOSLinux,
"", // empty priority class since config map is missing
nil,
)
// Should succeed even when config map is missing

View File

@@ -39,8 +39,11 @@ import (
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
storagev1api "k8s.io/api/storage/v1"
)
type reactor struct {
@@ -156,6 +159,31 @@ func TestExpose(t *testing.T) {
},
}
pvName := "pv-1"
volumeAttachment1 := &storagev1api.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
Name: "va1",
},
Spec: storagev1api.VolumeAttachmentSpec{
Source: storagev1api.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
NodeName: "node-1",
},
}
volumeAttachment2 := &storagev1api.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
Name: "va2",
},
Spec: storagev1api.VolumeAttachmentSpec{
Source: storagev1api.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
NodeName: "node-2",
},
}
tests := []struct {
name string
snapshotClientObj []runtime.Object
@@ -169,6 +197,7 @@ func TestExpose(t *testing.T) {
expectedReadOnlyPVC bool
expectedBackupPVCStorageClass string
expectedAffinity *corev1api.Affinity
expectedPVCAnnotation map[string]string
}{
{
name: "wait vs ready fail",
@@ -624,6 +653,117 @@ func TestExpose(t *testing.T) {
expectedBackupPVCStorageClass: "fake-sc-read-only",
expectedAffinity: nil,
},
{
name: "IntolerateSourceNode, get source node fail",
ownerBackup: backup,
exposeParam: CSISnapshotExposeParam{
SnapshotName: "fake-vs",
SourceNamespace: "fake-ns",
SourcePVName: pvName,
StorageClass: "fake-sc",
AccessMode: AccessModeFileSystem,
OperationTimeout: time.Millisecond,
ExposeTimeout: time.Millisecond,
BackupPVCConfig: map[string]velerotypes.BackupPVC{
"fake-sc": {
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
},
},
Affinity: nil,
},
snapshotClientObj: []runtime.Object{
vsObject,
vscObj,
},
kubeClientObj: []runtime.Object{
daemonSet,
},
kubeReactors: []reactor{
{
verb: "list",
resource: "volumeattachments",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-create-error")
},
},
},
expectedAffinity: nil,
expectedPVCAnnotation: nil,
},
{
name: "IntolerateSourceNode, get empty source node",
ownerBackup: backup,
exposeParam: CSISnapshotExposeParam{
SnapshotName: "fake-vs",
SourceNamespace: "fake-ns",
SourcePVName: pvName,
StorageClass: "fake-sc",
AccessMode: AccessModeFileSystem,
OperationTimeout: time.Millisecond,
ExposeTimeout: time.Millisecond,
BackupPVCConfig: map[string]velerotypes.BackupPVC{
"fake-sc": {
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
},
},
Affinity: nil,
},
snapshotClientObj: []runtime.Object{
vsObject,
vscObj,
},
kubeClientObj: []runtime.Object{
daemonSet,
},
expectedAffinity: nil,
expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
},
{
name: "IntolerateSourceNode, get source nodes",
ownerBackup: backup,
exposeParam: CSISnapshotExposeParam{
SnapshotName: "fake-vs",
SourceNamespace: "fake-ns",
SourcePVName: pvName,
StorageClass: "fake-sc",
AccessMode: AccessModeFileSystem,
OperationTimeout: time.Millisecond,
ExposeTimeout: time.Millisecond,
BackupPVCConfig: map[string]velerotypes.BackupPVC{
"fake-sc": {
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
},
},
Affinity: nil,
},
snapshotClientObj: []runtime.Object{
vsObject,
vscObj,
},
kubeClientObj: []runtime.Object{
daemonSet,
volumeAttachment1,
volumeAttachment2,
},
expectedAffinity: &corev1api.Affinity{
NodeAffinity: &corev1api.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
NodeSelectorTerms: []corev1api.NodeSelectorTerm{
{
MatchExpressions: []corev1api.NodeSelectorRequirement{
{
Key: "kubernetes.io/hostname",
Operator: corev1api.NodeSelectorOpNotIn,
Values: []string{"node-1", "node-2"},
},
},
},
},
},
},
},
expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
},
}
for _, test := range tests {
@@ -705,6 +845,12 @@ func TestExpose(t *testing.T) {
if test.expectedAffinity != nil {
assert.Equal(t, test.expectedAffinity, backupPod.Spec.Affinity)
}
if test.expectedPVCAnnotation != nil {
assert.Equal(t, test.expectedPVCAnnotation, backupPVC.Annotations)
} else {
assert.Empty(t, backupPVC.Annotations)
}
} else {
assert.EqualError(t, err, test.err)
}
@@ -1142,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1167,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1195,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1213,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1258,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-vs-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1273,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-vs-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1290,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-vs-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -1487,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
end diagnose CSI exposer`,
},
{
name: "with events",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithVolumeName,
&backupPV,
&nodeAgentPod,
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
Reason: "reason-1",
Message: "message-1",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-2",
Message: "message-2",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
Reason: "reason-3",
Message: "message-3",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
Reason: "reason-4",
Message: "message-4",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-5",
Message: "message-5",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-6",
Message: "message-6",
},
},
snapshotClientObj: []runtime.Object{
&backupVSWithVSC,
&backupVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
Pod event reason reason-2, message message-2
Pod event reason reason-6, message message-6
PVC velero/fake-backup, phase Pending, binding to fake-pv
PVC event reason reason-3, message message-3
PV fake-pv, phase Pending, reason , message fake-pv-message
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
VS event reason reason-4, message message-4
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
end diagnose CSI exposer`,
},
}
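Judging by the "with events" expectations above, the diagnose helpers render only the events whose InvolvedObject.UID matches the diagnosed object (the Events List call is already scoped to the owner namespace, which is why the other-namespace event never appears). A hypothetical, self-contained stand-in for that filter, not the actual implementation; the real formatting lives in kube.DiagnosePod, kube.DiagnosePVC, and csi.DiagnoseVS:

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// relevantEvents keeps only the events attached to the given object UID.
func relevantEvents(uid types.UID, events *corev1api.EventList) []corev1api.Event {
	var out []corev1api.Event
	for _, ev := range events.Items {
		if ev.InvolvedObject.UID == uid {
			out = append(out, ev)
		}
	}
	return out
}

func main() {
	events := &corev1api.EventList{Items: []corev1api.Event{
		{ObjectMeta: metav1.ObjectMeta{Name: "event-2"}, InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"}, Reason: "reason-2"},
		{ObjectMeta: metav1.ObjectMeta{Name: "event-3"}, InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"}, Reason: "reason-3"},
	}}
	for _, ev := range relevantEvents("fake-pod-uid", events) {
		fmt.Println(ev.Reason) // prints only reason-2
	}
}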

View File

@@ -73,6 +73,12 @@ type GenericRestoreExposeParam struct {
// PriorityClassName is the priority class name for the data mover pod
PriorityClassName string
// RestoreSize specifies the data size for the volume to be restored
RestoreSize int64
// CacheVolume specifies the info for cache volumes
CacheVolume *CacheConfigs
}
// GenericRestoreExposer is the interfaces for a generic restore exposer
@@ -148,6 +154,28 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1ap
affinity := kube.GetLoadAffinityByStorageClass(param.LoadAffinity, storageClassName, curLog)
var cachePVC *corev1api.PersistentVolumeClaim
if param.CacheVolume != nil {
cacheVolumeSize := getCacheVolumeSize(param.RestoreSize, param.CacheVolume)
if cacheVolumeSize > 0 {
curLog.Infof("Creating cache PVC with size %v", cacheVolumeSize)
if pvc, err := createCachePVC(ctx, e.kubeClient.CoreV1(), ownerObject, param.CacheVolume.StorageClass, cacheVolumeSize, selectedNode); err != nil {
return errors.Wrap(err, "error to create cache pvc")
} else {
cachePVC = pvc
}
defer func() {
if err != nil {
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVC.Name, cachePVC.Namespace, 0, curLog)
}
}()
} else {
curLog.Infof("Don't need to create cache volume, restore size %v, cache info %v", param.RestoreSize, param.CacheVolume)
}
}
restorePod, err := e.createRestorePod(
ctx,
ownerObject,
@@ -161,6 +189,7 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1ap
param.NodeOS,
affinity,
param.PriorityClassName,
cachePVC,
)
if err != nil {
return errors.Wrapf(err, "error to create restore pod")
@@ -287,8 +316,22 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
}
cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
if err != nil {
cachePVC = nil
if !apierrors.IsNotFound(err) {
diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
}
}
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
diag += fmt.Sprintf("error listing events, err: %v\n", err)
}
if pod != nil {
diag += kube.DiagnosePod(pod)
diag += kube.DiagnosePod(pod, events)
if pod.Spec.NodeName != "" {
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -298,7 +341,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
}
if pvc != nil {
diag += kube.DiagnosePVC(pvc)
diag += kube.DiagnosePVC(pvc, events)
if pvc.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -309,6 +352,18 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
}
}
if cachePVC != nil {
diag += kube.DiagnosePVC(cachePVC, events)
if cachePVC.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, cachePVC.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting cache pv %s, err: %v\n", cachePVC.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
diag += "end diagnose restore exposer"
return diag
@@ -317,9 +372,11 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
cachePVCName := getCachePVCName(ownerObject)
kube.DeletePodIfAny(ctx, e.kubeClient.CoreV1(), restorePodName, ownerObject.Namespace, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), restorePVCName, ownerObject.Namespace, 0, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}
func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1api.ObjectReference, targetPVCName string, targetNamespace string, timeout time.Duration) error {
@@ -428,6 +485,7 @@ func (e *genericRestoreExposer) createRestorePod(
nodeOS string,
affinity *kube.LoadAffinity,
priorityClassName string,
cachePVC *corev1api.PersistentVolumeClaim,
) (*corev1api.Pod, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
@@ -456,7 +514,6 @@ func (e *genericRestoreExposer) createRestorePod(
var gracePeriod int64
volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode, false)
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1api.Volume{{
Name: volumeName,
@@ -466,6 +523,25 @@ func (e *genericRestoreExposer) createRestorePod(
},
},
}}
cacheVolumePath := ""
if cachePVC != nil {
mnt, _, path := kube.MakePodPVCAttachment(cacheVolumeName, nil, false)
volumeMounts = append(volumeMounts, mnt...)
volumes = append(volumes, corev1api.Volume{
Name: cacheVolumeName,
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: cachePVC.Name,
},
},
})
cacheVolumePath = path
}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes = append(volumes, podInfo.volumes...)
if label == nil {
@@ -483,6 +559,7 @@ func (e *genericRestoreExposer) createRestorePod(
fmt.Sprintf("--volume-mode=%s", volumeMode),
fmt.Sprintf("--data-download=%s", ownerObject.Name),
fmt.Sprintf("--resource-timeout=%s", operationTimeout.String()),
fmt.Sprintf("--cache-volume-path=%s", cacheVolumePath),
}
args = append(args, podInfo.logFormatArgs...)
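When a cache PVC exists it is mounted into the restore pod under the dedicated cacheVolumeName volume and the resulting mount path is handed to the data mover via --cache-volume-path; when no cache PVC is created the flag is still emitted with an empty value. A small sketch of the flag construction; the path shown is illustrative, the real one comes from kube.MakePodPVCAttachment:

package main

import "fmt"

func main() {
	cacheVolumePath := "" // stays empty when no cache PVC was created
	if cachePVCExists := true; cachePVCExists {
		cacheVolumePath = "/cachedir" // illustrative mount path
	}
	args := []string{
		fmt.Sprintf("--data-download=%s", "fake-data-download"),
		fmt.Sprintf("--cache-volume-path=%s", cacheVolumePath),
	}
	fmt.Println(args)
}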

View File

@@ -148,6 +148,7 @@ func TestCreateRestorePodWithPriorityClass(t *testing.T) {
kube.NodeOSLinux,
nil, // affinity
tc.expectedPriorityClass,
nil,
)
require.NoError(t, err, tc.description)
@@ -227,6 +228,7 @@ func TestCreateRestorePodWithMissingConfigMap(t *testing.T) {
kube.NodeOSLinux,
nil, // affinity
"", // empty priority class since config map is missing
nil,
)
// Should succeed even when config map is missing

View File

@@ -26,6 +26,7 @@ import (
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -105,6 +106,10 @@ func TestRestoreExpose(t *testing.T) {
targetPVCName string
targetNamespace string
kubeReactors []reactor
cacheVolume *CacheConfigs
expectBackupPod bool
expectBackupPVC bool
expectCachePVC bool
err string
}{
{
@@ -167,6 +172,70 @@ func TestRestoreExpose(t *testing.T) {
},
err: "error to create restore pvc: fake-create-error",
},
{
name: "succeed",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
expectBackupPod: true,
expectBackupPVC: true,
},
{
name: "succeed, cache config, no cache volume",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{},
expectBackupPod: true,
expectBackupPVC: true,
},
{
name: "create cache volume fail",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{Limit: 1024},
kubeReactors: []reactor{
{
verb: "create",
resource: "persistentvolumeclaims",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-create-error")
},
},
},
err: "error to create cache pvc: fake-create-error",
},
{
name: "succeed with cache volume",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{Limit: 1024},
expectBackupPod: true,
expectBackupPVC: true,
expectCachePVC: true,
},
}
for _, test := range tests {
@@ -203,9 +272,36 @@ func TestRestoreExpose(t *testing.T) {
Resources: corev1api.ResourceRequirements{},
ExposeTimeout: time.Millisecond,
LoadAffinity: nil,
CacheVolume: test.cacheVolume,
},
)
require.EqualError(t, err, test.err)
if test.err != "" {
require.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
}
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPod {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
if test.expectCachePVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
})
}
}
@@ -549,6 +645,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
@@ -574,6 +671,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
@@ -602,6 +700,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
UID: "fake-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
@@ -620,6 +719,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
UID: "fake-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
@@ -647,6 +747,38 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
},
}
cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore-cache",
UID: "fake-cache-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimPending,
},
}
cachePV := corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeStatus{
Phase: corev1api.VolumePending,
Message: "fake-pv-message",
},
}
nodeAgentPod := corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
@@ -758,6 +890,98 @@ Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
{
name: "cache pvc with volume name, no pv",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&cachePVCWithVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
error getting restore pv fake-pv, err: persistentvolumes "fake-pv" not found
PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
end diagnose restore exposer`,
},
{
name: "cache pvc with volume name, pv exists",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&cachePVCWithVolumeName,
&restorePV,
&cachePV,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
PV fake-pv-cache, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
{
name: "with events",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&restorePV,
&nodeAgentPod,
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
Reason: "reason-1",
Message: "message-1",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-2",
Message: "message-2",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
Reason: "reason-3",
Message: "message-3",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-4",
Message: "message-4",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-5",
Message: "message-5",
},
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
Pod event reason reason-2, message message-2
Pod event reason reason-5, message message-5
PVC velero/fake-restore, phase Pending, binding to fake-pv
PVC event reason reason-3, message message-3
PV fake-pv, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
}
@@ -915,6 +1139,7 @@ func TestCreateRestorePod(t *testing.T) {
test.nodeOS,
test.affinity,
"", // priority class name
nil,
)
require.NoError(t, err)

View File

@@ -73,6 +73,15 @@ type PodVolumeExposeParam struct {
// PriorityClassName is the priority class name for the data mover pod
PriorityClassName string
// Privileged indicates whether to create the pod with a privileged container
Privileged bool
// RestoreSize specifies the data size for the volume to be restored, for restore only
RestoreSize int64
// CacheVolume specifies the info for cache volumes, for restore only
CacheVolume *CacheConfigs
}
// PodVolumeExposer is the interfaces for a pod volume exposer
@@ -153,7 +162,29 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
curLog.WithField("path", path).Infof("Host path is retrieved for pod %s, volume %s", param.ClientPodName, param.ClientPodVolume)
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName)
var cachePVC *corev1api.PersistentVolumeClaim
if param.CacheVolume != nil {
cacheVolumeSize := getCacheVolumeSize(param.RestoreSize, param.CacheVolume)
if cacheVolumeSize > 0 {
curLog.Infof("Creating cache PVC with size %v", cacheVolumeSize)
if pvc, err := createCachePVC(ctx, e.kubeClient.CoreV1(), ownerObject, param.CacheVolume.StorageClass, cacheVolumeSize, pod.Spec.NodeName); err != nil {
return errors.Wrap(err, "error to create cache pvc")
} else {
cachePVC = pvc
}
defer func() {
if err != nil {
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVC.Name, cachePVC.Namespace, 0, curLog)
}
}()
} else {
curLog.Infof("Don't need to create cache volume, restore size %v, cache info %v", param.RestoreSize, param.CacheVolume)
}
}
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged, cachePVC)
if err != nil {
return errors.Wrapf(err, "error to create hosting pod")
}
@@ -248,8 +279,22 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
}
cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
if err != nil {
cachePVC = nil
if !apierrors.IsNotFound(err) {
diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
}
}
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
diag += fmt.Sprintf("error listing events, err: %v\n", err)
}
if pod != nil {
diag += kube.DiagnosePod(pod)
diag += kube.DiagnosePod(pod, events)
if pod.Spec.NodeName != "" {
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -258,6 +303,18 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
}
}
if cachePVC != nil {
diag += kube.DiagnosePVC(cachePVC, events)
if cachePVC.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, cachePVC.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting cache pv %s, err: %v\n", cachePVC.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
diag += "end diagnose pod volume exposer"
return diag
@@ -265,11 +322,14 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference) {
restorePodName := ownerObject.Name
cachePVCName := getCachePVCName(ownerObject)
kube.DeletePodIfAny(ctx, e.kubeClient.CoreV1(), restorePodName, ownerObject.Namespace, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}
func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string) (*corev1api.Pod, error) {
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool, cachePVC *corev1api.PersistentVolumeClaim) (*corev1api.Pod, error) {
hostingPodName := ownerObject.Name
containerName := string(ownerObject.UID)
@@ -293,7 +353,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
MountPath: clientVolumePath,
MountPropagation: &mountPropagation,
}}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1api.Volume{{
Name: clientVolumeName,
@@ -303,6 +362,25 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
},
},
}}
cacheVolumePath := ""
if cachePVC != nil {
mnt, _, path := kube.MakePodPVCAttachment(cacheVolumeName, nil, false)
volumeMounts = append(volumeMounts, mnt...)
volumes = append(volumes, corev1api.Volume{
Name: cacheVolumeName,
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: cachePVC.Name,
},
},
})
cacheVolumePath = path
}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes = append(volumes, podInfo.volumes...)
args := []string{
@@ -320,6 +398,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
command = append(command, "backup")
} else {
args = append(args, fmt.Sprintf("--pod-volume-restore=%s", ownerObject.Name))
args = append(args, fmt.Sprintf("--cache-volume-path=%s", cacheVolumePath))
command = append(command, "restore")
}
@@ -327,6 +406,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
args = append(args, podInfo.logLevelArgs...)
var securityCtx *corev1api.PodSecurityContext
var containerSecurityCtx *corev1api.SecurityContext
nodeSelector := map[string]string{}
podOS := corev1api.PodOS{}
if nodeOS == kube.NodeOSWindows {
@@ -359,6 +439,9 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
securityCtx = &corev1api.PodSecurityContext{
RunAsUser: &userID,
}
containerSecurityCtx = &corev1api.SecurityContext{
Privileged: &privileged,
}
nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
podOS.Name = kube.NodeOSLinux
@@ -394,6 +477,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
Env: podInfo.env,
EnvFrom: podInfo.envFrom,
Resources: resources,
SecurityContext: containerSecurityCtx,
},
},
PriorityClassName: priorityClassName,

View File

@@ -11,10 +11,12 @@ import (
"github.com/stretchr/testify/require"
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clientTesting "k8s.io/client-go/testing"
clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -72,6 +74,9 @@ func TestPodVolumeExpose(t *testing.T) {
exposeParam PodVolumeExposeParam
funcGetPodVolumeHostPath func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error)
funcExtractPodVolumeHostPath func(context.Context, string, kubernetes.Interface, string, string) (string, error)
kubeReactors []reactor
expectBackupPod bool
expectCachePVC bool
err string
}{
{
@@ -189,6 +194,113 @@ func TestPodVolumeExpose(t *testing.T) {
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "succeed with privileged pod",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
Privileged: true,
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "succeed, cache config, no cache volume",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "create cache volume fail",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{Limit: 1024},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
kubeReactors: []reactor{
{
verb: "create",
resource: "persistentvolumeclaims",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-create-error")
},
},
},
err: "error to create cache pvc: fake-create-error",
},
{
name: "succeed with cache volume",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{Limit: 1024},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
expectCachePVC: true,
},
}
@@ -196,6 +308,10 @@ func TestPodVolumeExpose(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
for _, reactor := range test.kubeReactors {
fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
}
exposer := podVolumeExposer{
kubeClient: fakeKubeClient,
log: velerotest.NewLogger(),
@@ -225,9 +341,23 @@ func TestPodVolumeExpose(t *testing.T) {
require.NoError(t, err)
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
assert.NoError(t, err)
require.NoError(t, err)
} else {
assert.EqualError(t, err, test.err)
require.EqualError(t, err, test.err)
}
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPod {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
if test.expectCachePVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
})
}
@@ -443,6 +573,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -468,6 +599,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-pod-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
@@ -492,6 +624,38 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
},
}
cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup-cache",
UID: "fake-cache-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimPending,
},
}
cachePV := corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeStatus{
Phase: corev1api.VolumePending,
Message: "fake-pv-message",
},
}
nodeAgentPod := corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
@@ -564,6 +728,79 @@ end diagnose pod volume exposer`,
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
end diagnose pod volume exposer`,
},
{
name: "cache pvc with volume name, no pv",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&cachePVCWithVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
end diagnose pod volume exposer`,
},
{
name: "cache pvc with volume name, pv exists",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&cachePVCWithVolumeName,
&cachePV,
&nodeAgentPod,
},
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
PV fake-pv-cache, phase Pending, reason , message fake-pv-message
end diagnose pod volume exposer`,
},
{
name: "with events",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&nodeAgentPod,
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
Reason: "reason-1",
Message: "message-1",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-2",
Message: "message-2",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-3",
Message: "message-3",
},
&corev1api.Event{
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
Type: corev1api.EventTypeWarning,
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
Reason: "reason-4",
Message: "message-4",
},
},
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
Pod event reason reason-2, message message-2
Pod event reason reason-4, message message-4
end diagnose pod volume exposer`,
},
}

View File

@@ -57,6 +57,10 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1api.DaemonSet
daemonSetArgs = append(daemonSetArgs, fmt.Sprintf("--node-agent-configmap=%s", c.nodeAgentConfigMap))
}
if len(c.backupRepoConfigMap) > 0 {
daemonSetArgs = append(daemonSetArgs, fmt.Sprintf("--backup-repository-configmap=%s", c.backupRepoConfigMap))
}
userID := int64(0)
mountPropagationMode := corev1api.MountPropagationHostToContainer

View File

@@ -60,6 +60,10 @@ func TestDaemonSet(t *testing.T) {
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--node-agent-configmap=node-agent-config-map", ds.Spec.Template.Spec.Containers[0].Args[2])
ds = DaemonSet("velero", WithBackupRepoConfigMap("backup-repo-config-map"))
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--backup-repository-configmap=backup-repo-config-map", ds.Spec.Template.Spec.Containers[0].Args[2])
ds = DaemonSet("velero", WithServiceAccountName("test-sa"))
assert.Equal(t, "test-sa", ds.Spec.Template.Spec.ServiceAccountName)

View File

@@ -426,6 +426,10 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
dsOpts = append(dsOpts, WithNodeAgentConfigMap(o.NodeAgentConfigMap))
}
if len(o.BackupRepoConfigMap) > 0 {
dsOpts = append(dsOpts, WithBackupRepoConfigMap(o.BackupRepoConfigMap))
}
if len(o.KubeletRootDir) > 0 {
dsOpts = append(dsOpts, WithKubeletRootDir(o.KubeletRootDir))
}

View File

@@ -39,6 +39,8 @@ import (
type PVCAction struct {
log logrus.FieldLogger
crClient crclient.Client
// nsPVCs maps namespace -> PVC name -> names of running pods mounting that PVC
nsPVCs map[string]map[string][]string
}
func NewPVCAction(f client.Factory) plugincommon.HandlerInitializer {
@@ -78,31 +80,18 @@ func (a *PVCAction) GetRelatedItems(item runtime.Unstructured, backup *v1.Backup
// Adds pods mounting this PVC to ensure that multiple pods mounting the same RWX
// volume get backed up together.
pods := new(corev1api.PodList)
err := a.crClient.List(context.Background(), pods, crclient.InNamespace(pvc.Namespace))
pvcs, err := a.getPVCList(pvc.Namespace)
if err != nil {
return nil, errors.Wrap(err, "failed to list pods")
return nil, err
}
for i := range pods.Items {
for _, volume := range pods.Items[i].Spec.Volumes {
if volume.VolumeSource.PersistentVolumeClaim == nil {
continue
}
if volume.PersistentVolumeClaim.ClaimName == pvc.Name {
if kube.IsPodRunning(&pods.Items[i]) != nil {
a.log.Infof("Related pod %s is not running, not adding to ItemBlock for PVC %s", pods.Items[i].Name, pvc.Name)
} else {
a.log.Infof("Adding related Pod %s to PVC %s", pods.Items[i].Name, pvc.Name)
relatedItems = append(relatedItems, velero.ResourceIdentifier{
GroupResource: kuberesource.Pods,
Namespace: pods.Items[i].Namespace,
Name: pods.Items[i].Name,
})
}
break
}
}
for _, pod := range pvcs[pvc.Name] {
a.log.Infof("Adding related Pod %s to PVC %s", pod, pvc.Name)
relatedItems = append(relatedItems, velero.ResourceIdentifier{
GroupResource: kuberesource.Pods,
Namespace: pvc.Namespace,
Name: pod,
})
}
// Gather groupedPVCs based on VGS label provided in the backup
@@ -117,6 +106,35 @@ func (a *PVCAction) GetRelatedItems(item runtime.Unstructured, backup *v1.Backup
return relatedItems, nil
}
func (a *PVCAction) getPVCList(ns string) (map[string][]string, error) {
if a.nsPVCs == nil {
a.nsPVCs = make(map[string]map[string][]string)
}
pvcList, ok := a.nsPVCs[ns]
if ok {
return pvcList, nil
}
pvcList = make(map[string][]string)
pods := new(corev1api.PodList)
err := a.crClient.List(context.Background(), pods, crclient.InNamespace(ns))
if err != nil {
return nil, errors.Wrap(err, "failed to list pods")
}
for i := range pods.Items {
if kube.IsPodRunning(&pods.Items[i]) != nil {
a.log.Debugf("Pod %s is not running, not adding to Pod list for PVC IBA plugin", pods.Items[i].Name)
continue
}
for _, volume := range pods.Items[i].Spec.Volumes {
if volume.VolumeSource.PersistentVolumeClaim != nil {
pvcList[volume.VolumeSource.PersistentVolumeClaim.ClaimName] = append(pvcList[volume.VolumeSource.PersistentVolumeClaim.ClaimName], pods.Items[i].Name)
}
}
}
a.nsPVCs[ns] = pvcList
return pvcList, nil
}
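getPVCList builds the namespace's PVC-to-pods index once, keyed on running pods only, and memoizes it on the action, so GetRelatedItems no longer re-lists pods for every PVC in the same namespace. A self-contained sketch of that memoization shape, with fake data standing in for the pod List call:

package main

import "fmt"

// indexer mirrors the per-namespace memoization in PVCAction.getPVCList: the
// expensive build runs once per namespace and later calls reuse the cached map.
type indexer struct {
	cache  map[string]map[string][]string
	builds int
}

func (ix *indexer) index(ns string) map[string][]string {
	if ix.cache == nil {
		ix.cache = map[string]map[string][]string{}
	}
	if m, ok := ix.cache[ns]; ok {
		return m // cache hit: no new pod List
	}
	ix.builds++ // stands in for the crClient.List call over pods
	m := map[string][]string{"pvc-a": {"pod-1", "pod-2"}}
	ix.cache[ns] = m
	return m
}

func main() {
	ix := &indexer{}
	ix.index("app-ns")
	ix.index("app-ns")
	fmt.Println(ix.builds) // 1: the namespace was indexed only once
}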
func (a *PVCAction) Name() string {
return "PVCItemBlockAction"
}

View File

@@ -143,6 +143,10 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
return nil, errors.Errorf("data is not available in config map %s", configName)
}
if len(cm.Data) > 1 {
return nil, errors.Errorf("more than one keys are found in ConfigMap %s's data. only expect one", configName)
}
jsonString := ""
for _, v := range cm.Data {
jsonString = v

View File

@@ -249,6 +249,7 @@ func TestGetConfigs(t *testing.T) {
cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"loadConcurrency\":{\"globalConfig\": 5}}").Result()
cmWithPriorityClass := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"high-priority\"}").Result()
cmWithPriorityClassAndOther := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"low-priority\", \"loadConcurrency\":{\"globalConfig\": 3}}").Result()
cmWithMultipleKeysInData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key-1", "{}", "fake-key-2", "{}").Result()
tests := []struct {
name string
@@ -331,6 +332,14 @@ func TestGetConfigs(t *testing.T) {
},
},
},
{
name: "ConfigMap's Data has more than one key",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
cmWithMultipleKeysInData,
},
expectErr: "more than one key found in ConfigMap node-agent-config's data, only one is expected",
},
}
for _, test := range tests {

Some files were not shown because too many files have changed in this diff.