Compare commits

...

71 Commits

Author SHA1 Message Date
lyndon-li
804d73c4f2 Merge pull request #8601 from Lyndon-Li/release-1.15
Update 1.15.2 change-log
2025-01-13 13:07:39 +08:00
Lyndon-Li
3d3de7f58a 1.15.2 change-log
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-01-13 11:03:54 +08:00
Wenkai Yin(尹文开)
d2f6c186ad Merge pull request #8596 from ywk253100/20250109_pvb
Check the PVB status via the podvolume Backupper rather than calling the API server, to avoid API server issues
2025-01-12 09:19:17 +08:00
Wenkai Yin(尹文开)
25b5c44ebf Check the PVB status via podvolume Backupper rather than calling API server to avoid API server issue
Check the PVB status via podvolume Backupper rather than calling API server to avoid API server issue

Fixes #8587

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2025-01-10 16:18:52 +08:00
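A hedged sketch of the pattern this commit describes: the podvolume Backupper keeps the phases of the PVBs it started in memory, so the backup flow can check completion without a round trip to the API server. All names below are illustrative, not Velero's actual types.

```go
package main

import (
	"fmt"
	"sync"
)

// pvbTracker is a hypothetical stand-in for the Backupper's bookkeeping:
// it records the last phase observed for each PodVolumeBackup it started,
// so callers poll it in memory instead of issuing Get/List API requests.
type pvbTracker struct {
	mu     sync.Mutex
	phases map[string]string
}

func newPVBTracker() *pvbTracker {
	return &pvbTracker{phases: map[string]string{}}
}

func (t *pvbTracker) Record(name, phase string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.phases[name] = phase
}

// Done reports whether a PVB has reached a terminal phase.
func (t *pvbTracker) Done(name string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	p := t.phases[name]
	return p == "Completed" || p == "Failed"
}

func main() {
	tr := newPVBTracker()
	tr.Record("pvb-1", "InProgress")
	tr.Record("pvb-1", "Completed")
	fmt.Println(tr.Done("pvb-1")) // true, with no API-server round trip
}
```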
lyndon-li
7db87619ad Merge pull request #8586 from Lyndon-Li/release-1.15
1.15.2 change-log
2025-01-07 15:41:36 +08:00
Lyndon-Li
b88b543834 1.15.2 change-log
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-01-07 15:11:23 +08:00
lyndon-li
9068c90333 Merge pull request #8585 from kaovilai/expvsFromBackupr1.15
release-1.15: Don't include excluded items in ItemBlocks
2025-01-07 15:03:43 +08:00
Scott Seago
d5ef00a3d4 Don't include excluded items in ItemBlocks
Signed-off-by: Scott Seago <sseago@redhat.com>
2025-01-07 13:40:01 +07:00
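A hypothetical sketch of the fix above: items the backup excludes are skipped when an ItemBlock is assembled. The types and helper here are illustrative, not Velero's actual ItemBlock implementation.

```go
package main

import "fmt"

// item is an illustrative stand-in for a resource identifier.
type item struct{ name string }

// buildItemBlock skips excluded items while assembling a block.
func buildItemBlock(items []item, excluded func(item) bool) []item {
	block := make([]item, 0, len(items))
	for _, it := range items {
		if excluded(it) {
			continue // excluded items never enter the ItemBlock
		}
		block = append(block, it)
	}
	return block
}

func main() {
	items := []item{{"pvc-a"}, {"secret-b"}, {"pvc-c"}}
	skip := func(it item) bool { return it.name == "secret-b" }
	fmt.Println(buildItemBlock(items, skip)) // [{pvc-a} {pvc-c}]
}
```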
Daniel Jiang
674e397bed Merge pull request #8570 from Lyndon-Li/release-1.15
Add 1.15.2 changelog
2025-01-02 18:37:15 +08:00
Lyndon-Li
1a36d22fa2 1.15.2 changelog
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2025-01-02 17:25:39 +08:00
lyndon-li
7282ac3398 Merge pull request #8568 from reasonerjt/fix-maintenance-job-npe-1.15
[Cherrypick-1.15] fix(pkg/repository/maintenance): don't panic when there are no container statuses
2025-01-02 17:03:01 +08:00
Mikaël Cluseau
bc2e04d0a9 fix(pkg/repository/maintenance): don't panic when there are no container statuses
Signed-off-by: Mikaël Cluseau <mikael.cluseau@gmail.com>
2025-01-02 13:27:19 +08:00
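The guard this fix describes, as a minimal sketch: verify the pod actually reports container statuses before indexing into them, instead of assuming ContainerStatuses[0] exists. Field names follow the Kubernetes core/v1 API; the function itself is illustrative, not the exact code in pkg/repository/maintenance.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// terminatedMessage returns the first container's termination message,
// guarding against an empty status list that would make [0] panic.
func terminatedMessage(pod *corev1.Pod) string {
	if len(pod.Status.ContainerStatuses) == 0 {
		return "" // nothing reported yet; indexing [0] here would panic
	}
	if term := pod.Status.ContainerStatuses[0].State.Terminated; term != nil {
		return term.Message
	}
	return ""
}

func main() {
	fmt.Println(terminatedMessage(&corev1.Pod{}) == "") // true, no panic
}
```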
Xun Jiang/Bruce Jiang
200435bb44 Merge pull request #8558 from blackpiglet/sc_map_for_migration_release_1.15
[release-1.15] Use the SC mapping cm in all migration cases.
2025-01-02 13:24:46 +08:00
Xun Jiang
a9f935f834 Use the SC mapping cm in all migration cases.
The migration case is now verified under the following environments:
* CSI.
* CSI data mover.
* FSB.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-30 17:24:39 +08:00
Xun Jiang/Bruce Jiang
32499fc287 Merge pull request #8546 from blackpiglet/skip_deprecation_message
Skip the deprecation message for the dry-run install CLI JSON output.
2024-12-23 18:21:51 +08:00
Xun Jiang
b281ddbb9b Skip the deprecation message for the dry-run install CLI JSON output.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-23 17:26:36 +08:00
Wenkai Yin(尹文开)
be14d4f25c Merge pull request #8547 from ywk253100/241223_ut
Bug fix: increase the WaitGroup counter before starting the goroutine
2024-12-23 17:25:41 +08:00
Wenkai Yin(尹文开)
0e54406352 Bug fix: increase the WaitGroup counter before starting the goroutine

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2024-12-23 17:04:12 +08:00
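The race this commit fixes is the standard sync.WaitGroup pitfall: calling Add inside the goroutine lets Wait return before the counter is incremented. A minimal sketch of the correct ordering:

```go
package main

import "sync"

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // increment before the goroutine starts, never inside it
		go func(n int) {
			defer wg.Done()
			_ = n * n // placeholder work
		}(i)
	}
	wg.Wait() // guaranteed to observe all three Add calls
}
```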
Tiger Kaovilai
845f4ee4f7 Merge pull request #8545 from Lyndon-Li/release-1.15
Pin the version of Golang and base image for v1.15.1
2024-12-23 14:31:19 +07:00
Lyndon-Li
6e9397d785 Pin the version of Golang and base image for v1.15.1
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-23 14:49:26 +08:00
Daniel Jiang
5ca1d184e1 Merge pull request #8544 from blackpiglet/migration_init_release_1.15
Modify the Init logic to fix the migration case error.
2024-12-23 12:52:50 +08:00
Xun Jiang
68e381d288 Modify the Init logic to fix the migration case error.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-23 10:54:47 +08:00
Tiger Kaovilai
b661ed7430 Merge pull request #8540 from Lyndon-Li/release-1.15
[1.15] hide restic deprecation warning for install with crd-only
2024-12-20 15:25:05 +07:00
Lyndon-Li
11a2be69e1 hide restic deprecation warning for install with crd-only
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-20 16:10:59 +08:00
Wenkai Yin(尹文开)
4376a89bc9 Merge pull request #8533 from blackpiglet/refactor_migration_e2e_release_1.15
[cherry-pick] Refactor the migration cases.
2024-12-19 17:04:15 +08:00
Xun Jiang
6f7704a090 Refactor the migration cases.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 16:40:15 +08:00
Wenkai Yin(尹文开)
2b8b6dcd4b Merge pull request #8531 from blackpiglet/8323_fix_release_1.15
Modify the schedule cases.
2024-12-19 16:38:45 +08:00
Xun Jiang
57f873b415 Modify the schedule cases.
* Modify the OrderResource case's verification code.
* Simplify the Periodical case.
* Simplify the InProgress case.
* Prettify the code.
* Replace math/rand with crypto/rand.
* Replace PollUntil with PollUntilContextTimeout.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 15:59:25 +08:00
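For the PollUntil → PollUntilContextTimeout migration listed above, a minimal sketch of the context-aware API from k8s.io/apimachinery/pkg/util/wait; the condition and timings here are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	attempts := 0
	// Poll every second, give up after 30s; `true` runs the condition immediately.
	err := wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // pretend the resource turns ready on try 3
		})
	fmt.Println(err, attempts) // <nil> 3
}
```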
Daniel Jiang
49a8e2413c Merge pull request #8530 from blackpiglet/fix_storageclass_release_1.15
[cherry-pick] Modify the StorageClass install and delete code.
2024-12-19 15:41:33 +08:00
Xun Jiang/Bruce Jiang
e78d082ede Merge branch 'release-1.15' into fix_storageclass_release_1.15
2024-12-19 15:00:31 +08:00
lyndon-li
a01e82df53 Merge pull request #8529 from Lyndon-Li/release-1.15
1.15.1 change-log
2024-12-19 14:53:23 +08:00
Xun Jiang
5ff921f1c4 Modify the StorageClass install and delete code.
* Only install and uninstall SC and VSC once for the default cluster.
* Install and uninstall SC and VSC for the standby cluster in the migration case.
* Refactor the StorageClass and VolumeSnapshotClass YAMLs.
* Prettify e2e_suite_test.go.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 14:39:53 +08:00
lyndon-li
e7e66f7d7c Merge branch 'release-1.15' into release-1.15
2024-12-19 14:31:10 +08:00
Daniel Jiang
005a7eeeb8 Merge pull request #8528 from blackpiglet/vks_support_release_1.15
[cherry-pick] VKS support release 1.15
2024-12-19 14:28:17 +08:00
Lyndon-Li
659197bf10 1.15.1 change-log
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-19 14:06:25 +08:00
Xun Jiang
1a15c72262 Modify upgrade and migration cases.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 13:54:49 +08:00
Xun Jiang
53a34757ac Modify other cases to support VKS environment.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 13:53:07 +08:00
Xun Jiang
cd61732f04 E2E supports VKS data mover environment.
* Add a new flag HAS_VSPHERE_PLUGIN for the E2E test.
* Modify the E2E README for the new parameter.
* Add the VolumeSnapshotClass for VKS.
* Modify the plugin install logic.
* Modify the cases to support the data mover case in VKS.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 13:52:57 +08:00
Xun Jiang
3a627f0c81 Make change to support VKS environment.
FYI, the TKGm environment support is deprecated.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 13:52:22 +08:00
Tiger Kaovilai
579b1d18b7 Merge pull request #8526 from blackpiglet/bump_crypto_in_restic_release_1.15
Bump the golang.org/x/crypto for Restic in release-1.15.
2024-12-19 11:46:35 +07:00
Xun Jiang
f6c2aca65b Bump the golang.org/x/crypto for Restic to fix CVE-2024-45337.
Bump the golang.org/x/net for Velero and Restic to fix CVE-2024-45338.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-19 10:55:14 +08:00
Tiger Kaovilai
8e3389d274 Merge pull request #8519 from blackpiglet/bump_crypto_release_1.15
Bump golang.org/x/crypto to v0.31.0 to fix CVEs.
2024-12-17 20:02:15 +07:00
Xun Jiang
a10d78c948 Bump golang.org/x/crypto to v0.31.0 to fix CVEs.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-12-17 15:06:44 +08:00
Wenkai Yin(尹文开)
f96b1c240b Merge pull request #8517 from ywk253100/241217_hook
[cherry-pick] Fix backup post hook issue
2024-12-17 14:26:15 +08:00
Wenkai Yin(尹文开)
fe8c086299 Fix backup post hook issue

Fixes #8159

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2024-12-17 13:05:46 +08:00
Daniel Jiang
f0001a5b9c Merge pull request #8511 from Lyndon-Li/release-1.15
[1.15] Data mover exposer diagnostic
2024-12-13 15:35:03 +08:00
Lyndon-Li
a217e33221 add diagnostic for data mover exposer
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-13 15:12:07 +08:00
Lyndon-Li
1e2ef374d6 add diagnostic for data mover exposer
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-13 15:09:17 +08:00
Lyndon-Li
8087c7f13a add diagnostic for data mover exposer
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-13 15:09:01 +08:00
Daniel Jiang
bda3ec1bc4 Merge pull request #8505 from reasonerjt/repo-aws-region-hint-1.15
[Cherry-pick-1.15] Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go
2024-12-11 19:34:57 +08:00
Tiger Kaovilai
5ddb319073 Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2024-12-11 15:36:51 +08:00
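A sketch of the hinting pattern this commit refers to, using the aws-sdk-go-v2 manager helper: GetBucketRegion issues its HeadBucket probe from the client's configured region, so giving the client a concrete hint region avoids relying on an unset default. The bucket name and hint region below are placeholders; the exact wiring in pkg/repository/config/aws.go may differ.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	// The client's region acts as the hint from which the lookup starts.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region) // the bucket's actual region
}
```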
lyndon-li
f499025d80 Merge pull request #8501 from Lyndon-Li/release-1.15
[1.15] Issue 8433: add third party labels to data mover pods when the same labels exist in node-agent pods
2024-12-10 14:26:21 +08:00
Lyndon-Li
3f31730003 check existence of the same label from node-agent
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-10 13:44:54 +08:00
Lyndon-Li
cce4d4815a issue 8433: add ask label to data mover pods
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-10 13:42:10 +08:00
lyndon-li
a72aff2a3b Merge pull request #8496 from Lyndon-Li/release-1.15
Some checks failed
Run the E2E test on kind / build (push) Failing after 5m13s
Run the E2E test on kind / run-e2e-test (all matrix combinations, K8s 1.23.17–1.29.1) (push) Has been skipped
Main CI / Build (push) Failing after 1m5s
[1.15] Fix prepare timeout issue
2024-12-09 15:25:53 +08:00
Lyndon-Li
d78947a755 fix prepare timeout issue
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-12-09 15:01:33 +08:00
Daniel Jiang
7d2110ecd1 Merge pull request #8495 from reasonerjt/restore-help-1.15
[Cherry-pick-1.15] Add SecurityContext to restore-helper
2024-12-09 14:36:39 +08:00
Daniel Jiang
666a0ee233 Add SecurityContext to restore-helper
This commit adds a SecurityContext that complies with the "restricted" level
of the Pod Security Standards to the "restore-helper" initContainer.
It ensures the restore won't fail when the cluster enforces Pod Security Admission (PSA).

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-12-09 13:50:49 +08:00
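For context, the "restricted" profile of the Pod Security Standards requires dropping all capabilities, disallowing privilege escalation, running as non-root, and using a runtime-default (or localhost) seccomp profile. A minimal Go sketch of such a container-level SecurityContext follows; the values come from the standard itself, not from this PR's diff:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// restrictedSecurityContext builds a container SecurityContext that satisfies
// the "restricted" Pod Security Standard.
func restrictedSecurityContext() *corev1.SecurityContext {
	runAsNonRoot := true
	allowPrivilegeEscalation := false
	return &corev1.SecurityContext{
		RunAsNonRoot:             &runAsNonRoot,
		AllowPrivilegeEscalation: &allowPrivilegeEscalation,
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
		},
		SeccompProfile: &corev1.SeccompProfile{
			Type: corev1.SeccompProfileTypeRuntimeDefault,
		},
	}
}

func main() {
	fmt.Printf("%+v\n", restrictedSecurityContext())
}
```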
Tiger Kaovilai
347653061d Merge pull request #8399 from kaovilai/removeMinimumGoToolchainGo.mod
Some checks failed
Run the E2E test on kind / build (push) Failing after 12m43s
Run the E2E test on kind / run-e2e-test (all matrix combinations, K8s 1.23.17–1.29.1) (push) Has been skipped
Main CI / Build (push) Failing after 12m46s
release-1.15: Reduce minimum go toolchain in go.mod.
2024-12-03 20:08:29 -05:00
Tiger Kaovilai
c3967c3365 Reduces go.mod minimum go to patch 1.22.0
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2024-11-28 03:42:48 -05:00
Tiger Kaovilai
dd1c6a6aab Merge pull request #8465 from blackpiglet/8440_fix
Some checks failed
Run the E2E test on kind / build (push) Failing after 13m1s
Run the E2E test on kind / run-e2e-test (all matrix combinations, K8s 1.23.17–1.29.1) (push) Has been skipped
Main CI / Build (push) Failing after 11m13s
[release-1.15] Add nil check for updating DataUpload VolumeInfo in finalizing phase.
2024-11-27 12:21:53 -05:00
Xun Jiang
ecd206acce Add nil check for updating DataUpload VolumeInfo in finalizing phase.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-11-27 18:59:38 +08:00
lyndon-li
40b8551a18 Merge pull request #8404 from Lyndon-Li/release-1.15
Some checks failed
Run the E2E test on kind / build (push) Failing after 10m0s
Run the E2E test on kind / run-e2e-test (all matrix combinations, K8s 1.23.17–1.29.1) (push) Has been skipped
Main CI / Build (push) Failing after 14m17s
[1.15] Issue 8391: check ErrCancelled from suffix
2024-11-13 13:42:03 +08:00
Lyndon-Li
0fc822b790 issue 8391: check ErrCancelled from suffix
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-11-13 13:18:30 +08:00
lyndon-li
13d8e7de47 Merge pull request #8402 from Lyndon-Li/release-1.15
[1.15] Issue 8394: move closeDataPath outside callbacks
2024-11-13 13:14:39 +08:00
Lyndon-Li
56cab0608f issue 8394: move closeDataPath outside callbacks
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-11-13 11:16:03 +08:00
Xun Jiang/Bruce Jiang
1d4f147597 Merge pull request #8349 from blackpiglet/fix_v1.15.0_migration_case_error
Fix v1.15.0 migration case error
2024-10-28 11:33:28 +08:00
Xun Jiang
6f79c54dfa Fix the KIBISHII_DIRECTORY parameter not working issue.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-10-25 22:25:22 +08:00
Xun Jiang
15ee6a4a58 Refactor the code to get the plugin images for migration cases.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-10-25 15:46:37 +08:00
Wenkai Yin(尹文开)
d2dec9de8b Merge pull request #8336 from Lyndon-Li/release-1.15
Pin the version of Golang and base image for v1.15.0
2024-10-23 14:06:55 +08:00
Lyndon-Li
7cd92a5aac Pin the version of Golang and base image for v1.15.0
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-10-23 13:36:33 +08:00
105 changed files with 4397 additions and 1695 deletions

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.22.10-bookworm AS velero-builder
ARG GOPROXY
ARG BIN
@@ -47,7 +47,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm AS restic-builder
FROM --platform=$BUILDPLATFORM golang:1.22.10-bookworm AS restic-builder
ARG BIN
ARG TARGETOS
@@ -70,7 +70,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:latest
FROM paketobuildpacks/run-jammy-tiny:0.2.56
LABEL maintainer="Xun Jiang <jxun@vmware.com>"

View File

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.22 as tilt-helper
FROM golang:1.22.10 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1,3 +1,51 @@
## v1.15.2
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.15.2
### Container Image
`velero/velero:v1.15.2`
### Documentation
https://velero.io/docs/v1.15/
### Upgrading
https://velero.io/docs/v1.15/upgrade-to-1.15/
### All Changes
* fix(pkg/repository/maintenance): don't panic when there are no container statuses (#8568, @mcluseau)
* Don't include excluded items in ItemBlocks (#8585, @kaovilai)
* Check the PVB status via the podvolume Backupper rather than calling the API server, to avoid API server issues (#8596, @ywk253100)
## v1.15.1
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.15.1
### Container Image
`velero/velero:v1.15.1`
### Documentation
https://velero.io/docs/v1.15/
### Upgrading
https://velero.io/docs/v1.15/upgrade-to-1.15/
### All Changes
* Fix backup post hook issue #8159 (caused by #7571): always execute backup post hooks after PVBs are handled (#8517, @ywk253100)
* Fix issue #8125, log diagnostic info for data mover exposers when expose times out (#8511, @Lyndon-Li)
* Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go (#8505, @kaovilai)
* Fix issue #8433, add third party labels to data mover pods when the same labels exist in node-agent pods (#8501, @Lyndon-Li)
* Fix issue #8485, add an accepted time so that the prepare timeout can be counted (#8496, @Lyndon-Li)
* Add SecurityContext to restore-helper (#8495, @reasonerjt)
* Add nil check for updating DataUpload VolumeInfo in finalizing phase. (#8465, @blackpiglet)
* Fix issue #8391, check ErrCancelled from suffix of data mover pod's termination message (#8404, @Lyndon-Li)
* Fix issue #8394, don't call closeDataPath in VGDP callbacks, otherwise, the VGDP cleanup will hang (#8402, @Lyndon-Li)
* Reduce minimum required go toolchain in release-1.15 go.mod (#8399, @kaovilai)
* Fix issue #8539, validate uploader types only when o.CRDsOnly is set to false, since CRD installation doesn't rely on uploader types (#8540, @Lyndon-Li)
## v1.15
### Download

go.mod (18 changed lines)
View File

@@ -1,5 +1,11 @@
module github.com/vmware-tanzu/velero
// Do not pin patch version here. Leave patch at X.Y.0
// Unset GOTOOLCHAIN to assume GOTOOLCHAIN=local where go cli version in path is used.
// Use env GOTOOLCHAIN=auto to allow go to decide whichever is newer from go.mod or cli in path.
// or GOTOOLCHAIN=goX.Y.Z to use a specific toolchain version
// See: https://go.dev/doc/toolchain#select and https://github.com/vmware-tanzu/velero/issues/8397
// To bump minor version, run `go get go@X.Y.0 toolchain@none` (ie. `go get go@1.23.0 toolchain@none`)
go 1.22.0
require (
@@ -41,9 +47,9 @@ require (
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/mod v0.17.0
golang.org/x/net v0.26.0
golang.org/x/net v0.33.0
golang.org/x/oauth2 v0.19.0
golang.org/x/text v0.16.0
golang.org/x/text v0.21.0
google.golang.org/api v0.172.0
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
@@ -160,10 +166,10 @@ require (
go.opentelemetry.io/otel/trace v1.25.0 // indirect
go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect

go.sum (24 changed lines)
View File

@@ -793,8 +793,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -881,8 +881,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -910,8 +910,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -983,14 +983,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1000,8 +1000,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
FROM --platform=$TARGETPLATFORM golang:1.22.10-bookworm
ARG GOPROXY

View File

@@ -1,5 +1,5 @@
diff --git a/go.mod b/go.mod
index 5f939c481..1caa51275 100644
index 5f939c481..95d29c82b 100644
--- a/go.mod
+++ b/go.mod
@@ -24,32 +24,32 @@ require (
@@ -9,17 +9,18 @@ index 5f939c481..1caa51275 100644
- golang.org/x/crypto v0.5.0
- golang.org/x/net v0.5.0
- golang.org/x/oauth2 v0.4.0
+ golang.org/x/crypto v0.21.0
+ golang.org/x/net v0.23.0
+ golang.org/x/oauth2 v0.7.0
golang.org/x/sync v0.1.0
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.4.0
- golang.org/x/term v0.4.0
- golang.org/x/text v0.6.0
- google.golang.org/api v0.106.0
+ golang.org/x/sys v0.18.0
+ golang.org/x/term v0.18.0
+ golang.org/x/text v0.14.0
+ golang.org/x/crypto v0.31.0
+ golang.org/x/net v0.33.0
+ golang.org/x/oauth2 v0.7.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/sys v0.28.0
+ golang.org/x/term v0.27.0
+ golang.org/x/text v0.21.0
+ google.golang.org/api v0.114.0
)
@@ -62,7 +63,7 @@ index 5f939c481..1caa51275 100644
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 026e1d2fa..27d4207f4 100644
index 026e1d2fa..d164b17e6 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,13 @@
@@ -126,19 +127,19 @@ index 026e1d2fa..27d4207f4 100644
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -189,11 +189,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
@@ -189,17 +189,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
@@ -147,27 +148,35 @@ index 026e1d2fa..27d4207f4 100644
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -214,17 +214,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=

View File

@@ -69,14 +69,16 @@ type HookTracker struct {
// HookExecutedCnt indicates the number of executed hooks.
hookExecutedCnt int
// hookErrs records hook execution errors if any.
hookErrs []HookErrInfo
hookErrs []HookErrInfo
AsyncItemBlocks *sync.WaitGroup
}
// NewHookTracker creates a hookTracker instance.
func NewHookTracker() *HookTracker {
return &HookTracker{
lock: &sync.RWMutex{},
tracker: make(map[hookKey]hookStatus),
lock: &sync.RWMutex{},
tracker: make(map[hookKey]hookStatus),
AsyncItemBlocks: &sync.WaitGroup{},
}
}
@@ -141,6 +143,8 @@ func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName
// Stat returns the number of attempted hooks and failed hooks
func (ht *HookTracker) Stat() (hookAttemptedCnt int, hookFailedCnt int) {
ht.AsyncItemBlocks.Wait()
ht.lock.RLock()
defer ht.lock.RUnlock()
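
Because post hooks now run in their own goroutines, Stat() would race with them unless it first waits on the AsyncItemBlocks wait group. A self-contained sketch of that synchronization pattern, with hypothetical names rather than Velero's actual types:

```go
package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu       sync.RWMutex
	wg       sync.WaitGroup // mirrors HookTracker.AsyncItemBlocks
	executed int
}

// runPostHooksAsync schedules an async post hook: Add(1) before spawning the
// goroutine, Done() when it finishes.
func (t *tracker) runPostHooksAsync() {
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.mu.Lock()
		t.executed++ // stands in for recording hook results
		t.mu.Unlock()
	}()
}

// Stat blocks until all async hook goroutines have completed, exactly as
// HookTracker.Stat waits on AsyncItemBlocks before taking the read lock.
func (t *tracker) Stat() int {
	t.wg.Wait()
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.executed
}

func main() {
	t := &tracker{}
	t.runPostHooksAsync()
	fmt.Println(t.Stat()) // prints 1
}
```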

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/internal/hook"
@@ -298,7 +299,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
var podVolumeBackupper podvolume.Backupper
if kb.podVolumeBackupperFactory != nil {
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(ctx, backupRequest.Backup, kb.uploaderType)
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(ctx, log, backupRequest.Backup, kb.uploaderType)
if err != nil {
log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
return errors.WithStack(err)
@@ -438,8 +439,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
"name": items[i].name,
}).Infof("Processing item")
// Skip if this item has already been added to an ItemBlock
if items[i].inItemBlock {
// Skip if this item has already been processed (in a block or previously excluded)
if items[i].inItemBlockOrExcluded {
log.Debugf("Not creating new ItemBlock for %s %s/%s because it's already in an ItemBlock", items[i].groupResource.String(), items[i].namespace, items[i].name)
} else {
if itemBlock == nil {
@@ -474,7 +475,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
addNextToBlock := i < len(items)-1 && items[i].orderedResource && items[i+1].orderedResource && items[i].groupResource == items[i+1].groupResource
if itemBlock != nil && len(itemBlock.Items) > 0 && !addNextToBlock {
log.Infof("Backing Up Item Block including %s %s/%s (%v items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items))
backedUpGRs := kb.backupItemBlock(*itemBlock)
backedUpGRs := kb.backupItemBlock(ctx, *itemBlock)
for _, backedUpGR := range backedUpGRs {
backedUpGroupResources[backedUpGR] = true
}
@@ -620,12 +621,23 @@ func (kb *kubernetesBackupper) executeItemBlockActions(
continue
}
itemsMap[relatedItem] = append(itemsMap[relatedItem], &kubernetesResource{
groupResource: relatedItem.GroupResource,
preferredGVR: gvr,
namespace: relatedItem.Namespace,
name: relatedItem.Name,
inItemBlock: true,
groupResource: relatedItem.GroupResource,
preferredGVR: gvr,
namespace: relatedItem.Namespace,
name: relatedItem.Name,
inItemBlockOrExcluded: true,
})
relatedItemMetadata, err := meta.Accessor(item)
if err != nil {
log.WithError(errors.WithStack(err)).Warn("Failed to get object metadata.")
continue
}
// Don't add to ItemBlock if item is excluded
// itemInclusionChecks logs the reason
if !itemBlock.itemBackupper.itemInclusionChecks(log, false, relatedItemMetadata, item, relatedItem.GroupResource) {
continue
}
log.Infof("adding %s %s/%s to ItemBlock", relatedItem.GroupResource, relatedItem.Namespace, relatedItem.Name)
itemBlock.AddUnstructured(relatedItem.GroupResource, item, gvr)
kb.executeItemBlockActions(log, item, relatedItem.GroupResource, relatedItem.Name, relatedItem.Namespace, itemsMap, itemBlock)
@@ -633,7 +645,7 @@ func (kb *kubernetesBackupper) executeItemBlockActions(
}
}
func (kb *kubernetesBackupper) backupItemBlock(itemBlock BackupItemBlock) []schema.GroupResource {
func (kb *kubernetesBackupper) backupItemBlock(ctx context.Context, itemBlock BackupItemBlock) []schema.GroupResource {
// find pods in ItemBlock
// filter pods based on whether they still need to be backed up
// this list will be used to run pre/post hooks
@@ -641,26 +653,22 @@ func (kb *kubernetesBackupper) backupItemBlock(itemBlock BackupItemBlock) []sche
itemBlock.Log.Debug("Executing pre hooks")
for _, item := range itemBlock.Items {
if item.Gr == kuberesource.Pods {
metadata, key, err := kb.itemMetadataAndKey(item)
key, err := kb.getItemKey(item)
if err != nil {
itemBlock.Log.WithError(errors.WithStack(err)).Error("Error accessing pod metadata")
continue
}
// Don't run hooks if pod is excluded
if !itemBlock.itemBackupper.itemInclusionChecks(itemBlock.Log, false, metadata, item.Item, item.Gr) {
continue
}
// Don't run hooks if pod has already been backed up
if _, exists := itemBlock.itemBackupper.backupRequest.BackedUpItems[key]; !exists {
preHookPods = append(preHookPods, item)
}
}
}
postHookPods, failedPods, errs := kb.handleItemBlockHooks(itemBlock, preHookPods, hook.PhasePre)
postHookPods, failedPods, errs := kb.handleItemBlockPreHooks(itemBlock, preHookPods)
for i, pod := range failedPods {
itemBlock.Log.WithError(errs[i]).WithField("name", pod.Item.GetName()).Error("Error running pre hooks for pod")
// if pre hook fails, flag pod as backed-up and move on
_, key, err := kb.itemMetadataAndKey(pod)
key, err := kb.getItemKey(pod)
if err != nil {
itemBlock.Log.WithError(errors.WithStack(err)).Error("Error accessing pod metadata")
continue
@@ -676,34 +684,34 @@ func (kb *kubernetesBackupper) backupItemBlock(itemBlock BackupItemBlock) []sche
}
}
itemBlock.Log.Debug("Executing post hooks")
_, failedPods, errs = kb.handleItemBlockHooks(itemBlock, postHookPods, hook.PhasePost)
for i, pod := range failedPods {
itemBlock.Log.WithError(errs[i]).WithField("name", pod.Item.GetName()).Error("Error running post hooks for pod")
if len(postHookPods) > 0 {
itemBlock.Log.Debug("Executing post hooks")
itemBlock.itemBackupper.hookTracker.AsyncItemBlocks.Add(1)
go kb.handleItemBlockPostHooks(ctx, itemBlock, postHookPods)
}
return grList
}
func (kb *kubernetesBackupper) itemMetadataAndKey(item itemblock.ItemBlockItem) (metav1.Object, itemKey, error) {
func (kb *kubernetesBackupper) getItemKey(item itemblock.ItemBlockItem) (itemKey, error) {
metadata, err := meta.Accessor(item.Item)
if err != nil {
return nil, itemKey{}, err
return itemKey{}, err
}
key := itemKey{
resource: resourceKey(item.Item),
namespace: metadata.GetNamespace(),
name: metadata.GetName(),
}
return metadata, key, nil
return key, nil
}
func (kb *kubernetesBackupper) handleItemBlockHooks(itemBlock BackupItemBlock, hookPods []itemblock.ItemBlockItem, phase hook.HookPhase) ([]itemblock.ItemBlockItem, []itemblock.ItemBlockItem, []error) {
func (kb *kubernetesBackupper) handleItemBlockPreHooks(itemBlock BackupItemBlock, hookPods []itemblock.ItemBlockItem) ([]itemblock.ItemBlockItem, []itemblock.ItemBlockItem, []error) {
var successPods []itemblock.ItemBlockItem
var failedPods []itemblock.ItemBlockItem
var errs []error
for _, pod := range hookPods {
err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, phase, itemBlock.itemBackupper.hookTracker)
err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, hook.PhasePre, itemBlock.itemBackupper.hookTracker)
if err == nil {
successPods = append(successPods, pod)
} else {
@@ -714,6 +722,66 @@ func (kb *kubernetesBackupper) handleItemBlockHooks(itemBlock BackupItemBlock, h
return successPods, failedPods, errs
}
// The post hooks cannot execute until the PVBs have been processed
func (kb *kubernetesBackupper) handleItemBlockPostHooks(ctx context.Context, itemBlock BackupItemBlock, hookPods []itemblock.ItemBlockItem) {
log := itemBlock.Log
defer itemBlock.itemBackupper.hookTracker.AsyncItemBlocks.Done()
// the post hooks will not execute until all PVBs of the item block pods are processed
if err := kb.waitUntilPVBsProcessed(ctx, log, itemBlock, hookPods); err != nil {
log.WithError(err).Error("failed to wait PVBs processed for the ItemBlock")
return
}
for _, pod := range hookPods {
if err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks,
hook.PhasePost, itemBlock.itemBackupper.hookTracker); err != nil {
log.WithError(err).WithField("name", pod.Item.GetName()).Error("Error running post hooks for pod")
}
}
}
// wait for all PVBs of the item block pods to be processed
func (kb *kubernetesBackupper) waitUntilPVBsProcessed(ctx context.Context, log logrus.FieldLogger, itemBlock BackupItemBlock, pods []itemblock.ItemBlockItem) error {
pvbMap := map[*velerov1api.PodVolumeBackup]bool{}
for _, pod := range pods {
namespace, name := pod.Item.GetNamespace(), pod.Item.GetName()
pvbs, err := itemBlock.itemBackupper.podVolumeBackupper.ListPodVolumeBackupsByPod(namespace, name)
if err != nil {
return errors.Wrapf(err, "failed to list PodVolumeBackups for pod %s/%s", namespace, name)
}
for _, pvb := range pvbs {
pvbMap[pvb] = pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed
}
}
checkFunc := func(context.Context) (done bool, err error) {
allProcessed := true
for pvb, processed := range pvbMap {
if processed {
continue
}
updatedPVB, err := itemBlock.itemBackupper.podVolumeBackupper.GetPodVolumeBackup(pvb.Namespace, pvb.Name)
if err != nil {
allProcessed = false
log.Infof("failed to get PVB: %v", err)
continue
}
if updatedPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
updatedPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed {
pvbMap[pvb] = true
continue
}
allProcessed = false
}
return allProcessed, nil
}
return wait.PollUntilContextCancel(ctx, 5*time.Second, true, checkFunc)
}
func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource, itemBlock *BackupItemBlock) bool {
backedUpItem, _, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, false, false, itemBlock)
if aggregate, ok := err.(kubeerrs.Aggregate); ok {
@@ -1043,7 +1111,8 @@ func updateVolumeInfos(
for index := range volumeInfos {
if volumeInfos[index].PVCName == dataUpload.Spec.SourcePVC &&
volumeInfos[index].PVCNamespace == dataUpload.Spec.SourceNamespace {
volumeInfos[index].PVCNamespace == dataUpload.Spec.SourceNamespace &&
volumeInfos[index].SnapshotDataMovementInfo != nil {
if dataUpload.Status.CompletionTimestamp != nil {
volumeInfos[index].CompletionTimestamp = dataUpload.Status.CompletionTimestamp
}
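
The PVB wait above relies on the apimachinery polling helper. A standalone sketch of wait.PollUntilContextCancel, which (with immediate=true) runs the condition right away and then every interval until it reports done, returns an error, or the context ends:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	attempts := 0
	// Poll every 5 seconds, starting immediately, until the condition is done.
	err := wait.PollUntilContextCancel(ctx, 5*time.Second, true,
		func(context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // done after the third check
		})
	fmt.Println(attempts, err)
}
```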

View File

@@ -3894,7 +3894,17 @@ func TestBackupWithHooks(t *testing.T) {
require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, tc.actions, nil))
if tc.wantHookExecutionLog != nil {
assert.Equal(t, tc.wantHookExecutionLog, podCommandExecutor.HookExecutionLog)
// as the post hooks execute asynchronously, check for existence rather than exact order
assert.Equal(t, len(tc.wantHookExecutionLog), len(podCommandExecutor.HookExecutionLog))
m := map[string]struct{}{}
for _, entry := range podCommandExecutor.HookExecutionLog {
m[entry.String()] = struct{}{}
}
for _, entry := range tc.wantHookExecutionLog {
_, exist := m[entry.String()]
assert.True(t, exist)
}
}
assertTarballContents(t, backupFile, append(tc.wantBackedUp, "metadata/version")...)
})
@@ -3903,7 +3913,7 @@ func TestBackupWithHooks(t *testing.T) {
type fakePodVolumeBackupperFactory struct{}
func (f *fakePodVolumeBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) {
func (f *fakePodVolumeBackupperFactory) NewBackupper(context.Context, logrus.FieldLogger, *velerov1.Backup, string) (podvolume.Backupper, error) {
return &fakePodVolumeBackupper{}, nil
}
@@ -3936,6 +3946,24 @@ func (b *fakePodVolumeBackupper) WaitAllPodVolumesProcessed(log logrus.FieldLogg
return b.pvbs
}
func (b *fakePodVolumeBackupper) GetPodVolumeBackup(namespace, name string) (*velerov1.PodVolumeBackup, error) {
for _, pvb := range b.pvbs {
if pvb.Namespace == namespace && pvb.Name == name {
return pvb, nil
}
}
return nil, nil
}
func (b *fakePodVolumeBackupper) ListPodVolumeBackupsByPod(podNamespace, podName string) ([]*velerov1.PodVolumeBackup, error) {
var pvbs []*velerov1.PodVolumeBackup
for _, pvb := range b.pvbs {
if pvb.Spec.Pod.Namespace == podNamespace && pvb.Spec.Pod.Name == podName {
pvbs = append(pvbs, pvb)
}
}
return pvbs, nil
}
// TestBackupWithPodVolume runs backups of pods that are annotated for PodVolume backup,
// and ensures that the pod volume backupper is called, that the returned PodVolumeBackups
// are added to the Request object, and that when PVCs are backed up with PodVolume, the
@@ -4199,7 +4227,7 @@ func newHarness(t *testing.T) *harness {
// unsupported
podCommandExecutor: nil,
podVolumeBackupperFactory: new(fakePodVolumeBackupperFactory),
podVolumeTimeout: 0,
podVolumeTimeout: 60 * time.Second,
},
log: log,
}
@@ -5510,6 +5538,36 @@ func TestUpdateVolumeInfos(t *testing.T) {
},
},
},
{
// This is an error case. No crash happen here is good enough.
name: "VolumeInfo doesn't have SnapshotDataMovementInfo when there is a matching DataUpload",
operations: []*itemoperation.BackupOperation{},
dataUpload: builder.ForDataUpload("velero", "du-1").
CompletionTimestamp(&now).
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
Result(),
volumeInfos: []*volume.BackupVolumeInfo{
{
PVCName: "pvc-1",
PVCNamespace: "ns-1",
CompletionTimestamp: &metav1.Time{},
SnapshotDataMovementInfo: nil,
},
},
expectedVolumeInfos: []*volume.BackupVolumeInfo{
{
PVCName: "pvc-1",
PVCNamespace: "ns-1",
CompletionTimestamp: &metav1.Time{},
SnapshotDataMovementInfo: nil,
},
},
},
}
for _, tc := range tests {
@@ -5526,8 +5584,10 @@ func TestUpdateVolumeInfos(t *testing.T) {
}
require.NoError(t, updateVolumeInfos(tc.volumeInfos, unstructures, tc.operations, logger))
require.Equal(t, tc.expectedVolumeInfos[0].CompletionTimestamp, tc.volumeInfos[0].CompletionTimestamp)
require.Equal(t, tc.expectedVolumeInfos[0].SnapshotDataMovementInfo, tc.volumeInfos[0].SnapshotDataMovementInfo)
if len(tc.expectedVolumeInfos) > 0 {
require.Equal(t, tc.expectedVolumeInfos[0].CompletionTimestamp, tc.volumeInfos[0].CompletionTimestamp)
require.Equal(t, tc.expectedVolumeInfos[0].SnapshotDataMovementInfo, tc.volumeInfos[0].SnapshotDataMovementInfo)
}
})
}
}

View File

@@ -176,7 +176,9 @@ type kubernetesResource struct {
preferredGVR schema.GroupVersionResource
namespace, name, path string
orderedResource bool
inItemBlock bool // set to true during backup processing when added to an ItemBlock
// set to true during backup processing when added to an ItemBlock
// or if the item is excluded from backup.
inItemBlockOrExcluded bool
}
// getItemsFromResourceIdentifiers get the kubernetesResources

View File

@@ -22,6 +22,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/vmware-tanzu/velero/pkg/itemblock"
@@ -41,12 +42,12 @@ func NewBackupItemBlock(log logrus.FieldLogger, itemBackupper *itemBackupper) *B
}
func (b *BackupItemBlock) addKubernetesResource(item *kubernetesResource, log logrus.FieldLogger) *unstructured.Unstructured {
// no-op if item is already in a block
if item.inItemBlock {
// no-op if item has already been processed (in a block or previously excluded)
if item.inItemBlockOrExcluded {
return nil
}
var unstructured unstructured.Unstructured
item.inItemBlock = true
item.inItemBlockOrExcluded = true
f, err := os.Open(item.path)
if err != nil {
@@ -60,6 +61,18 @@ func (b *BackupItemBlock) addKubernetesResource(item *kubernetesResource, log lo
log.WithError(errors.WithStack(err)).Error("Error decoding JSON from file")
return nil
}
metadata, err := meta.Accessor(&unstructured)
if err != nil {
log.WithError(errors.WithStack(err)).Warn("Error accessing item metadata")
return nil
}
// Don't add to ItemBlock if item is excluded
// itemInclusionChecks logs the reason
if !b.itemBackupper.itemInclusionChecks(log, false, metadata, &unstructured, item.groupResource) {
return nil
}
log.Infof("adding %s %s/%s to ItemBlock", item.groupResource, item.namespace, item.name)
b.AddUnstructured(item.groupResource, &unstructured, item.preferredGVR)
return &unstructured

View File

@@ -118,6 +118,12 @@ func (d *DataDownloadBuilder) Labels(labels map[string]string) *DataDownloadBuil
return d
}
// Annotations sets the DataDownload's Annotations.
func (d *DataDownloadBuilder) Annotations(annotations map[string]string) *DataDownloadBuilder {
d.object.Annotations = annotations
return d
}
// StartTimestamp sets the DataDownload's StartTimestamp.
func (d *DataDownloadBuilder) StartTimestamp(startTime *metav1.Time) *DataDownloadBuilder {
d.object.Status.StartTimestamp = startTime

View File

@@ -133,6 +133,12 @@ func (d *DataUploadBuilder) Labels(labels map[string]string) *DataUploadBuilder
return d
}
// Annotations sets the DataUpload's Annotations.
func (d *DataUploadBuilder) Annotations(annotations map[string]string) *DataUploadBuilder {
d.object.Annotations = annotations
return d
}
// Progress sets the DataUpload's Progress.
func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress) *DataUploadBuilder {
d.object.Status.Progress = progress

View File

@@ -42,33 +42,31 @@ import (
// Options collects all the options for installing Velero into a Kubernetes cluster.
type Options struct {
Namespace string
Image string
BucketName string
Prefix string
ProviderName string
PodAnnotations flag.Map
PodLabels flag.Map
ServiceAccountAnnotations flag.Map
ServiceAccountName string
VeleroPodCPURequest string
VeleroPodMemRequest string
VeleroPodCPULimit string
VeleroPodMemLimit string
NodeAgentPodCPURequest string
NodeAgentPodMemRequest string
NodeAgentPodCPULimit string
NodeAgentPodMemLimit string
RestoreOnly bool
SecretFile string
NoSecret bool
DryRun bool
BackupStorageConfig flag.Map
VolumeSnapshotConfig flag.Map
UseNodeAgent bool
PrivilegedNodeAgent bool
//TODO remove UseRestic when migration test out of using it
UseRestic bool
Namespace string
Image string
BucketName string
Prefix string
ProviderName string
PodAnnotations flag.Map
PodLabels flag.Map
ServiceAccountAnnotations flag.Map
ServiceAccountName string
VeleroPodCPURequest string
VeleroPodMemRequest string
VeleroPodCPULimit string
VeleroPodMemLimit string
NodeAgentPodCPURequest string
NodeAgentPodMemRequest string
NodeAgentPodCPULimit string
NodeAgentPodMemLimit string
RestoreOnly bool
SecretFile string
NoSecret bool
DryRun bool
BackupStorageConfig flag.Map
VolumeSnapshotConfig flag.Map
UseNodeAgent bool
PrivilegedNodeAgent bool
Wait bool
UseVolumeSnapshots bool
DefaultRepoMaintenanceFrequency time.Duration
@@ -415,17 +413,17 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
return err
}
// If we're only installing CRDs, we can skip the rest of the validation.
if o.CRDsOnly {
return nil
}
if msg, err := uploader.ValidateUploaderType(o.UploaderType); err != nil {
return err
} else if msg != "" {
fmt.Printf("⚠️ %s\n", msg)
}
// If we're only installing CRDs, we can skip the rest of the validation.
if o.CRDsOnly {
return nil
}
// Our main 3 providers don't support bucket names starting with a dash, and a bucket name starting with one
// can indicate that an environment variable was left blank.
// This case will help catch that error

View File

@@ -19,6 +19,7 @@ package controller
import (
"context"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
@@ -47,7 +48,9 @@ import (
"github.com/vmware-tanzu/velero/pkg/datapath"
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@@ -178,6 +181,15 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
}
hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skipping host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
// Expose() triggers the creation of a pod whose volume is restored from the given volume snapshot,
// but the pod may not be on the same node as the current controller, so we need to return it here.
@@ -223,9 +235,11 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
} else if peekErr := r.restoreExposer.PeekExposed(ctx, getDataDownloadOwnerObject(dd)); peekErr != nil {
r.tryCancelAcceptedDataDownload(ctx, dd, fmt.Sprintf("found a datadownload %s/%s with expose error: %s. marking it as cancelled", dd.Namespace, dd.Name, peekErr))
log.Errorf("Cancel dd %s/%s because of expose error %s", dd.Namespace, dd.Name, peekErr)
} else if dd.Status.StartTimestamp != nil {
if time.Since(dd.Status.StartTimestamp.Time) >= r.preparingTimeout {
r.onPrepareTimeout(ctx, dd)
} else if at, found := dd.Annotations[acceptTimeAnnoKey]; found {
if t, err := time.Parse(time.RFC3339, at); err == nil {
if time.Since(t) >= r.preparingTimeout {
r.onPrepareTimeout(ctx, dd)
}
}
}
@@ -632,12 +646,13 @@ func (r *DataDownloadReconciler) acceptDataDownload(ctx context.Context, dd *vel
updateFunc := func(datadownload *velerov2alpha1api.DataDownload) {
datadownload.Status.Phase = velerov2alpha1api.DataDownloadPhaseAccepted
labels := datadownload.GetLabels()
if labels == nil {
labels = make(map[string]string)
annotations := datadownload.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
labels[acceptNodeLabelKey] = r.nodeName
datadownload.SetLabels(labels)
annotations[acceptNodeAnnoKey] = r.nodeName
annotations[acceptTimeAnnoKey] = r.Clock.Now().Format(time.RFC3339)
datadownload.SetAnnotations(annotations)
}
succeeded, err := r.exclusiveUpdateDataDownload(ctx, updated, updateFunc)
@@ -675,6 +690,11 @@ func (r *DataDownloadReconciler) onPrepareTimeout(ctx context.Context, dd *veler
return
}
diags := strings.Split(r.restoreExposer.DiagnoseExpose(ctx, getDataDownloadOwnerObject(dd)), "\n")
for _, diag := range diags {
log.Warnf("[Diagnose DD expose]%s", diag)
}
r.restoreExposer.CleanUp(ctx, getDataDownloadOwnerObject(dd))
log.Info("Dataupload has been cleaned up")

View File
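The controller change above replaces the StartTimestamp-based prepare-timeout check with an accept-time annotation recorded when the DataDownload is accepted. Annotations rather than labels presumably because RFC3339 timestamps contain characters (such as ':') that are not valid in label values. A runnable sketch of the timeout decision, reusing the acceptTimeAnnoKey constant from this diff; the helper itself is illustrative:

package main

import (
	"fmt"
	"time"
)

const acceptTimeAnnoKey = "velero.io/accepted-at"

// prepareTimedOut reports whether an accepted object has stayed in the Accepted
// phase longer than preparingTimeout, judged by its accept-time annotation.
// A missing or unparsable annotation counts as "not timed out", matching the
// reconciler's lenient behavior above.
func prepareTimedOut(annotations map[string]string, preparingTimeout time.Duration, now time.Time) bool {
	at, found := annotations[acceptTimeAnnoKey]
	if !found {
		return false
	}
	t, err := time.Parse(time.RFC3339, at)
	if err != nil {
		return false
	}
	return now.Sub(t) >= preparingTimeout
}

func main() {
	annotations := map[string]string{
		acceptTimeAnnoKey: time.Now().Add(-5 * time.Minute).Format(time.RFC3339),
	}
	fmt.Println(prepareTimedOut(annotations, time.Minute, time.Now())) // true
}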

@@ -349,7 +349,7 @@ func TestDataDownloadReconcile(t *testing.T) {
},
{
name: "prepare timeout",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).StartTimestamp(&metav1.Time{Time: time.Now().Add(-time.Minute * 5)}).Result(),
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Annotations(map[string]string{acceptTimeAnnoKey: (time.Now().Add(-time.Minute * 5)).Format(time.RFC3339)}).Result(),
expected: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseFailed).Result(),
},
{
@@ -496,7 +496,7 @@ func TestDataDownloadReconcile(t *testing.T) {
if test.expected != nil {
require.NoError(t, err)
assert.Equal(t, dd.Status.Phase, test.expected.Status.Phase)
assert.Equal(t, test.expected.Status.Phase, dd.Status.Phase)
}
if test.isGetExposeErr {
@@ -971,6 +971,10 @@ func (dt *ddResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReferenc
return nil
}
func (dt *ddResumeTestHelper) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
return ""
}
func (dt *ddResumeTestHelper) RebindVolume(context.Context, corev1.ObjectReference, string, string, time.Duration) error {
return nil
}
@@ -1003,22 +1007,22 @@ func TestAttemptDataDownloadResume(t *testing.T) {
},
{
name: "accepted DataDownload in the current node",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(),
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Annotations(map[string]string{acceptNodeAnnoKey: "node-1"}).Result(),
cancelledDataDownloads: []string{dataDownloadName},
acceptedDataDownloads: []string{dataDownloadName},
},
{
name: "accepted DataDownload with dd label but is canceled",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Cancel(true).Labels(map[string]string{
acceptNodeLabelKey: "node-1",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Cancel(true).Annotations(map[string]string{
acceptNodeAnnoKey: "node-1",
}).Result(),
acceptedDataDownloads: []string{dataDownloadName},
cancelledDataDownloads: []string{dataDownloadName},
},
{
name: "accepted DataDownload with dd label but cancel fail",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Labels(map[string]string{
acceptNodeLabelKey: "node-1",
dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Annotations(map[string]string{
acceptNodeAnnoKey: "node-1",
}).Result(),
needErrs: []bool{false, false, true, false, false, false},
acceptedDataDownloads: []string{dataDownloadName},

View File

@@ -19,6 +19,7 @@ package controller
import (
"context"
"fmt"
"strings"
"time"
snapshotter "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/typed/volumesnapshot/v1"
@@ -50,12 +51,14 @@ import (
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
const (
dataUploadDownloadRequestor = "snapshot-data-upload-download"
acceptNodeLabelKey = "velero.io/accepted-by"
acceptNodeAnnoKey = "velero.io/accepted-by"
acceptTimeAnnoKey = "velero.io/accepted-at"
DataUploadDownloadFinalizer = "velero.io/data-upload-download-finalizer"
preparingMonitorFrequency = time.Minute
)
@@ -255,9 +258,11 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
} else if peekErr := ep.PeekExposed(ctx, getOwnerObject(du)); peekErr != nil {
r.tryCancelAcceptedDataUpload(ctx, du, fmt.Sprintf("found a dataupload %s/%s with expose error: %s. marking it as cancelled", du.Namespace, du.Name, peekErr))
log.Errorf("Cancel du %s/%s because of expose error %s", du.Namespace, du.Name, peekErr)
} else if du.Status.StartTimestamp != nil {
if time.Since(du.Status.StartTimestamp.Time) >= r.preparingTimeout {
r.onPrepareTimeout(ctx, du)
} else if at, found := du.Annotations[acceptTimeAnnoKey]; found {
if t, err := time.Parse(time.RFC3339, at); err == nil {
if time.Since(t) >= r.preparingTimeout {
r.onPrepareTimeout(ctx, du)
}
}
}
@@ -699,12 +704,13 @@ func (r *DataUploadReconciler) acceptDataUpload(ctx context.Context, du *velerov
updateFunc := func(dataUpload *velerov2alpha1api.DataUpload) {
dataUpload.Status.Phase = velerov2alpha1api.DataUploadPhaseAccepted
labels := dataUpload.GetLabels()
if labels == nil {
labels = make(map[string]string)
annotations := dataUpload.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
labels[acceptNodeLabelKey] = r.nodeName
dataUpload.SetLabels(labels)
annotations[acceptNodeAnnoKey] = r.nodeName
annotations[acceptTimeAnnoKey] = r.Clock.Now().Format(time.RFC3339)
dataUpload.SetAnnotations(annotations)
}
succeeded, err := r.exclusiveUpdateDataUpload(ctx, updated, updateFunc)
@@ -753,6 +759,11 @@ func (r *DataUploadReconciler) onPrepareTimeout(ctx context.Context, du *velerov
volumeSnapshotName = du.Spec.CSISnapshot.VolumeSnapshot
}
diags := strings.Split(ep.DiagnoseExpose(ctx, getOwnerObject(du)), "\n")
for _, diag := range diags {
log.Warnf("[Diagnose DU expose]%s", diag)
}
ep.CleanUp(ctx, getOwnerObject(du), volumeSnapshotName, du.Spec.SourceNamespace)
log.Info("Dataupload has been cleaned up")
@@ -804,11 +815,22 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
accessMode = exposer.AccessModeBlock
}
hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
r.logger.WithError(err).Warnf("Failed to check node-agent label, skipping host pod label %s", k)
}
} else {
hostingPodLabels[k] = v
}
}
return &exposer.CSISnapshotExposeParam{
SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
SourceNamespace: du.Spec.SourceNamespace,
StorageClass: du.Spec.CSISnapshot.StorageClass,
HostingPodLabels: map[string]string{velerov1api.DataUploadLabel: du.Name},
HostingPodLabels: hostingPodLabels,
AccessMode: accessMode,
OperationTimeout: du.Spec.OperationTimeout.Duration,
ExposeTimeout: r.preparingTimeout,

View File
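Both reconcilers now consult nodeagent.GetLabelValue for each key in util.ThirdPartyLabels and copy any hits onto the hosting pod, presumably so that third-party admission or scheduling machinery keyed on node-agent labels also recognizes the transient backup/restore pods. A self-contained sketch of that composition; the lookup function is a stand-in for GetLabelValue:

package main

import (
	"errors"
	"fmt"
)

var errLabelNotFound = errors.New("node-agent label not found")

// buildHostingPodLabels starts from the Velero-owned label and copies any
// third-party labels the lookup can resolve; a not-found result is skipped
// silently, while other errors would be logged by the real controllers.
func buildHostingPodLabels(base map[string]string, thirdParty []string,
	lookup func(key string) (string, error)) map[string]string {
	labels := map[string]string{}
	for k, v := range base {
		labels[k] = v
	}
	for _, k := range thirdParty {
		v, err := lookup(k)
		if err != nil {
			continue // includes errLabelNotFound
		}
		labels[k] = v
	}
	return labels
}

func main() {
	lookup := func(key string) (string, error) {
		if key == "example.com/scheduling-tier" { // hypothetical third-party key
			return "backup", nil
		}
		return "", errLabelNotFound
	}
	labels := buildHostingPodLabels(
		map[string]string{"velero.io/data-upload": "du-1"},
		[]string{"example.com/scheduling-tier", "example.com/absent"},
		lookup,
	)
	fmt.Println(labels)
}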

@@ -300,6 +300,10 @@ func (f *fakeSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev
return f.peekErr
}
func (f *fakeSnapshotExposer) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
return ""
}
func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1.ObjectReference, string, string) {
}
@@ -475,7 +479,7 @@ func TestReconcile(t *testing.T) {
},
{
name: "prepare timeout",
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).SnapshotType(fakeSnapshotType).StartTimestamp(&metav1.Time{Time: time.Now().Add(-time.Minute * 5)}).Result(),
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).SnapshotType(fakeSnapshotType).Annotations(map[string]string{acceptTimeAnnoKey: (time.Now().Add(-time.Minute * 5)).Format(time.RFC3339)}).Result(),
expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
},
{
@@ -607,7 +611,7 @@ func TestReconcile(t *testing.T) {
require.Error(t, err)
} else {
require.NoError(t, err)
assert.Equal(t, du.Status.Phase, test.expected.Status.Phase)
assert.Equal(t, test.expected.Status.Phase, du.Status.Phase)
}
if test.expectedProcessed {
@@ -1043,6 +1047,10 @@ func (dt *duResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReferenc
return nil
}
func (dt *duResumeTestHelper) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
return ""
}
func (dt *duResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference, string, string) {}
func (dt *duResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string,
@@ -1071,19 +1079,19 @@ func TestAttemptDataUploadResume(t *testing.T) {
},
{
name: "accepted DataUpload in the current node",
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(),
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Annotations(map[string]string{acceptNodeAnnoKey: "node-1"}).Result(),
cancelledDataUploads: []string{dataUploadName},
acceptedDataUploads: []string{dataUploadName},
},
{
name: "accepted DataUpload in the current node but canceled",
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Cancel(true).Result(),
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Annotations(map[string]string{acceptNodeAnnoKey: "node-1"}).Cancel(true).Result(),
cancelledDataUploads: []string{dataUploadName},
acceptedDataUploads: []string{dataUploadName},
},
{
name: "accepted DataUpload in the current node but update error",
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(),
du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Annotations(map[string]string{acceptNodeAnnoKey: "node-1"}).Result(),
needErrs: []bool{false, false, true, false, false, false},
acceptedDataUploads: []string{dataUploadName},
},

View File

@@ -141,6 +141,7 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseInProgress
pvb.Status.StartTimestamp = &metav1.Time{Time: r.clock.Now()}
if err := r.Client.Patch(ctx, &pvb, client.MergeFrom(original)); err != nil {
r.closeDataPath(ctx, pvb.Name)
return r.errorOut(ctx, &pvb, err, "error updating PodVolumeBackup status", log)
}
@@ -150,11 +151,13 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
Name: pvb.Spec.Pod.Name,
}
if err := r.Client.Get(ctx, podNamespacedName, &pod); err != nil {
r.closeDataPath(ctx, pvb.Name)
return r.errorOut(ctx, &pvb, err, fmt.Sprintf("getting pod %s/%s", pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name), log)
}
path, err := exposer.GetPodVolumeHostPath(ctx, &pod, pvb.Spec.Volume, r.Client, r.fileSystem, log)
if err != nil {
r.closeDataPath(ctx, pvb.Name)
return r.errorOut(ctx, &pvb, err, "error exposing host path for pod volume", log)
}
@@ -169,6 +172,7 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
RepositoryEnsurer: r.repositoryEnsurer,
CredentialGetter: r.credentialGetter,
}); err != nil {
r.closeDataPath(ctx, pvb.Name)
return r.errorOut(ctx, &pvb, err, "error to initialize data path", log)
}
@@ -193,6 +197,7 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
ForceFull: false,
Tags: pvb.Spec.Tags,
}); err != nil {
r.closeDataPath(ctx, pvb.Name)
return r.errorOut(ctx, &pvb, err, "error starting data path backup", log)
}
@@ -361,7 +366,6 @@ func (r *PodVolumeBackupReconciler) closeDataPath(ctx context.Context, pvbName s
}
func (r *PodVolumeBackupReconciler) errorOut(ctx context.Context, pvb *velerov1api.PodVolumeBackup, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) {
r.closeDataPath(ctx, pvb.Name)
_ = UpdatePVBStatusToFailed(ctx, r.Client, pvb, err, msg, r.clock.Now(), log)
return ctrl.Result{}, err

View File

@@ -131,11 +131,13 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseInProgress
pvr.Status.StartTimestamp = &metav1.Time{Time: c.clock.Now()}
if err = c.Patch(ctx, pvr, client.MergeFrom(original)); err != nil {
c.closeDataPath(ctx, pvr.Name)
return c.errorOut(ctx, pvr, err, "error to update status to in progress", log)
}
volumePath, err := exposer.GetPodVolumeHostPath(ctx, pod, pvr.Spec.Volume, c.Client, c.fileSystem, log)
if err != nil {
c.closeDataPath(ctx, pvr.Name)
return c.errorOut(ctx, pvr, err, "error exposing host path for pod volume", log)
}
@@ -150,10 +152,12 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
RepositoryEnsurer: c.repositoryEnsurer,
CredentialGetter: c.credentialGetter,
}); err != nil {
c.closeDataPath(ctx, pvr.Name)
return c.errorOut(ctx, pvr, err, "error to initialize data path", log)
}
if err := fsRestore.StartRestore(pvr.Spec.SnapshotID, volumePath, pvr.Spec.UploaderSettings); err != nil {
c.closeDataPath(ctx, pvr.Name)
return c.errorOut(ctx, pvr, err, "error starting data path restore", log)
}
@@ -163,7 +167,6 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
}
func (c *PodVolumeRestoreReconciler) errorOut(ctx context.Context, pvr *velerov1api.PodVolumeRestore, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) {
c.closeDataPath(ctx, pvr.Name)
_ = UpdatePVRStatusToFailed(ctx, c.Client, pvr, errors.WithMessage(err, msg).Error(), c.clock.Now(), log)
return ctrl.Result{}, err
}

View File
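With errorOut no longer closing the data path itself (see the removed call above), every early error return after the data path is created must release it explicitly, in both the PodVolumeBackup and PodVolumeRestore reconcilers. The same invariant is often expressed in Go with a guarded defer; a sketch under that assumption, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

type dataPath struct{ name string }

func openDataPath(name string) (*dataPath, error) { return &dataPath{name: name}, nil }

func (d *dataPath) Close() { fmt.Println("closed", d.name) }

func run(name string, fail bool) error {
	dp, err := openDataPath(name)
	if err != nil {
		return err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			dp.Close() // mirrors closeDataPath on every error return
		}
	}()
	if fail {
		return errors.New("error starting data path backup")
	}
	succeeded = true // on success, the async completion callback closes it later
	return nil
}

func main() {
	fmt.Println(run("pvb-1", true))
}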

@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"os"
"strings"
"sync"
"time"
@@ -321,7 +322,7 @@ func (ms *microServiceBRWatcher) startWatch() {
if lastPod.Status.Phase == v1.PodSucceeded {
ms.callbacks.OnCompleted(ms.ctx, ms.namespace, ms.taskName, funcGetResultFromMessage(ms.taskType, terminateMessage, ms.log))
} else {
if terminateMessage == ErrCancelled {
if strings.HasSuffix(terminateMessage, ErrCancelled) {
ms.callbacks.OnCancelled(ms.ctx, ms.namespace, ms.taskName)
} else {
ms.callbacks.OnFailed(ms.ctx, ms.namespace, ms.taskName, errors.New(terminateMessage))

View File

@@ -360,7 +360,7 @@ func TestStartWatch(t *testing.T) {
event: &v1.Event{Reason: EventReasonStopped},
},
},
terminationMessage: ErrCancelled,
terminationMessage: fmt.Sprintf("Failed to init data path service for DataUpload %s: %v", "fake-du-name", errors.New(ErrCancelled)),
expectStartEvent: true,
expectTerminateEvent: true,
expectCancel: true,

View File
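The watcher change above relaxes the cancellation check from exact equality to a suffix match because the pod's termination message may now wrap ErrCancelled with context, as the updated test case shows. A runnable sketch; ErrCancelled's value here is assumed for illustration, the real constant lives in the datapath package:

package main

import (
	"fmt"
	"strings"
)

// ErrCancelled is assumed to be the sentinel text that wrapped errors end with.
const ErrCancelled = "data path is canceled"

func isCancelled(terminateMessage string) bool {
	// equality would miss wrapped messages; a suffix match catches both forms
	return strings.HasSuffix(terminateMessage, ErrCancelled)
}

func main() {
	wrapped := fmt.Sprintf("Failed to init data path service for DataUpload %s: %s", "fake-du-name", ErrCancelled)
	fmt.Println(isCancelled(ErrCancelled), isCancelled(wrapped)) // true true
}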

@@ -308,6 +308,70 @@ func (e *csiSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1
return nil
}
func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1.ObjectReference) string {
backupPodName := ownerObject.Name
backupPVCName := ownerObject.Name
backupVSName := ownerObject.Name
diag := "begin diagnose CSI exposer\n"
pod, err := e.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(ctx, backupPodName, metav1.GetOptions{})
if err != nil {
pod = nil
diag += fmt.Sprintf("error getting backup pod %s, err: %v\n", backupPodName, err)
}
pvc, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, backupPVCName, metav1.GetOptions{})
if err != nil {
pvc = nil
diag += fmt.Sprintf("error getting backup pvc %s, err: %v\n", backupPVCName, err)
}
vs, err := e.csiSnapshotClient.VolumeSnapshots(ownerObject.Namespace).Get(ctx, backupVSName, metav1.GetOptions{})
if err != nil {
vs = nil
diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
}
if pod != nil {
diag += kube.DiagnosePod(pod)
if pod.Spec.NodeName != "" {
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
diag += fmt.Sprintf("node-agent is not running in node %s, err: %v\n", pod.Spec.NodeName, err)
}
}
}
if pvc != nil {
diag += kube.DiagnosePVC(pvc)
if pvc.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting backup pv %s, err: %v\n", pvc.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
if vs != nil {
diag += csi.DiagnoseVS(vs)
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting backup vsc %s, err: %v\n", *vs.Status.BoundVolumeSnapshotContentName, err)
} else {
diag += csi.DiagnoseVSC(vsc)
}
}
}
diag += "end diagnose CSI exposer"
return diag
}
const cleanUpTimeout = time.Minute
func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference, vsName string, sourceNamespace string) {

View File
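DiagnoseExpose assembles one newline-terminated summary per object, which the controllers split on "\n" and log line by line. Judging by the expected strings in the test below, kube.DiagnosePod emits a pod header plus one line per condition; a re-implementation of that presumed format, for illustration only:

package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// diagnosePod mimics the one-line-per-fact format the tests expect from
// kube.DiagnosePod; each line ends with '\n' so summaries concatenate cleanly.
func diagnosePod(pod *corev1.Pod) string {
	diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n",
		pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
	for _, c := range pod.Status.Conditions {
		diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n",
			c.Type, c.Status, c.Reason, c.Message)
	}
	return diag
}

func main() {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "fake-backup"},
		Status:     corev1.PodStatus{Phase: corev1.PodPending},
	}
	// split and prefix each line, as the reconcilers do when logging the report
	for _, line := range strings.Split(strings.TrimSuffix(diagnosePod(pod), "\n"), "\n") {
		fmt.Println("[Diagnose]", line)
	}
}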

@@ -959,3 +959,394 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
})
}
}
func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
backup := &velerov1.Backup{
TypeMeta: metav1.TypeMeta{
APIVersion: velerov1.SchemeGroupVersion.String(),
Kind: "Backup",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-uid",
},
}
backupPodWithoutNodeName := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodInitialized,
Status: corev1.ConditionTrue,
Message: "fake-pod-message",
},
},
},
}
backupPodWithNodeName := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodInitialized,
Status: corev1.ConditionTrue,
Message: "fake-pod-message",
},
},
},
}
backupPVCWithoutVolumeName := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Status: corev1.PersistentVolumeClaimStatus{
Phase: corev1.ClaimPending,
},
}
backupPVCWithVolumeName := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "fake-pv",
},
Status: corev1.PersistentVolumeClaimStatus{
Phase: corev1.ClaimPending,
},
}
backupPV := corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv",
},
Status: corev1.PersistentVolumeStatus{
Phase: corev1.VolumePending,
Message: "fake-pv-message",
},
}
readyToUse := false
vscMessage := "fake-vsc-message"
backupVSC := snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
ReadyToUse: &readyToUse,
Error: &snapshotv1api.VolumeSnapshotError{
Message: &vscMessage,
},
},
}
backupVSWithoutStatus := snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
}
backupVSWithoutVSC := snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Status: &snapshotv1api.VolumeSnapshotStatus{},
}
vsMessage := "fake-vs-message"
backupVSWithVSC := snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &backupVSC.Name,
Error: &snapshotv1api.VolumeSnapshotError{
Message: &vsMessage,
},
},
}
nodeAgentPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "node-agent-pod-1",
Labels: map[string]string{"name": "node-agent"},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
tests := []struct {
name string
ownerBackup *velerov1.Backup
kubeClientObj []runtime.Object
snapshotClientObj []runtime.Object
expected string
}{
{
name: "no pod, pvc, vs",
ownerBackup: backup,
expected: `begin diagnose CSI exposer
error getting backup pod fake-backup, err: pods "fake-backup" not found
error getting backup pvc fake-backup, err: persistentvolumeclaims "fake-backup" not found
error getting backup vs fake-backup, err: volumesnapshots.snapshot.storage.k8s.io "fake-backup" not found
end diagnose CSI exposer`,
},
{
name: "pod without node name, pvc without volume name, vs without status",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithoutNodeName,
&backupPVCWithoutVolumeName,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutStatus,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "pod without node name, pvc without volume name, vs without VSC",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithoutNodeName,
&backupPVCWithoutVolumeName,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "pod with node name, no node agent",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithoutVolumeName,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
node-agent is not running in node fake-node, err: daemonset pod not found in running state in node fake-node
PVC velero/fake-backup, phase Pending, binding to
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "pod with node name, node agent is running",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithoutVolumeName,
&nodeAgentPod,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "pvc with volume name, no pv",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithVolumeName,
&nodeAgentPod,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to fake-pv
error getting backup pv fake-pv, err: persistentvolumes "fake-pv" not found
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "pvc with volume name, pv exists",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithVolumeName,
&backupPV,
&nodeAgentPod,
},
snapshotClientObj: []runtime.Object{
&backupVSWithoutVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
VS velero/fake-backup, bind to , readyToUse false, errMessage
end diagnose CSI exposer`,
},
{
name: "vs with vsc, vsc doesn't exist",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithVolumeName,
&backupPV,
&nodeAgentPod,
},
snapshotClientObj: []runtime.Object{
&backupVSWithVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
error getting backup vsc fake-vsc, err: volumesnapshotcontents.snapshot.storage.k8s.io "fake-vsc" not found
end diagnose CSI exposer`,
},
{
name: "vs with vsc, vsc exists",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&backupPVCWithVolumeName,
&backupPV,
&nodeAgentPod,
},
snapshotClientObj: []runtime.Object{
&backupVSWithVSC,
&backupVSC,
},
expected: `begin diagnose CSI exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
end diagnose CSI exposer`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(tt.kubeClientObj...)
fakeSnapshotClient := snapshotFake.NewSimpleClientset(tt.snapshotClientObj...)
e := &csiSnapshotExposer{
kubeClient: fakeKubeClient,
csiSnapshotClient: fakeSnapshotClient.SnapshotV1(),
log: velerotest.NewLogger(),
}
var ownerObject corev1.ObjectReference
if tt.ownerBackup != nil {
ownerObject = corev1.ObjectReference{
Kind: tt.ownerBackup.Kind,
Namespace: tt.ownerBackup.Namespace,
Name: tt.ownerBackup.Name,
UID: tt.ownerBackup.UID,
APIVersion: tt.ownerBackup.APIVersion,
}
}
diag := e.DiagnoseExpose(context.Background(), ownerObject)
assert.Equal(t, tt.expected, diag)
})
}
}

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@@ -49,6 +50,10 @@ type GenericRestoreExposer interface {
// Otherwise, it returns nil immediately.
PeekExposed(context.Context, corev1.ObjectReference) error
// DiagnoseExpose generates the diagnostic info when the expose is not finished for a long time.
// If it finds any problem, it returns a string describing the problem.
DiagnoseExpose(context.Context, corev1.ObjectReference) string
// RebindVolume unexposes the restored PV and rebind it to the target PVC
RebindVolume(context.Context, corev1.ObjectReference, string, string, time.Duration) error
@@ -195,6 +200,51 @@ func (e *genericRestoreExposer) PeekExposed(ctx context.Context, ownerObject cor
return nil
}
func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1.ObjectReference) string {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
diag := "begin diagnose restore exposer\n"
pod, err := e.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(ctx, restorePodName, metav1.GetOptions{})
if err != nil {
pod = nil
diag += fmt.Sprintf("error getting restore pod %s, err: %v\n", restorePodName, err)
}
pvc, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, restorePVCName, metav1.GetOptions{})
if err != nil {
pvc = nil
diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
}
if pod != nil {
diag += kube.DiagnosePod(pod)
if pod.Spec.NodeName != "" {
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
diag += fmt.Sprintf("node-agent is not running in node %s, err: %v\n", pod.Spec.NodeName, err)
}
}
}
if pvc != nil {
diag += kube.DiagnosePVC(pvc)
if pvc.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting restore pv %s, err: %v\n", pvc.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
diag += "end diagnose restore exposer"
return diag
}
func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

View File

@@ -507,3 +507,258 @@ func TestRestorePeekExpose(t *testing.T) {
})
}
}
func Test_RestoreDiagnoseExpose(t *testing.T) {
restore := &velerov1.Restore{
TypeMeta: metav1.TypeMeta{
APIVersion: velerov1.SchemeGroupVersion.String(),
Kind: "Restore",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
UID: "fake-uid",
},
}
restorePodWithoutNodeName := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodInitialized,
Status: corev1.ConditionTrue,
Message: "fake-pod-message",
},
},
},
}
restorePodWithNodeName := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
Conditions: []corev1.PodCondition{
{
Type: corev1.PodInitialized,
Status: corev1.ConditionTrue,
Message: "fake-pod-message",
},
},
},
}
restorePVCWithoutVolumeName := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Status: corev1.PersistentVolumeClaimStatus{
Phase: corev1.ClaimPending,
},
}
restorePVCWithVolumeName := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "fake-pv",
},
Status: corev1.PersistentVolumeClaimStatus{
Phase: corev1.ClaimPending,
},
}
restorePV := corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv",
},
Status: corev1.PersistentVolumeStatus{
Phase: corev1.VolumePending,
Message: "fake-pv-message",
},
}
nodeAgentPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "node-agent-pod-1",
Labels: map[string]string{"name": "node-agent"},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
tests := []struct {
name string
ownerRestore *velerov1.Restore
kubeClientObj []runtime.Object
expected string
}{
{
name: "no pod, pvc",
ownerRestore: restore,
expected: `begin diagnose restore exposer
error getting restore pod fake-restore, err: pods "fake-restore" not found
error getting restore pvc fake-restore, err: persistentvolumeclaims "fake-restore" not found
end diagnose restore exposer`,
},
{
name: "pod without node name, pvc without volume name, vs without status",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithoutNodeName,
&restorePVCWithoutVolumeName,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to
end diagnose restore exposer`,
},
{
name: "pod without node name, pvc without volume name",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithoutNodeName,
&restorePVCWithoutVolumeName,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to
end diagnose restore exposer`,
},
{
name: "pod with node name, no node agent",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithoutVolumeName,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
node-agent is not running in node fake-node, err: daemonset pod not found in running state in node fake-node
PVC velero/fake-restore, phase Pending, binding to
end diagnose restore exposer`,
},
{
name: "pod with node name, node agent is running",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithoutVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to
end diagnose restore exposer`,
},
{
name: "pvc with volume name, no pv",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
error getting restore pv fake-pv, err: persistentvolumes "fake-pv" not found
end diagnose restore exposer`,
},
{
name: "pvc with volume name, pv exists",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&restorePV,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
e := genericRestoreExposer{
kubeClient: fakeKubeClient,
log: velerotest.NewLogger(),
}
var ownerObject corev1api.ObjectReference
if test.ownerRestore != nil {
ownerObject = corev1api.ObjectReference{
Kind: test.ownerRestore.Kind,
Namespace: test.ownerRestore.Namespace,
Name: test.ownerRestore.Name,
UID: test.ownerRestore.UID,
APIVersion: test.ownerRestore.APIVersion,
}
}
diag := e.DiagnoseExpose(context.Background(), ownerObject)
assert.Equal(t, test.expected, diag)
})
}
}

View File

@@ -26,6 +26,24 @@ func (_m *GenericRestoreExposer) CleanUp(_a0 context.Context, _a1 v1.ObjectRefer
_m.Called(_a0, _a1)
}
// DiagnoseExpose provides a mock function with given fields: _a0, _a1
func (_m *GenericRestoreExposer) DiagnoseExpose(_a0 context.Context, _a1 v1.ObjectReference) string {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for DiagnoseExpose")
}
var r0 string
if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference) string); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// Expose provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4, _a5, _a6
func (_m *GenericRestoreExposer) Expose(_a0 context.Context, _a1 v1.ObjectReference, _a2 string, _a3 string, _a4 map[string]string, _a5 v1.ResourceRequirements, _a6 time.Duration) error {
ret := _m.Called(_a0, _a1, _a2, _a3, _a4, _a5, _a6)

View File
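The block above is a mockery-generated stub. In a test it would typically be wired up with testify's mock package; a sketch, assuming the generated mock lives in an importable mocks package:

package controller_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	corev1 "k8s.io/api/core/v1"

	"github.com/vmware-tanzu/velero/pkg/exposer/mocks" // assumed import path
)

func TestDiagnoseExposeMock(t *testing.T) {
	exposer := new(mocks.GenericRestoreExposer)
	exposer.On("DiagnoseExpose", mock.Anything, mock.Anything).
		Return("begin diagnose restore exposer\nend diagnose restore exposer")

	diag := exposer.DiagnoseExpose(context.Background(), corev1.ObjectReference{})
	if diag == "" {
		t.Fatal("expected diagnostic output")
	}
	exposer.AssertExpectations(t)
}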

@@ -37,6 +37,10 @@ type SnapshotExposer interface {
// Otherwise, it returns nil immediately.
PeekExposed(context.Context, corev1.ObjectReference) error
// DiagnoseExpose generates the diagnostic info when the expose is not finished for a long time.
// If it finds any problem, it returns a string describing the problem.
DiagnoseExpose(context.Context, corev1.ObjectReference) string
// CleanUp cleans up any objects generated during the snapshot expose
CleanUp(context.Context, corev1.ObjectReference, string, string)
}

View File

@@ -38,7 +38,8 @@ const (
)
var (
ErrDaemonSetNotFound = errors.New("daemonset not found")
ErrDaemonSetNotFound = errors.New("daemonset not found")
ErrNodeAgentLabelNotFound = errors.New("node-agent label not found")
)
type LoadConcurrency struct {
@@ -99,8 +100,17 @@ func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace s
}
}
// IsRunningInNode checks if the node agent pod is running properly in a specified node. If not, return the error found
// KbClientIsRunningInNode checks if the node agent pod is running properly in a specified node through the kube client. If not, it returns the error found
func KbClientIsRunningInNode(ctx context.Context, namespace string, nodeName string, kubeClient kubernetes.Interface) error {
return isRunningInNode(ctx, namespace, nodeName, nil, kubeClient)
}
// IsRunningInNode checks if the node agent pod is running properly in a specified node through the controller client. If not, it returns the error found
func IsRunningInNode(ctx context.Context, namespace string, nodeName string, crClient ctrlclient.Client) error {
return isRunningInNode(ctx, namespace, nodeName, crClient, nil)
}
func isRunningInNode(ctx context.Context, namespace string, nodeName string, crClient ctrlclient.Client, kubeClient kubernetes.Interface) error {
if nodeName == "" {
return errors.New("node name is empty")
}
@@ -111,7 +121,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, crC
return errors.Wrap(err, "fail to parse selector")
}
err = crClient.List(ctx, pods, &ctrlclient.ListOptions{LabelSelector: parsedSelector})
if crClient != nil {
err = crClient.List(ctx, pods, &ctrlclient.ListOptions{LabelSelector: parsedSelector})
} else {
pods, err = kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: parsedSelector.String()})
}
if err != nil {
return errors.Wrap(err, "failed to list daemonset pods")
}
@@ -161,3 +176,21 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
return configs, nil
}
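// GetLabelValue returns the value of the given label key from the node-agent
// daemonset's pod template, or ErrNodeAgentLabelNotFound if the label is absent.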
func GetLabelValue(ctx context.Context, kubeClient kubernetes.Interface, namespace string, key string) (string, error) {
ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{})
if err != nil {
return "", errors.Wrap(err, "error getting node-agent daemonset")
}
if ds.Spec.Template.Labels == nil {
return "", ErrNodeAgentLabelNotFound
}
val, found := ds.Spec.Template.Labels[key]
if !found {
return "", ErrNodeAgentLabelNotFound
}
return val, nil
}

View File
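isRunningInNode now accepts either a controller-runtime client or a plain client-go clientset, so callers such as the CSI exposer, which only hold a kubernetes.Interface, don't need to construct a controller-runtime client just for this check. The shape of that dispatch, as a small self-contained sketch with illustrative interfaces:

package main

import (
	"errors"
	"fmt"
)

// ctrlLister and kubeLister stand in for ctrlclient.Client and
// kubernetes.Interface in the real code.
type ctrlLister interface{ ListPods(selector string) ([]string, error) }
type kubeLister interface{ ListPods(selector string) ([]string, error) }

// listDaemonSetPods prefers the controller-runtime client when present and
// falls back to the clientset, mirroring isRunningInNode's dispatch.
func listDaemonSetPods(cr ctrlLister, kc kubeLister, selector string) ([]string, error) {
	if cr != nil {
		return cr.ListPods(selector)
	}
	if kc != nil {
		return kc.ListPods(selector)
	}
	return nil, errors.New("no client provided")
}

type fakeKube struct{}

func (fakeKube) ListPods(string) ([]string, error) { return []string{"node-agent-abc"}, nil }

func main() {
	pods, err := listDaemonSetPods(nil, fakeKube{}, "name=node-agent")
	fmt.Println(pods, err)
}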

@@ -331,3 +331,132 @@ func TestGetConfigs(t *testing.T) {
})
}
}
func TestGetLabelValue(t *testing.T) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
}
daemonSetWithOtherLabel := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-other-label": "fake-value-1",
},
},
},
},
}
daemonSetWithLabel := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-label": "fake-value-2",
},
},
},
},
}
daemonSetWithEmptyLabel := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-label": "",
},
},
},
},
}
tests := []struct {
name string
kubeClientObj []runtime.Object
namespace string
expectedValue string
expectErr string
}{
{
name: "ds get error",
namespace: "fake-ns",
expectErr: "error getting node-agent daemonset: daemonsets.apps \"node-agent\" not found",
},
{
name: "no label",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSet,
},
expectErr: ErrNodeAgentLabelNotFound.Error(),
},
{
name: "no expecting label",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSetWithOtherLabel,
},
expectErr: ErrNodeAgentLabelNotFound.Error(),
},
{
name: "expecting label",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSetWithLabel,
},
expectedValue: "fake-value-2",
},
{
name: "expecting empty label",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSetWithEmptyLabel,
},
expectedValue: "",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label")
if test.expectErr == "" {
assert.NoError(t, err)
assert.Equal(t, test.expectedValue, value)
} else {
assert.EqualError(t, err, test.expectErr)
}
})
}
}

View File

@@ -48,6 +48,8 @@ type Backupper interface {
// BackupPodVolumes backs up all specified volumes in a pod.
BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, volumesToBackup []string, resPolicies *resourcepolicies.Policies, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, *PVCBackupSummary, []error)
WaitAllPodVolumesProcessed(log logrus.FieldLogger) []*velerov1api.PodVolumeBackup
GetPodVolumeBackup(namespace, name string) (*velerov1api.PodVolumeBackup, error)
ListPodVolumeBackupsByPod(podNamespace, podName string) ([]*velerov1api.PodVolumeBackup, error)
}
type backupper struct {
@@ -59,7 +61,10 @@ type backupper struct {
pvbInformer ctrlcache.Informer
handlerRegistration cache.ResourceEventHandlerRegistration
wg sync.WaitGroup
result []*velerov1api.PodVolumeBackup
// pvbIndexer holds all PVBs created by this backupper and can quickly search
// the PVBs by specific properties thanks to the embedded indexes.
// The statuses of the PVBs are updated when the Informer receives update events.
pvbIndexer cache.Indexer
}
type skippedPVC struct {
@@ -101,8 +106,22 @@ func (pbs *PVCBackupSummary) addSkipped(volumeName string, reason string) {
}
}
const indexNamePod = "POD"
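// podIndexFunc indexes PodVolumeBackups by the namespace/name of the pod they back up.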
func podIndexFunc(obj interface{}) ([]string, error) {
pvb, ok := obj.(*velerov1api.PodVolumeBackup)
if !ok {
return nil, errors.Errorf("expected PodVolumeBackup, but got %T", obj)
}
if pvb == nil {
return nil, errors.New("PodVolumeBackup is nil")
}
return []string{cache.NewObjectName(pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name).String()}, nil
}
func newBackupper(
ctx context.Context,
log logrus.FieldLogger,
repoLocker *repository.RepoLocker,
repoEnsurer *repository.Ensurer,
pvbInformer ctrlcache.Informer,
@@ -118,13 +137,19 @@ func newBackupper(
uploaderType: uploaderType,
pvbInformer: pvbInformer,
wg: sync.WaitGroup{},
result: []*velerov1api.PodVolumeBackup{},
pvbIndexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
indexNamePod: podIndexFunc,
}),
}
b.handlerRegistration, _ = pvbInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, obj interface{}) {
pvb := obj.(*velerov1api.PodVolumeBackup)
pvb, ok := obj.(*velerov1api.PodVolumeBackup)
if !ok {
log.Errorf("expected PodVolumeBackup, but got %T", obj)
return
}
if pvb.GetLabels()[velerov1api.BackupUIDLabel] != string(backup.UID) {
return
@@ -135,7 +160,10 @@ func newBackupper(
return
}
b.result = append(b.result, pvb)
// the Indexer inserts the PVB directly if the PVB to be updated doesn't exist yet
if err := b.pvbIndexer.Update(pvb); err != nil {
log.WithError(err).Errorf("failed to update PVB %s/%s in indexer", pvb.Namespace, pvb.Name)
}
b.wg.Done()
},
},
@@ -312,6 +340,12 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
continue
}
b.wg.Add(1)
if err := b.pvbIndexer.Add(volumeBackup); err != nil {
errs = append(errs, errors.Wrapf(err, "failed to add PodVolumeBackup %s/%s to indexer", volumeBackup.Namespace, volumeBackup.Name))
continue
}
podVolumeBackups = append(podVolumeBackups, volumeBackup)
pvcSummary.addBackedup(volumeName)
}
@@ -337,7 +371,12 @@ func (b *backupper) WaitAllPodVolumesProcessed(log logrus.FieldLogger) []*velero
case <-b.ctx.Done():
log.Error("timed out waiting for all PodVolumeBackups to complete")
case <-done:
for _, pvb := range b.result {
for _, obj := range b.pvbIndexer.List() {
pvb, ok := obj.(*velerov1api.PodVolumeBackup)
if !ok {
log.Errorf("expected PodVolumeBackup, but got %T", obj)
continue
}
podVolumeBackups = append(podVolumeBackups, pvb)
if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed {
log.Errorf("pod volume backup failed: %s", pvb.Status.Message)
@@ -347,6 +386,37 @@ func (b *backupper) WaitAllPodVolumesProcessed(log logrus.FieldLogger) []*velero
return podVolumeBackups
}
func (b *backupper) GetPodVolumeBackup(namespace, name string) (*velerov1api.PodVolumeBackup, error) {
obj, exist, err := b.pvbIndexer.GetByKey(cache.NewObjectName(namespace, name).String())
if err != nil {
return nil, err
}
if !exist {
return nil, nil
}
pvb, ok := obj.(*velerov1api.PodVolumeBackup)
if !ok {
return nil, errors.Errorf("expected PodVolumeBackup, but got %T", obj)
}
return pvb, nil
}
func (b *backupper) ListPodVolumeBackupsByPod(podNamespace, podName string) ([]*velerov1api.PodVolumeBackup, error) {
objs, err := b.pvbIndexer.ByIndex(indexNamePod, cache.NewObjectName(podNamespace, podName).String())
if err != nil {
return nil, err
}
var pvbs []*velerov1api.PodVolumeBackup
for _, obj := range objs {
pvb, ok := obj.(*velerov1api.PodVolumeBackup)
if !ok {
return nil, errors.Errorf("expected PodVolumeBackup, but got %T", obj)
}
pvbs = append(pvbs, pvb)
}
return pvbs, nil
}
func skipAllPodVolumes(pod *corev1api.Pod, volumesToBackup []string, err error, pvcSummary *PVCBackupSummary, log logrus.FieldLogger) {
for _, volumeName := range volumesToBackup {
log.WithError(err).Warnf("Skip pod volume %s", volumeName)

View File
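A minimal, self-contained example of the client-go Indexer technique the backupper now uses: cache.MetaNamespaceKeyFunc for primary namespace/name keys plus a custom secondary index that groups objects by an arbitrary property. The Item type is illustrative; the Velero code indexes PodVolumeBackups by their pod:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

type Item struct {
	metav1.ObjectMeta
	Group string
}

// groupIndexFunc plays the role of podIndexFunc: one index value per object.
func groupIndexFunc(obj interface{}) ([]string, error) {
	it, ok := obj.(*Item)
	if !ok {
		return nil, fmt.Errorf("expected *Item, but got %T", obj)
	}
	return []string{it.Group}, nil
}

func main() {
	idx := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"GROUP": groupIndexFunc})
	_ = idx.Add(&Item{ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "pvb1"}, Group: "default/pod"})
	_ = idx.Add(&Item{ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "pvb2"}, Group: "default/pod"})

	// point lookup by namespace/name, like GetPodVolumeBackup
	obj, exists, _ := idx.GetByKey("velero/pvb1")
	fmt.Println(exists, obj.(*Item).Name)

	// secondary-index lookup, like ListPodVolumeBackupsByPod
	objs, _ := idx.ByIndex("GROUP", "default/pod")
	fmt.Println(len(objs)) // 2
}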

@@ -32,7 +32,7 @@ import (
// BackupperFactory can construct pod volumes backuppers.
type BackupperFactory interface {
// NewBackupper returns a pod volumes backupper for use during a single Velero backup.
NewBackupper(context.Context, *velerov1api.Backup, string) (Backupper, error)
NewBackupper(context.Context, logrus.FieldLogger, *velerov1api.Backup, string) (Backupper, error)
}
func NewBackupperFactory(
@@ -59,8 +59,8 @@ type backupperFactory struct {
log logrus.FieldLogger
}
func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup, uploaderType string) (Backupper, error) {
b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, bf.pvbInformer, bf.crClient, uploaderType, backup)
func (bf *backupperFactory) NewBackupper(ctx context.Context, log logrus.FieldLogger, backup *velerov1api.Backup, uploaderType string) (Backupper, error) {
b := newBackupper(ctx, log, bf.repoLocker, bf.repoEnsurer, bf.pvbInformer, bf.crClient, uploaderType, backup)
if !cache.WaitForCacheSync(ctx.Done(), bf.pvbInformer.HasSynced) {
return nil, errors.New("timed out waiting for caches to sync")

View File

@@ -307,6 +307,7 @@ func TestBackupPodVolumes(t *testing.T) {
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
corev1api.AddToScheme(scheme)
log := logrus.New()
tests := []struct {
name string
@@ -556,7 +557,7 @@ func TestBackupPodVolumes(t *testing.T) {
backupObj.Spec.StorageLocation = test.bsl
factory := NewBackupperFactory(repository.NewRepoLocker(), ensurer, fakeCtrlClient, pvbInformer, velerotest.NewLogger())
bp, err := factory.NewBackupper(ctx, backupObj, test.uploaderType)
bp, err := factory.NewBackupper(ctx, log, backupObj, test.uploaderType)
require.NoError(t, err)
@@ -581,6 +582,91 @@ func TestBackupPodVolumes(t *testing.T) {
}
}
func TestGetPodVolumeBackup(t *testing.T) {
backupper := &backupper{
pvbIndexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
indexNamePod: podIndexFunc,
}),
}
obj := &velerov1api.PodVolumeBackup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "velero",
Name: "pvb",
},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
err := backupper.pvbIndexer.Add(obj)
require.NoError(t, err)
// not exist PVB
pvb, err := backupper.GetPodVolumeBackup("invalid-namespace", "invalid-name")
require.NoError(t, err)
assert.Nil(t, pvb)
// exist PVB
pvb, err = backupper.GetPodVolumeBackup("velero", "pvb")
require.NoError(t, err)
assert.NotNil(t, pvb)
}
func TestListPodVolumeBackupsByPod(t *testing.T) {
backupper := &backupper{
pvbIndexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
indexNamePod: podIndexFunc,
}),
}
obj1 := &velerov1api.PodVolumeBackup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "velero",
Name: "pvb1",
},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
obj2 := &velerov1api.PodVolumeBackup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "velero",
Name: "pvb2",
},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
err := backupper.pvbIndexer.Add(obj1)
require.NoError(t, err)
err = backupper.pvbIndexer.Add(obj2)
require.NoError(t, err)
// not exist PVBs
pvbs, err := backupper.ListPodVolumeBackupsByPod("invalid-namespace", "invalid-name")
require.NoError(t, err)
assert.Empty(t, pvbs)
// exist PVBs
pvbs, err = backupper.ListPodVolumeBackupsByPod("default", "pod")
require.NoError(t, err)
assert.Len(t, pvbs, 2)
}
type logHook struct {
entry *logrus.Entry
}
@@ -598,6 +684,7 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) {
defer func() {
cancelFunc()
}()
log := logrus.New()
cases := []struct {
name string
ctx context.Context
@@ -653,7 +740,7 @@ func TestWaitAllPodVolumesProcessed(t *testing.T) {
logHook := &logHook{}
logger.Hooks.Add(logHook)
backuper := newBackupper(c.ctx, nil, nil, informer, nil, "", &velerov1api.Backup{})
backuper := newBackupper(c.ctx, log, nil, nil, informer, nil, "", &velerov1api.Backup{})
backuper.wg.Add(1)
if c.statusToBeUpdated != nil {

View File

@@ -121,13 +121,24 @@ func GetS3Credentials(config map[string]string) (*aws.Credentials, error) {
// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetAWSBucketRegion(bucket string) (string, error) {
cfg, err := awsconfig.LoadDefaultConfig(context.Background())
// It uses us-east-1 as the hinting region and requires the config param to resolve credentials
func GetAWSBucketRegion(bucket string, config map[string]string) (string, error) {
cfg, err := awsconfig.LoadDefaultConfig(context.Background(), awsconfig.WithCredentialsProvider(
aws.CredentialsProviderFunc(
func(context.Context) (aws.Credentials, error) {
s3creds, err := GetS3Credentials(config)
if s3creds == nil {
return aws.Credentials{}, err
}
return *s3creds, err
},
),
))
if err != nil {
return "", errors.WithStack(err)
}
client := s3.NewFromConfig(cfg)
region, err := s3manager.GetBucketRegion(context.Background(), client, bucket)
region, err := s3manager.GetBucketRegion(context.Background(), client, bucket, func(o *s3.Options) { o.Region = "us-east-1" })
if err != nil {
return "", errors.WithStack(err)
}

View File
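The region lookup above now builds its aws.Config with an explicit CredentialsProviderFunc and pins the probe client to us-east-1 as the hinting region. A standalone sketch of the same AWS SDK v2 pattern; error handling is trimmed and getCredentials is a hardcoded stand-in for GetS3Credentials:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// getCredentials stands in for GetS3Credentials(config); values are illustrative.
func getCredentials(context.Context) (aws.Credentials, error) {
	return aws.Credentials{AccessKeyID: "AKIAEXAMPLE", SecretAccessKey: "secret"}, nil
}

func main() {
	ctx := context.Background()
	cfg, err := awsconfig.LoadDefaultConfig(ctx,
		awsconfig.WithCredentialsProvider(aws.CredentialsProviderFunc(getCredentials)),
	)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	region, err := s3manager.GetBucketRegion(ctx, client, "my-bucket",
		func(o *s3.Options) { o.Region = "us-east-1" }, // hinting region for the probe
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region)
}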

@@ -72,7 +72,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error)
var err error
region := location.Spec.Config["region"]
if region == "" {
region, err = getAWSBucketRegion(bucket)
region, err = getAWSBucketRegion(bucket, location.Spec.Config)
}
if err != nil {
return "", errors.Wrapf(err, "failed to detect the region via bucket: %s", bucket)

View File

@@ -30,7 +30,7 @@ func TestGetRepoIdentifier(t *testing.T) {
name string
bsl *velerov1api.BackupStorageLocation
repoName string
getAWSBucketRegion func(string) (string, error)
getAWSBucketRegion func(s string, config map[string]string) (string, error)
expected string
expectedErr string
}{
@@ -101,7 +101,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "repo-1",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(s string, config map[string]string) (string, error) {
return "", errors.New("no region found")
},
expected: "",
@@ -120,7 +120,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "repo-1",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(string, map[string]string) (string, error) {
return "eu-west-1", nil
},
expected: "s3:s3-eu-west-1.amazonaws.com/bucket/restic/repo-1",
@@ -139,7 +139,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "repo-1",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(s string, config map[string]string) (string, error) {
return "eu-west-1", nil
},
expected: "s3:s3-eu-west-1.amazonaws.com/bucket/prefix/restic/repo-1",
@@ -161,7 +161,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "repo-1",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(s string, config map[string]string) (string, error) {
return "eu-west-1", nil
},
expected: "s3:alternate-url/bucket/prefix/restic/repo-1",
@@ -183,7 +183,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "aws-repo",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(s string, config map[string]string) (string, error) {
return "eu-west-1", nil
},
expected: "s3:s3-us-west-1.amazonaws.com/bucket/prefix/restic/aws-repo",
@@ -205,7 +205,7 @@ func TestGetRepoIdentifier(t *testing.T) {
},
},
repoName: "aws-repo",
getAWSBucketRegion: func(string) (string, error) {
getAWSBucketRegion: func(s string, config map[string]string) (string, error) {
return "eu-west-1", nil
},
expected: "s3:alternate-url-with-trailing-slash/bucket/prefix/restic/aws-repo",

View File

@@ -117,7 +117,20 @@ func GetMaintenanceResultFromJob(cli client.Client, job *batchv1.Job) (string, e
}
// we only have one maintenance pod for the job
return podList.Items[0].Status.ContainerStatuses[0].State.Terminated.Message, nil
pod := podList.Items[0]
statuses := pod.Status.ContainerStatuses
if len(statuses) == 0 {
return "", fmt.Errorf("no container statuses found for job %s", job.Name)
}
// we only have one maintenance container
terminated := statuses[0].State.Terminated
if terminated == nil {
return "", fmt.Errorf("container for job %s is not terminated", job.Name)
}
return terminated.Message, nil
}
func GetLatestMaintenanceJob(cli client.Client, ns string) (*batchv1.Job, error) {
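A hedged caller sketch, not part of this diff, showing why the nil checks matter: the two new error cases can be treated as transient and retried, where the old one-liner would have panicked on a pod whose container had not terminated yet. The helper name and package placement are assumptions.

```go
package repository // assumed package, alongside GetMaintenanceResultFromJob

import (
	"github.com/sirupsen/logrus"
	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// maintenanceResultIfReady is a hypothetical helper: it returns the
// termination message only once the maintenance container has terminated.
func maintenanceResultIfReady(cli client.Client, job *batchv1.Job, log logrus.FieldLogger) (string, bool) {
	msg, err := GetMaintenanceResultFromJob(cli, job)
	if err != nil {
		// Covers both "no container statuses" and "not terminated":
		// log and let the caller requeue instead of crashing.
		log.WithError(err).Debug("maintenance result not ready yet")
		return "", false
	}
	return msg, true
}
```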

View File

@@ -188,33 +188,54 @@ func TestGetMaintenanceResultFromJob(t *testing.T) {
},
}
// Set up test pod
// Set up test pod with no status
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
Labels: map[string]string{"job-name": job.Name},
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Message: "test message",
},
}
// Create a fake Kubernetes client
cli := fake.NewClientBuilder().WithObjects(job, pod).Build()
// Test that an error is returned when the pod has no container statuses
result, err := GetMaintenanceResultFromJob(cli, job)
assert.Error(t, err)
assert.Equal(t, "", result)
// Set a non-terminated container status to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{},
},
},
}
// Test that an error is returned for a non-terminated container
cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
result, err = GetMaintenanceResultFromJob(cli, job)
assert.Error(t, err)
assert.Equal(t, "", result)
// Set a terminated container status to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Message: "test message",
},
},
},
},
}
// Create a fake Kubernetes client
cli := fake.NewClientBuilder().WithObjects(job, pod).Build()
// Call the function
result, err := GetMaintenanceResultFromJob(cli, job)
// Check if the result and error match the expectation
// This call should return the termination message with no error
cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
result, err = GetMaintenanceResultFromJob(cli, job)
assert.NoError(t, err)
assert.Equal(t, "test message", result)
}

View File

@@ -529,7 +529,7 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo
var err error
if s3URL == "" {
if region == "" {
region, err = getS3BucketRegion(bucket)
region, err = getS3BucketRegion(bucket, config)
if err != nil {
return map[string]string{}, errors.Wrap(err, "error get s3 bucket region")
}

View File

@@ -222,7 +222,7 @@ func TestGetStorageVariables(t *testing.T) {
repoName string
repoBackend string
repoConfig map[string]string
getS3BucketRegion func(string) (string, error)
getS3BucketRegion func(bucket string, config map[string]string) (string, error)
expected map[string]string
expectedErr string
}{
@@ -291,7 +291,7 @@ func TestGetStorageVariables(t *testing.T) {
},
},
},
getS3BucketRegion: func(bucket string) (string, error) {
getS3BucketRegion: func(bucket string, config map[string]string) (string, error) {
return "region from bucket: " + bucket, nil
},
repoBackend: "fake-repo-type",
@@ -313,7 +313,7 @@ func TestGetStorageVariables(t *testing.T) {
Config: map[string]string{},
},
},
getS3BucketRegion: func(bucket string) (string, error) {
getS3BucketRegion: func(bucket string, config map[string]string) (string, error) {
return "", errors.New("fake error")
},
expected: map[string]string{},
@@ -339,7 +339,7 @@ func TestGetStorageVariables(t *testing.T) {
},
},
},
getS3BucketRegion: func(bucket string) (string, error) {
getS3BucketRegion: func(bucket string, config map[string]string) (string, error) {
return "region from bucket: " + bucket, nil
},
repoBackend: "fake-repo-type",
@@ -374,7 +374,7 @@ func TestGetStorageVariables(t *testing.T) {
},
},
},
getS3BucketRegion: func(bucket string) (string, error) {
getS3BucketRegion: func(bucket string, config map[string]string) (string, error) {
return "region from bucket: " + bucket, nil
},
repoBackend: "fake-repo-type",

View File

@@ -21,6 +21,8 @@ import (
"fmt"
"strings"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
@@ -44,6 +46,7 @@ const (
defaultCPURequestLimit = "100m"
defaultMemRequestLimit = "128Mi"
defaultCommand = "/velero-restore-helper"
restoreHelperUID = 1000
)
type PodVolumeRestoreAction struct {
@@ -143,9 +146,15 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
runAsUser, runAsGroup, allowPrivilegeEscalation, secCtx := getSecurityContext(log, config)
securityContext, err := kube.ParseSecurityContext(runAsUser, runAsGroup, allowPrivilegeEscalation, secCtx)
if err != nil {
log.Errorf("Using default securityContext values, couldn't parse securityContext requirements: %s.", err)
var securityContext corev1.SecurityContext
if runAsUser == "" && runAsGroup == "" && allowPrivilegeEscalation == "" && secCtx == "" {
securityContext = defaultSecurityCtx()
} else {
securityContext, err = kube.ParseSecurityContext(runAsUser, runAsGroup, allowPrivilegeEscalation, secCtx)
if err != nil {
log.Errorf("Using default securityContext values, couldn't parse securityContext requirements: %s.", err)
securityContext = defaultSecurityCtx()
}
}
initContainerBuilder := newRestoreInitContainerBuilder(image, string(input.Restore.UID))
@@ -282,3 +291,20 @@ func newRestoreInitContainerBuilder(image, restoreUID string) *builder.Container
},
}...)
}
// defaultSecurityCtx returns a default security context for the init container, which has the level "restricted" per
// Pod Security Standards.
func defaultSecurityCtx() corev1.SecurityContext {
uid := int64(restoreHelperUID)
return corev1.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
RunAsUser: &uid,
RunAsNonRoot: boolptr.True(),
}
}
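For reference, a sketch of the plugin-config route that bypasses this default (an assumption based on the documented restore-helper customization; the label and key names below are not part of this diff): when any of the `secCtx*` keys are set, the action parses them via `kube.ParseSecurityContext` instead of falling back to `defaultSecurityCtx()`.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// restoreHelperConfig sketches the ConfigMap that getSecurityContext is
// assumed to read; key and label names follow the documented restore-helper
// customization and may differ in practice.
func restoreHelperConfig() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "restore-helper-config",
			Namespace: "velero",
			Labels: map[string]string{
				"velero.io/plugin-config":      "",
				"velero.io/pod-volume-restore": "RestoreItemAction",
			},
		},
		Data: map[string]string{
			// Explicit values here short-circuit the restricted default.
			"secCtxRunAsUser":  "1000",
			"secCtxRunAsGroup": "1000",
		},
	}
}

func main() { _ = restoreHelperConfig() }
```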

View File

@@ -20,6 +20,8 @@ import (
"sort"
"testing"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -113,8 +115,18 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) {
defaultCPURequestLimit, defaultMemRequestLimit, // requests
defaultCPURequestLimit, defaultMemRequestLimit, // limits
)
securityContext, _ := kube.ParseSecurityContext("", "", "", "")
id := int64(1000)
securityContext := corev1api.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &corev1api.Capabilities{
Drop: []corev1api.Capability{"ALL"},
},
SeccompProfile: &corev1api.SeccompProfile{
Type: corev1api.SeccompProfileTypeRuntimeDefault,
},
RunAsUser: &id,
RunAsNonRoot: boolptr.True(),
}
var (
restoreName = "my-restore"

View File

@@ -16,6 +16,9 @@ limitations under the License.
package test
import (
"fmt"
"strings"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/mock"
@@ -33,6 +36,10 @@ type HookExecutionEntry struct {
HookCommand []string
}
func (h HookExecutionEntry) String() string {
return fmt.Sprintf("%s.%s.%s.%s", h.Namespace, h.Name, h.HookName, strings.Join(h.HookCommand, ","))
}
func (e *MockPodCommandExecutor) ExecutePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
e.HookExecutionLog = append(e.HookExecutionLog, HookExecutionEntry{
Namespace: namespace,
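For illustration, the new `String()` joins the command with commas and dots the fields together; a standalone copy with made-up values:

```go
package main

import (
	"fmt"
	"strings"
)

// Local copy of the entry type so this snippet runs on its own; the real
// HookExecutionEntry lives in the test package above.
type HookExecutionEntry struct {
	Namespace, Name, HookName string
	HookCommand               []string
}

func (h HookExecutionEntry) String() string {
	return fmt.Sprintf("%s.%s.%s.%s", h.Namespace, h.Name, h.HookName, strings.Join(h.HookCommand, ","))
}

func main() {
	entry := HookExecutionEntry{
		Namespace:   "ns-1",
		Name:        "pod-1",
		HookName:    "hook-1",
		HookCommand: []string{"/bin/sh", "-c", "echo hi"},
	}
	fmt.Println(entry) // ns-1.pod-1.hook-1./bin/sh,-c,echo hi
}
```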

View File

@@ -33,7 +33,7 @@ type BlockOutput struct {
targetFileName string
}
func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remoteFile fs.File) error {
func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remoteFile fs.File, progressCb restore.FileWriteProgress) error {
return fmt.Errorf("block mode is not supported for Windows")
}

View File

@@ -773,3 +773,51 @@ func WaitUntilVSCHandleIsReady(
return vsc, nil
}
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
vscName := ""
readyToUse := false
errMessage := ""
if vs.Status != nil {
if vs.Status.BoundVolumeSnapshotContentName != nil {
vscName = *vs.Status.BoundVolumeSnapshotContentName
}
if vs.Status.ReadyToUse != nil {
readyToUse = *vs.Status.ReadyToUse
}
if vs.Status.Error != nil && vs.Status.Error.Message != nil {
errMessage = *vs.Status.Error.Message
}
}
diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)
return diag
}
func DiagnoseVSC(vsc *snapshotv1api.VolumeSnapshotContent) string {
handle := ""
readyToUse := false
errMessage := ""
if vsc.Status != nil {
if vsc.Status.SnapshotHandle != nil {
handle = *vsc.Status.SnapshotHandle
}
if vsc.Status.ReadyToUse != nil {
readyToUse = *vsc.Status.ReadyToUse
}
if vsc.Status.Error != nil && vsc.Status.Error.Message != nil {
errMessage = *vsc.Status.Error.Message
}
}
diag := fmt.Sprintf("VSC %s, readyToUse %v, errMessage %s, handle %s\n", vsc.Name, readyToUse, errMessage, handle)
return diag
}
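A small usage sketch, assumed rather than shown in this diff: surfacing both diagnostics together when a wait for the snapshot handle times out. The import path version and package placement are guesses.

```go
package csi // assumed package, alongside DiagnoseVS and DiagnoseVSC

import (
	// The version segment of this import path may differ.
	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
	"github.com/sirupsen/logrus"
)

// diagnoseSnapshot is hypothetical; it concatenates both reports so one log
// entry tells whether the VS bound, the VSC got a handle, and any CSI error.
func diagnoseSnapshot(log logrus.FieldLogger, vs *snapshotv1api.VolumeSnapshot, vsc *snapshotv1api.VolumeSnapshotContent) {
	diag := DiagnoseVS(vs)
	if vsc != nil {
		diag += DiagnoseVSC(vsc)
	}
	log.Warnf("snapshot not ready before timeout:\n%s", diag)
}
```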

View File

@@ -1655,3 +1655,197 @@ func TestWaitUntilVSCHandleIsReady(t *testing.T) {
})
}
}
func TestDiagnoseVS(t *testing.T) {
vscName := "fake-vsc"
readyToUse := true
message := "fake-message"
testCases := []struct {
name string
vs *snapshotv1api.VolumeSnapshot
expected string
}{
{
name: "VS with no status",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
},
expected: "VS fake-ns/fake-vs, bind to , readyToUse false, errMessage \n",
},
{
name: "VS with empty status",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
Status: &snapshotv1api.VolumeSnapshotStatus{},
},
expected: "VS fake-ns/fake-vs, bind to , readyToUse false, errMessage \n",
},
{
name: "VS with VSC name",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &vscName,
},
},
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse false, errMessage \n",
},
{
name: "VS with VSC name+ready",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &vscName,
ReadyToUse: &readyToUse,
},
},
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
},
{
name: "VS with VSC name+ready+empty error",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &vscName,
ReadyToUse: &readyToUse,
Error: &snapshotv1api.VolumeSnapshotError{},
},
},
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
},
{
name: "VS with VSC name+ready+error",
vs: &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &vscName,
ReadyToUse: &readyToUse,
Error: &snapshotv1api.VolumeSnapshotError{
Message: &message,
},
},
},
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diag := DiagnoseVS(tc.vs)
assert.Equal(t, tc.expected, diag)
})
}
}
func TestDiagnoseVSC(t *testing.T) {
readyToUse := true
message := "fake-message"
handle := "fake-handle"
testCases := []struct {
name string
vsc *snapshotv1api.VolumeSnapshotContent
expected string
}{
{
name: "VS with no status",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
},
expected: "VSC fake-vsc, readyToUse false, errMessage , handle \n",
},
{
name: "VSC with empty status",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{},
},
expected: "VSC fake-vsc, readyToUse false, errMessage , handle \n",
},
{
name: "VSC with ready",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
ReadyToUse: &readyToUse,
},
},
expected: "VSC fake-vsc, readyToUse true, errMessage , handle \n",
},
{
name: "VSC with ready+handle",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
ReadyToUse: &readyToUse,
SnapshotHandle: &handle,
},
},
expected: "VSC fake-vsc, readyToUse true, errMessage , handle fake-handle\n",
},
{
name: "VSC with ready+handle+empty error",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
ReadyToUse: &readyToUse,
SnapshotHandle: &handle,
Error: &snapshotv1api.VolumeSnapshotError{},
},
},
expected: "VSC fake-vsc, readyToUse true, errMessage , handle fake-handle\n",
},
{
name: "VSC with ready+handle+error",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
ReadyToUse: &readyToUse,
SnapshotHandle: &handle,
Error: &snapshotv1api.VolumeSnapshotError{
Message: &message,
},
},
},
expected: "VSC fake-vsc, readyToUse true, errMessage fake-message, handle fake-handle\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diag := DiagnoseVSC(tc.vsc)
assert.Equal(t, tc.expected, diag)
})
}
}

View File

@@ -257,3 +257,13 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
return nil
}
func DiagnosePod(pod *corev1api.Pod) string {
diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
for _, condition := range pod.Status.Conditions {
diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
}
return diag
}

View File

@@ -846,3 +846,49 @@ func TestToSystemAffinity(t *testing.T) {
})
}
}
func TestDiagnosePod(t *testing.T) {
testCases := []struct {
name string
pod *corev1api.Pod
expected string
}{
{
name: "pod with all info",
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pod",
Namespace: "fake-ns",
},
Spec: corev1api.PodSpec{
NodeName: "fake-node",
},
Status: corev1api.PodStatus{
Phase: corev1api.PodPending,
Conditions: []corev1api.PodCondition{
{
Type: corev1api.PodInitialized,
Status: corev1api.ConditionTrue,
Reason: "fake-reason-1",
Message: "fake-message-1",
},
{
Type: corev1api.PodScheduled,
Status: corev1api.ConditionFalse,
Reason: "fake-reason-2",
Message: "fake-message-2",
},
},
},
},
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diag := DiagnosePod(tc.pod)
assert.Equal(t, tc.expected, diag)
})
}
}

View File

@@ -412,3 +412,12 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
return pvc, nil
}
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
}
func DiagnosePV(pv *corev1api.PersistentVolume) string {
diag := fmt.Sprintf("PV %s, phase %s, reason %s, message %s\n", pv.Name, pv.Status.Phase, pv.Status.Reason, pv.Status.Message)
return diag
}
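The new Diagnose helpers compose naturally; a hypothetical sketch combining `DiagnosePod` from the earlier hunk with `DiagnosePVC` and `DiagnosePV` (the package qualifiers are assumptions, since this extract does not show file paths):

```go
package exposer // assumed caller package

import (
	corev1api "k8s.io/api/core/v1"

	// Assumed import paths; this extract does not show which files the
	// Diagnose helpers were added to.
	"github.com/vmware-tanzu/velero/pkg/nodeagent"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// diagnoseVolumeChain is illustrative only: one report for a stuck pod and
// its claim/volume chain.
func diagnoseVolumeChain(pod *corev1api.Pod, pvc *corev1api.PersistentVolumeClaim, pv *corev1api.PersistentVolume) string {
	return nodeagent.DiagnosePod(pod) + kube.DiagnosePVC(pvc) + kube.DiagnosePV(pv)
}
```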

View File

@@ -1463,3 +1463,65 @@ func TestMakePodPVCAttachment(t *testing.T) {
})
}
}
func TestDiagnosePVC(t *testing.T) {
testCases := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
expected string
}{
{
name: "pvc with all info",
pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pvc",
Namespace: "fake-ns",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "fake-pv",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimPending,
},
},
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diag := DiagnosePVC(tc.pvc)
assert.Equal(t, tc.expected, diag)
})
}
}
func TestDiagnosePV(t *testing.T) {
testCases := []struct {
name string
pv *corev1api.PersistentVolume
expected string
}{
{
name: "pv with all info",
pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv",
},
Status: corev1api.PersistentVolumeStatus{
Phase: corev1api.VolumePending,
Message: "fake-message",
Reason: "fake-reason",
},
},
expected: "PV fake-pv, phase Pending, reason fake-reason, message fake-message\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diag := DiagnosePV(tc.pv)
assert.Equal(t, tc.expected, diag)
})
}
}

pkg/util/third_party.go (new file)
View File

@@ -0,0 +1,21 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
var ThirdPartyLabels []string = []string{
"azure.workload.identity/use",
}
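Nothing in this diff consumes the new variable yet; a guess at the intended use (the function below is hypothetical, only `ThirdPartyLabels` comes from the diff): carrying recognized third-party labels, such as `azure.workload.identity/use`, from an existing label set onto a generated pod so identity webhooks keep matching.

```go
// copyThirdPartyLabels is an assumption for illustration; it would live in
// the same util package, so ThirdPartyLabels is referenced unqualified.
func copyThirdPartyLabels(src, dst map[string]string) {
	for _, key := range ThirdPartyLabels {
		if v, ok := src[key]; ok {
			dst[key] = v
		}
	}
}
```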

View File

@@ -41,25 +41,41 @@ help: ## Display this help
TOOLS_DIR := $(REPO_ROOT)/hack/tools
BIN_DIR := bin
# Try not to modify PATH if possible
GOBIN := $(REPO_ROOT)/.go/bin
TOOLS_BIN_DIR := $(TOOLS_DIR)/$(BIN_DIR)
GINKGO := $(GOBIN)/ginkgo
KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
OUTPUT_DIR := _output/$(GOOS)/$(GOARCH)/bin
# Please refer to this document for the Ginkgo label spec format.
# https://onsi.github.io/ginkgo/#spec-labels
GINKGO_LABELS ?=
# When --fail-fast is set, the entire suite will stop when the first failure occurs.
# --fail-fast is disabled by default here.
# https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
FAIL_FAST ?= false
VELERO_CLI ?=$$(pwd)/../_output/bin/$(GOOS)/$(GOARCH)/velero
VELERO_IMAGE ?= velero/velero:main
PLUGINS ?=
# Flag used to tell E2E whether the Velero vSphere plugin is installed.
HAS_VSPHERE_PLUGIN ?= false
RESTORE_HELPER_IMAGE ?=
# Released versions only
UPGRADE_FROM_VELERO_VERSION ?= v1.13.2,v1.14.1
# UPGRADE_FROM_VELERO_CLI can have the same format (a comma-separated list) as UPGRADE_FROM_VELERO_VERSION
# Upgrade tests will be executed sequentially according to the UPGRADE_FROM_VELERO_VERSION list
# So even though the length of the UPGRADE_FROM_VELERO_CLI list may not equal that of UPGRADE_FROM_VELERO_VERSION
@@ -150,7 +166,8 @@ COMMON_ARGS := --velerocli=$(VELERO_CLI) \
--velero-server-debug-mode=$(VELERO_SERVER_DEBUG_MODE) \
--uploader-type=$(UPLOADER_TYPE) \
--debug-velero-pod-restart=$(DEBUG_VELERO_POD_RESTART) \
--fail-fast=$(FAIL_FAST)
--fail-fast=$(FAIL_FAST) \
--has-vsphere-plugin=$(HAS_VSPHERE_PLUGIN)
# Make sure ginkgo is in $GOBIN
.PHONY:ginkgo
@@ -197,7 +214,7 @@ run-e2e: ginkgo
--standby-cluster-name=$(STANDBY_CLUSTER_NAME) \
--eks-policy-arn=$(EKS_POLICY_ARN) \
--default-cls-service-account-name=$(DEFAULT_CLS_SERVICE_ACCOUNT_NAME) \
--standby-cls-service-account-name=$(STANDBY_CLS_SERVICE_ACCOUNT_NAME)
--standby-cls-service-account-name=$(STANDBY_CLS_SERVICE_ACCOUNT_NAME) \
--kibishii-directory=$(KIBISHII_DIRECTORY) \
--disable-informer-cache=$(DISABLE_INFORMER_CACHE)

View File

@@ -78,6 +78,7 @@ These configuration parameters are expected as values to the following command l
1. `--standby-cluster-object-store-provider`: Object store provider for standby cluster.
1. `--debug-velero-pod-restart`: A switch for debugging velero pod restart.
1. `--fail-fast`: A switch for failing fast when an error occurs.
1. `--has-vsphere-plugin`: A switch to indicate whether the Velero vSphere plugin is installed in the vSphere environment.
These configurations or parameters are used to generate install options for Velero for each test suite.
@@ -129,12 +130,13 @@ Below is a mapping between `make` variables to E2E configuration flags.
1. `INSTALL_VELERO `: `-install-velero`. Optional.
1. `DEBUG_VELERO_POD_RESTART`: `-debug-velero-pod-restart`. Optional.
1. `FAIL_FAST`: `--fail-fast`. Optional.
1. `HAS_VSPHERE_PLUGIN`: `--has-vsphere-plugin`. Optional.
### Examples
Basic examples:
#### Basic examples:
1. Run Velero tests in a kind cluster with AWS (or MinIO) as the storage provider:
@@ -208,7 +210,7 @@ ADDITIONAL_CREDS_FILE=/path/to/azure-creds \
make test-e2e
```
Upgrade examples:
#### Upgrade examples:
1. Run Velero upgrade tests with pre-upgrade version:
@@ -234,7 +236,7 @@ UPGRADE_FROM_VELERO_VERSION=v1.10.2,v1.11.0 \
make test-e2e
```
Migration examples:
#### Migration examples:
1. Migration between 2 cluster of the same provider tests:
@@ -275,7 +277,7 @@ GINKGO_LABELS="Migration" \
make test-e2e
```
## 5. Filtering tests
#### Filtering tests
In release-1.15, Velero bumps the [Ginkgo](https://onsi.github.io/ginkgo/) version to [v2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2).
Velero E2E start to use [labels](https://onsi.github.io/ginkgo/#spec-labels) to filter cases instead of [`-focus` and `-skip`](https://onsi.github.io/ginkgo/#focused-specs) parameters.
@@ -285,7 +287,6 @@ Both `make run-e2e` and `make run-perf` CLI support using parameter `GINKGO_LABE
`GINKGO_LABELS` is interpreted into `ginkgo run` CLI's parameter [`--label-filter`](https://onsi.github.io/ginkgo/#spec-labels).
### Examples
E2E tests can be run with specific cases to be included and/or excluded using the commands below:
1. Run Velero tests with specific cases to be included:
@@ -316,6 +317,44 @@ In this example, cases are labelled as
* `Migration` and `Restic`
will be skipped.
#### VKS environment test
1. Run the CSI data mover test.
`HAS_VSPHERE_PLUGIN` should be set to `false` so that the Velero vSphere plugin is not installed.
``` bash
CLOUD_PROVIDER=vsphere \
DEFAULT_CLUSTER=wl-antreav1301 \
STANDBY_CLUSTER=wl-antreav1311 \
DEFAULT_CLUSTER_NAME=192.168.0.4 \
STANDBY_CLUSTER_NAME=192.168.0.3 \
FEATURES=EnableCSI \
PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
HAS_VSPHERE_PLUGIN=false \
OBJECT_STORE_PROVIDER=aws \
CREDS_FILE=$HOME/aws-credential \
BSL_CONFIG=region=us-east-1 \
BSL_BUCKET=nightly-normal-account4-test \
BSL_PREFIX=nightly \
ADDITIONAL_BSL_PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
ADDITIONAL_OBJECT_STORE_PROVIDER=aws \
ADDITIONAL_BSL_CONFIG=region=us-east-1 \
ADDITIONAL_BSL_BUCKET=nightly-restrict-account-test \
ADDITIONAL_BSL_PREFIX=nightly \
ADDITIONAL_CREDS_FILE=$HOME/aws-credential \
VELERO_IMAGE=gcr.io/velero-gcp/velero:main \
RESTORE_HELPER_IMAGE=gcr.io/velero-gcp/velero-restore-helper:main \
VERSION=main \
SNAPSHOT_MOVE_DATA=true \
STANDBY_CLUSTER_CLOUD_PROVIDER=vsphere \
STANDBY_CLUSTER_OBJECT_STORE_PROVIDER=aws \
STANDBY_CLUSTER_PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
DISABLE_INFORMER_CACHE=true \
REGISTRY_CREDENTIAL_FILE=$HOME/.docker/config.json \
GINKGO_LABELS=Migration \
KIBISHII_DIRECTORY=$HOME/kibishii/kubernetes/yaml/ \
make test-e2e
```
## 6. Full Tests execution
Having provided several examples of E2E test execution: if no filter is involved, then regardless of differences in the test environment,

View File

@@ -110,7 +110,7 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
if InstallVelero {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
err = VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)
err = VeleroUninstall(ctx, veleroCfg)
Expect(err).To(Succeed())
}
}
@@ -197,9 +197,9 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
secretKey,
)).To(Succeed())
bsls := []string{"default", additionalBsl}
BSLs := []string{"default", additionalBsl}
for _, bsl := range bsls {
for _, bsl := range BSLs {
backupName = fmt.Sprintf("backup-%s", bsl)
restoreName = fmt.Sprintf("restore-%s", bsl)
// We limit the length of backup name here to avoid the issue of vsphere plugin https://github.com/vmware-tanzu/velero-plugin-for-vsphere/issues/370
@@ -209,8 +209,8 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
restoreName = fmt.Sprintf("%s-%s", restoreName, UUIDgen)
}
veleroCfg.ProvideSnapshotsVolumeParam = !provideSnapshotVolumesParmInBackup
workloadNmespace := kibishiiNamespace + bsl
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, bsl, workloadNmespace, useVolumeSnapshots, !useVolumeSnapshots)).To(Succeed(),
workloadNS := kibishiiNamespace + bsl
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, bsl, workloadNS, useVolumeSnapshots, !useVolumeSnapshots)).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace using BSL %s", bsl)
}
})

View File

@@ -34,7 +34,7 @@ import (
. "github.com/vmware-tanzu/velero/test/util/velero"
)
// Test backup and restore of Kibishi using restic
// Test backup and restore of Kibishii using restic
func BackupDeletionWithSnapshots() {
backup_deletion_test(true)
@@ -99,8 +99,6 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
providerName := veleroCfg.CloudProvider
veleroNamespace := veleroCfg.VeleroNamespace
registryCredentialFile := veleroCfg.RegistryCredentialFile
bslPrefix := veleroCfg.BSLPrefix
bslConfig := veleroCfg.BSLConfig
veleroFeatures := veleroCfg.Features
for _, ns := range workloadNamespaceList {
if err := CreateNamespace(oneHourTimeout, client, ns); err != nil {
@@ -143,7 +141,8 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
})
})
for _, ns := range workloadNamespaceList {
if providerName == Vsphere && useVolumeSnapshots {
if useVolumeSnapshots &&
veleroCfg.HasVspherePlugin {
// Wait for uploads started by the Velero Plugin for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
fmt.Println("Waiting for vSphere uploads to complete")
@@ -152,7 +151,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
}
}
}
err = ObjectsShouldBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix)
err = ObjectsShouldBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig, backupName, BackupObjectsPrefix)
if err != nil {
return err
}
@@ -164,9 +163,12 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
for _, ns := range workloadNamespaceList {
snapshotCheckPoint, err = GetSnapshotCheckPoint(client, veleroCfg, DefaultKibishiiWorkerCounts, ns, backupName, KibishiiPVCNameList)
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
err = SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslConfig,
backupName, snapshotCheckPoint)
err = CheckSnapshotsInProvider(
veleroCfg,
backupName,
snapshotCheckPoint,
false,
)
if err != nil {
return errors.Wrap(err, "exceed waiting for snapshot created in cloud")
}
@@ -178,9 +180,12 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
// Get all snapshots based on the backup name, regardless of namespaces
err = SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslConfig,
backupName, snapshotCheckPoint)
err = CheckSnapshotsInProvider(
veleroCfg,
backupName,
snapshotCheckPoint,
false,
)
if err != nil {
return errors.Wrap(err, "exceed waiting for snapshot created in cloud")
}
@@ -206,26 +211,34 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
return err
}
// Verify snapshots are deleted after backup deletion.
if useVolumeSnapshots {
err = SnapshotsShouldNotExistInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLConfig,
backupName, snapshotCheckPoint)
snapshotCheckPoint.ExpectCount = 0
err = CheckSnapshotsInProvider(
veleroCfg,
backupName,
snapshotCheckPoint,
false,
)
if err != nil {
return errors.Wrap(err, "exceed waiting for snapshot created in cloud")
return errors.Wrap(err, "fail to verify snapshots are deleted in provider.")
}
}
err = ObjectsShouldNotBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix, 5)
// Verify backup metadata files are deleted in OSS after backup deletion.
err = ObjectsShouldNotBeInBucket(
veleroCfg.ObjectStoreProvider,
veleroCfg.CloudCredentialsFile,
veleroCfg.BSLBucket,
veleroCfg.BSLPrefix,
veleroCfg.BSLConfig,
backupName,
BackupObjectsPrefix,
5,
)
if err != nil {
return err
}
if useVolumeSnapshots {
if err := SnapshotsShouldNotExistInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket,
bslConfig, backupName, snapshotCheckPoint); err != nil {
return errors.Wrap(err, "exceed waiting for snapshot created in cloud")
}
}
// Hit issue: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html#:~:text=SnapshotCreationPerVolumeRateExceeded
// Sleep for more than 15 seconds to avoid this issue.
@@ -242,13 +255,28 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
})
})
err = DeleteObjectsInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix)
if err != nil {
if err := DeleteObjectsInBucket(
veleroCfg.ObjectStoreProvider,
veleroCfg.CloudCredentialsFile,
veleroCfg.BSLBucket,
veleroCfg.BSLPrefix,
veleroCfg.BSLConfig,
backupName,
BackupObjectsPrefix,
); err != nil {
return err
}
err = ObjectsShouldNotBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix, 1)
if err != nil {
if err := ObjectsShouldNotBeInBucket(
veleroCfg.ObjectStoreProvider,
veleroCfg.CloudCredentialsFile,
veleroCfg.BSLBucket,
veleroCfg.BSLPrefix,
veleroCfg.BSLConfig,
backupName,
BackupObjectsPrefix,
1,
); err != nil {
return err
}

View File

@@ -72,7 +72,7 @@ func BackupsSyncTest() {
if InstallVelero {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
}
}
})
@@ -108,7 +108,7 @@ func BackupsSyncTest() {
})
By("Uninstall velero", func() {
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
})
By("Install velero", func() {

View File

@@ -84,7 +84,7 @@ func TTLTest() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
if InstallVelero {
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
}
Expect(DeleteNamespace(ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS))
}
@@ -122,19 +122,31 @@ func TTLTest() {
var snapshotCheckPoint SnapshotCheckPoint
if useVolumeSnapshots {
if veleroCfg.CloudProvider == Vsphere {
// TODO - remove after upload progress monitoring is implemented
if veleroCfg.HasVspherePlugin {
By("Waiting for vSphere uploads to complete", func() {
Expect(WaitForVSphereUploadCompletion(ctx, time.Hour,
test.testNS, 2)).To(Succeed())
})
}
snapshotCheckPoint, err = GetSnapshotCheckPoint(client, veleroCfg, 2, test.testNS, test.backupName, KibishiiPVCNameList)
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLConfig,
test.backupName, snapshotCheckPoint)).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
snapshotCheckPoint, err = GetSnapshotCheckPoint(
client,
veleroCfg,
2,
test.testNS,
test.backupName,
KibishiiPVCNameList,
)
Expect(err).NotTo(HaveOccurred(), "Fail to get snapshot checkpoint")
Expect(
CheckSnapshotsInProvider(
veleroCfg,
test.backupName,
snapshotCheckPoint,
false,
),
).NotTo(HaveOccurred(), "Fail to verify the created snapshots")
}
By(fmt.Sprintf("Simulating a disaster by removing namespace %s\n", BackupCfg.BackupName), func() {
@@ -188,9 +200,13 @@ func TTLTest() {
By("PersistentVolume snapshots should be deleted", func() {
if useVolumeSnapshots {
Expect(SnapshotsShouldNotExistInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLConfig,
test.backupName, snapshotCheckPoint)).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
snapshotCheckPoint.ExpectCount = 0
Expect(CheckSnapshotsInProvider(
veleroCfg,
test.backupName,
snapshotCheckPoint,
false,
)).NotTo(HaveOccurred(), "Fail to verify snapshots are deleted in provider")
}
})

View File

@@ -83,13 +83,11 @@ func APIExtensionsVersionsTest() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed())
Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyClusterContext)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed())
})
}

View File

@@ -100,7 +100,7 @@ func APIGroupVersionsTest() {
})
if InstallVelero {
By("Uninstall Velero in api group version case", func() {
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).NotTo(HaveOccurred())
Expect(VeleroUninstall(ctx, veleroCfg)).NotTo(HaveOccurred())
})
}
}

View File

@@ -31,7 +31,6 @@ import (
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/util/common"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
type BackupVolumeInfo struct {
@@ -108,9 +107,6 @@ func (v *BackupVolumeInfo) CreateResources() error {
return errors.Wrapf(err, "Failed to create namespace %s", createNSName)
}
// Install StorageClass
Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s-csi.yaml", v.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install StorageClass")
// Create deployment
fmt.Printf("Creating deployment in namespaces ...%s\n", createNSName)
// Make sure the PVC count is greater than 3 so that both empty volumes and file-populated volumes exist per pod
@@ -120,7 +116,7 @@ func (v *BackupVolumeInfo) CreateResources() error {
var vols []*v1.Volume
for i := 0; i <= pvcCount-1; i++ {
pvcName := fmt.Sprintf("volume-info-pvc-%d", i)
pvc, err := CreatePVC(v.Client, createNSName, pvcName, CSIStorageClassName, nil)
pvc, err := CreatePVC(v.Client, createNSName, pvcName, StorageClassName, nil)
Expect(err).To(Succeed())
volumeName := fmt.Sprintf("volume-info-pv-%d", i)
vols = append(vols, CreateVolumes(pvc.Name, []string{volumeName})...)
@@ -159,11 +155,3 @@ func (v *BackupVolumeInfo) Destroy() error {
return WaitAllSelectedNSDeleted(v.Ctx, v.Client, "ns-test=true")
}
func (v *BackupVolumeInfo) cleanResource() error {
if err := DeleteStorageClass(v.Ctx, v.Client, CSIStorageClassName); err != nil {
return errors.Wrap(err, "fail to delete the StorageClass")
}
return nil
}

View File

@@ -61,6 +61,5 @@ func (c *CSIDataMoverVolumeInfo) Verify() error {
Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true))
Expect(volumeInfo[0].SnapshotDataMovementInfo).NotTo(BeNil())
// Clean SC and VSC
return c.cleanResource()
return nil
}

View File

@@ -60,6 +60,5 @@ func (c *CSISnapshotVolumeInfo) Verify() error {
Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true))
Expect(volumeInfo[0].CSISnapshotInfo).NotTo(BeNil())
// Clean SC and VSC
return c.cleanResource()
return nil
}

View File

@@ -60,6 +60,5 @@ func (f *FilesystemUploadVolumeInfo) Verify() error {
Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true))
Expect(volumeInfo[0].PVBInfo).NotTo(BeNil())
// Clean SC and VSC
return f.cleanResource()
return nil
}

View File

@@ -61,6 +61,5 @@ func (n *NativeSnapshotVolumeInfo) Verify() error {
Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true))
Expect(volumeInfo[0].NativeSnapshotInfo).NotTo(BeNil())
// Clean SC and VSC
return n.cleanResource()
return nil
}

View File

@@ -60,6 +60,5 @@ func (s *SkippedVolumeInfo) Verify() error {
Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true))
Expect(volumeInfo[0].Skipped).To(BeIdenticalTo(true))
// Clean SC and VSC
return s.cleanResource()
return nil
}

View File

@@ -68,14 +68,6 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create a storage class %s.", StorageClassName), func() {
Expect(InstallStorageClass(context.Background(), fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))).To(Succeed())
})
By(fmt.Sprintf("Create a storage class %s.", StorageClassName), func() {
Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class")
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
nodeNameList, err := GetWorkerNodes(p.Ctx)
Expect(err).To(Succeed())

View File

@@ -18,7 +18,7 @@ type StorageClasssChanging struct {
TestCase
labels map[string]string
data map[string]string
configmaptName string
cmName string
namespace string
srcStorageClass string
desStorageClass string
@@ -51,7 +51,7 @@ func (s *StorageClasssChanging) Init() error {
s.labels = map[string]string{"velero.io/change-storage-class": "RestoreItemAction",
"velero.io/plugin-config": ""}
s.data = map[string]string{s.srcStorageClass: s.desStorageClass}
s.configmaptName = "change-storage-class-config"
s.cmName = "change-storage-class-config"
s.volume = "volume-1"
s.pvcName = fmt.Sprintf("pvc-%s", s.volume)
s.podName = "pod-1"
@@ -72,10 +72,6 @@ func (s *StorageClasssChanging) CreateResources() error {
"app": "test",
}
By(("Installing storage class..."), func() {
Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", s.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class")
})
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
@@ -94,8 +90,8 @@ func (s *StorageClasssChanging) CreateResources() error {
Expect(err).To(Succeed())
})
By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {
_, err := CreateConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName, s.labels, s.data)
By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.cmName, s.VeleroCfg.VeleroNamespace), func() {
_, err := CreateConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.cmName, s.labels, s.data)
Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", s.VeleroCfg.VeleroNamespace))
})
return nil
@@ -149,8 +145,7 @@ func (s *StorageClasssChanging) Clean() error {
Expect(CleanupNamespacesWithPoll(s.Ctx, s.Client, s.CaseBaseName)).To(Succeed(),
fmt.Sprintf("Failed to delete namespace %s", s.CaseBaseName))
})
DeleteConfigmap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName)
DeleteStorageClass(s.Ctx, s.Client, s.desStorageClass)
DeleteConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.cmName)
s.TestCase.Clean()
}

View File

@@ -36,12 +36,12 @@ import (
const (
// Please make sure the length of this namespace is kept short,
// otherwise ResticRepositories name verification will be wrong
// when making combination of ResticRepositories name(max length is 63)
// otherwise BackupRepositories name verification will be wrong
// when making combination of BackupRepositories name(max length is 63)
bslDeletionTestNs = "bsl-deletion"
)
// Test backup and restore of Kibishi using restic
// Test backup and restore of Kibishii using restic
func BslDeletionWithSnapshots() {
BslDeletionTest(true)
@@ -134,17 +134,17 @@ func BslDeletionTest(useVolumeSnapshots bool) {
)).To(Succeed())
})
backupName_1 := "backup1-" + UUIDgen.String()
backupName_2 := "backup2-" + UUIDgen.String()
backupLocation_1 := "default"
backupLocation_2 := additionalBsl
podName_1 := "kibishii-deployment-0"
podName_2 := "kibishii-deployment-1"
backupName1 := "backup1-" + UUIDgen.String()
backupName2 := "backup2-" + UUIDgen.String()
backupLocation1 := "default"
backupLocation2 := additionalBsl
podName1 := "kibishii-deployment-0"
podName2 := "kibishii-deployment-1"
label_1 := "for=1"
label1 := "for=1"
// TODO remove when issue https://github.com/vmware-tanzu/velero/issues/4724 is fixed
//label_2 := "for!=1"
label_2 := "for=2"
//label2 := "for!=1"
label2 := "for=2"
By("Create namespace for sample workload", func() {
Expect(CreateNamespace(oneHourTimeout, *veleroCfg.ClientToInstallVelero, bslDeletionTestNs)).To(Succeed())
})
@@ -157,33 +157,33 @@ func BslDeletionTest(useVolumeSnapshots bool) {
// Restic can not backup PV only, so pod need to be labeled also
By("Label all 2 worker-pods of Kibishii", func() {
Expect(AddLabelToPod(context.Background(), podName_1, bslDeletionTestNs, label_1)).To(Succeed())
Expect(AddLabelToPod(context.Background(), "kibishii-deployment-1", bslDeletionTestNs, label_2)).To(Succeed())
Expect(AddLabelToPod(context.Background(), podName1, bslDeletionTestNs, label1)).To(Succeed())
Expect(AddLabelToPod(context.Background(), "kibishii-deployment-1", bslDeletionTestNs, label2)).To(Succeed())
})
By("Get all 2 PVCs of Kibishii and label them separately ", func() {
pvc, err := GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_1)
pvc, err := GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName1)
Expect(err).To(Succeed())
fmt.Println(pvc)
Expect(pvc).To(HaveLen(1))
pvc1 := pvc[0]
pvc, err = GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_2)
pvc, err = GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName2)
Expect(err).To(Succeed())
fmt.Println(pvc)
Expect(pvc).To(HaveLen(1))
pvc2 := pvc[0]
Expect(AddLabelToPvc(context.Background(), pvc1, bslDeletionTestNs, label_1)).To(Succeed())
Expect(AddLabelToPvc(context.Background(), pvc2, bslDeletionTestNs, label_2)).To(Succeed())
Expect(AddLabelToPvc(context.Background(), pvc1, bslDeletionTestNs, label1)).To(Succeed())
Expect(AddLabelToPvc(context.Background(), pvc2, bslDeletionTestNs, label2)).To(Succeed())
})
var BackupCfg BackupConfig
BackupCfg.BackupName = backupName_1
BackupCfg.BackupName = backupName1
BackupCfg.Namespace = bslDeletionTestNs
BackupCfg.BackupLocation = backupLocation_1
BackupCfg.BackupLocation = backupLocation1
BackupCfg.UseVolumeSnapshots = useVolumeSnapshots
BackupCfg.DefaultVolumesToFsBackup = !useVolumeSnapshots
BackupCfg.Selector = label_1
By(fmt.Sprintf("Backup one of PV of sample workload by label-1 - Kibishii by the first BSL %s", backupLocation_1), func() {
BackupCfg.Selector = label1
By(fmt.Sprintf("Backup one of PV of sample workload by label-1 - Kibishii by the first BSL %s", backupLocation1), func() {
// TODO currently, the upgrade case covers the upgrade path from 1.6 to main and the velero v1.6 doesn't support "debug" command
// TODO move to "runDebug" after we bump up to 1.7 in the upgrade case
Expect(VeleroBackupNamespace(oneHourTimeout, veleroCfg.VeleroCLI,
@@ -193,10 +193,10 @@ func BslDeletionTest(useVolumeSnapshots bool) {
})
})
BackupCfg.BackupName = backupName_2
BackupCfg.BackupLocation = backupLocation_2
BackupCfg.Selector = label_2
By(fmt.Sprintf("Back up the other one PV of sample workload with label-2 into the additional BSL %s", backupLocation_2), func() {
BackupCfg.BackupName = backupName2
BackupCfg.BackupLocation = backupLocation2
BackupCfg.Selector = label2
By(fmt.Sprintf("Back up the other one PV of sample workload with label-2 into the additional BSL %s", backupLocation2), func() {
Expect(VeleroBackupNamespace(oneHourTimeout, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string {
RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, BackupCfg.BackupName, "")
@@ -205,88 +205,99 @@ func BslDeletionTest(useVolumeSnapshots bool) {
})
if useVolumeSnapshots {
if veleroCfg.CloudProvider == Vsphere {
// TODO - remove after upload progress monitoring is implemented
if veleroCfg.HasVspherePlugin {
By("Waiting for vSphere uploads to complete", func() {
Expect(WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour,
bslDeletionTestNs, 2)).To(Succeed())
})
By(fmt.Sprintf("Snapshot CR in backup %s should be created", backupName_1), func() {
By(fmt.Sprintf("Snapshot CR in backup %s should be created", backupName1), func() {
Expect(SnapshotCRsCountShouldBe(context.Background(), bslDeletionTestNs,
backupName_1, 1)).To(Succeed())
backupName1, 1)).To(Succeed())
})
By(fmt.Sprintf("Snapshot CR in backup %s should be created", backupName_2), func() {
By(fmt.Sprintf("Snapshot CR in backup %s should be created", backupName2), func() {
Expect(SnapshotCRsCountShouldBe(context.Background(), bslDeletionTestNs,
backupName_2, 1)).To(Succeed())
backupName2, 1)).To(Succeed())
})
}
if veleroCfg.CloudProvider != VanillaZFS {
var snapshotCheckPoint SnapshotCheckPoint
snapshotCheckPoint.NamespaceBackedUp = bslDeletionTestNs
By(fmt.Sprintf("Snapshot of bsl %s should be created in cloud object store", backupLocation_1), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName_1, []string{podName_1})
By(fmt.Sprintf("Snapshot of bsl %s should be created in cloud object store", backupLocation1), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(
*veleroCfg.ClientToInstallVelero,
veleroCfg,
1,
bslDeletionTestNs,
backupName1,
[]string{podName1},
)
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket,
veleroCfg.BSLConfig, backupName_1, snapshotCheckPoint)).To(Succeed())
Expect(CheckSnapshotsInProvider(
veleroCfg,
backupName1,
snapshotCheckPoint,
false,
)).To(Succeed())
})
By(fmt.Sprintf("Snapshot of bsl %s should be created in cloud object store", backupLocation_2), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName_2, []string{podName_2})
By(fmt.Sprintf("Snapshot of bsl %s should be created in cloud object store", backupLocation2), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(
*veleroCfg.ClientToInstallVelero,
veleroCfg,
1,
bslDeletionTestNs,
backupName2,
[]string{podName2},
)
Expect(err).NotTo(HaveOccurred(), "Fail to get snapshot checkpoint")
var BSLCredentials, BSLConfig string
if veleroCfg.CloudProvider == Vsphere {
BSLCredentials = veleroCfg.AdditionalBSLCredentials
BSLConfig = veleroCfg.AdditionalBSLConfig
} else { // Snapshotting with non-vSphere provider has nothing to do with BSL
BSLCredentials = veleroCfg.CloudCredentialsFile
BSLConfig = veleroCfg.BSLConfig
}
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
BSLCredentials, veleroCfg.AdditionalBSLBucket,
BSLConfig, backupName_2, snapshotCheckPoint)).To(Succeed())
Expect(CheckSnapshotsInProvider(
veleroCfg,
backupName2,
snapshotCheckPoint,
true,
)).To(Succeed())
})
}
} else { // For Restics
By(fmt.Sprintf("Resticrepositories for BSL %s should be created in Velero namespace", backupLocation_1), func() {
} else {
By(fmt.Sprintf("BackupRepositories for BSL %s should be created in Velero namespace", backupLocation1), func() {
Expect(BackupRepositoriesCountShouldBe(context.Background(),
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation_1, 1)).To(Succeed())
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation1, 1)).To(Succeed())
})
By(fmt.Sprintf("Resticrepositories for BSL %s should be created in Velero namespace", backupLocation_2), func() {
By(fmt.Sprintf("BackupRepositories for BSL %s should be created in Velero namespace", backupLocation2), func() {
Expect(BackupRepositoriesCountShouldBe(context.Background(),
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation_2, 1)).To(Succeed())
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation2, 1)).To(Succeed())
})
}
By(fmt.Sprintf("Backup 1 %s should be created.", backupName_1), func() {
By(fmt.Sprintf("Backup 1 %s should be created.", backupName1), func() {
Expect(WaitForBackupToBeCreated(context.Background(),
backupName_1, 10*time.Minute, &veleroCfg)).To(Succeed())
backupName1, 10*time.Minute, &veleroCfg)).To(Succeed())
})
By(fmt.Sprintf("Backup 2 %s should be created.", backupName_2), func() {
By(fmt.Sprintf("Backup 2 %s should be created.", backupName2), func() {
Expect(WaitForBackupToBeCreated(context.Background(),
backupName_2, 10*time.Minute, &veleroCfg)).To(Succeed())
backupName2, 10*time.Minute, &veleroCfg)).To(Succeed())
})
backupsInBSL1, err := GetBackupsFromBsl(context.Background(), veleroCfg.VeleroCLI, backupLocation_1)
backupsInBSL1, err := GetBackupsFromBsl(context.Background(), veleroCfg.VeleroCLI, backupLocation1)
Expect(err).To(Succeed())
backupsInBSL2, err := GetBackupsFromBsl(context.Background(), veleroCfg.VeleroCLI, backupLocation_2)
backupsInBSL2, err := GetBackupsFromBsl(context.Background(), veleroCfg.VeleroCLI, backupLocation2)
Expect(err).To(Succeed())
backupsInBsl1AndBsl2 := append(backupsInBSL1, backupsInBSL2...)
By(fmt.Sprintf("Get all backups from 2 BSLs %s before deleting one of them", backupLocation_1), func() {
By(fmt.Sprintf("Get all backups from 2 BSLs %s before deleting one of them", backupLocation1), func() {
backupsBeforeDel, err := GetAllBackups(context.Background(), veleroCfg.VeleroCLI)
Expect(err).To(Succeed())
Expect(cmp.Diff(backupsInBsl1AndBsl2, backupsBeforeDel, cmpopts.SortSlices(less))).Should(BeEmpty())
By(fmt.Sprintf("Backup1 %s should exist in cloud object store before bsl deletion", backupName_1), func() {
By(fmt.Sprintf("Backup1 %s should exist in cloud object store before bsl deletion", backupName1), func() {
Expect(ObjectsShouldBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile,
veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig,
backupName_1, BackupObjectsPrefix)).To(Succeed())
backupName1, BackupObjectsPrefix)).To(Succeed())
})
By(fmt.Sprintf("Delete one of backup locations - %s", backupLocation_1), func() {
Expect(DeleteBslResource(context.Background(), veleroCfg.VeleroCLI, backupLocation_1)).To(Succeed())
By(fmt.Sprintf("Delete one of backup locations - %s", backupLocation1), func() {
Expect(DeleteBslResource(context.Background(), veleroCfg.VeleroCLI, backupLocation1)).To(Succeed())
Expect(WaitForBackupsToBeDeleted(context.Background(), backupsInBSL1, 10*time.Minute, &veleroCfg)).To(Succeed())
})
@@ -298,10 +309,10 @@ func BslDeletionTest(useVolumeSnapshots bool) {
})
})
By(fmt.Sprintf("Backup1 %s should still exist in cloud object store after bsl deletion", backupName_1), func() {
By(fmt.Sprintf("Backup1 %s should still exist in cloud object store after bsl deletion", backupName1), func() {
Expect(ObjectsShouldBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile,
veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig,
backupName_1, BackupObjectsPrefix)).To(Succeed())
backupName1, BackupObjectsPrefix)).To(Succeed())
})
// TODO: Choose the additional BSL to be deleted as a new test case
@@ -312,46 +323,53 @@ func BslDeletionTest(useVolumeSnapshots bool) {
// })
if useVolumeSnapshots {
if veleroCfg.CloudProvider == Vsphere {
By(fmt.Sprintf("Snapshot in backup %s should still exist, because snapshot CR will be deleted 24 hours later if the status is a success", backupName_2), func() {
if veleroCfg.HasVspherePlugin {
By(fmt.Sprintf("Snapshot in backup %s should still exist, because snapshot CR will be deleted 24 hours later if the status is a success", backupName2), func() {
Expect(SnapshotCRsCountShouldBe(context.Background(), bslDeletionTestNs,
backupName_1, 1)).To(Succeed())
backupName1, 1)).To(Succeed())
Expect(SnapshotCRsCountShouldBe(context.Background(), bslDeletionTestNs,
backupName_2, 1)).To(Succeed())
backupName2, 1)).To(Succeed())
})
}
var snapshotCheckPoint SnapshotCheckPoint
snapshotCheckPoint.NamespaceBackedUp = bslDeletionTestNs
By(fmt.Sprintf("Snapshot should not be deleted in cloud object store after deleting bsl %s", backupLocation_1), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName_1, []string{podName_1})
By(fmt.Sprintf("Snapshot should not be deleted in cloud object store after deleting bsl %s", backupLocation1), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName1, []string{podName1})
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket,
veleroCfg.BSLConfig, backupName_1, snapshotCheckPoint)).To(Succeed())
Expect(CheckSnapshotsInProvider(
veleroCfg,
backupName1,
snapshotCheckPoint,
false,
)).To(Succeed())
})
By(fmt.Sprintf("Snapshot should not be deleted in cloud object store after deleting bsl %s", backupLocation_2), func() {
var BSLCredentials, BSLConfig string
if veleroCfg.CloudProvider == Vsphere {
BSLCredentials = veleroCfg.AdditionalBSLCredentials
BSLConfig = veleroCfg.AdditionalBSLConfig
} else {
BSLCredentials = veleroCfg.CloudCredentialsFile
BSLConfig = veleroCfg.BSLConfig
}
snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName_2, []string{podName_2})
By(fmt.Sprintf("Snapshot should not be deleted in cloud object store after deleting bsl %s", backupLocation2), func() {
snapshotCheckPoint, err = GetSnapshotCheckPoint(
*veleroCfg.ClientToInstallVelero,
veleroCfg,
1,
bslDeletionTestNs,
backupName2,
[]string{podName2},
)
Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
BSLCredentials, veleroCfg.AdditionalBSLBucket,
BSLConfig, backupName_2, snapshotCheckPoint)).To(Succeed())
Expect(CheckSnapshotsInProvider(
veleroCfg,
backupName2,
snapshotCheckPoint,
true,
)).To(Succeed())
})
} else { // For Restic
By(fmt.Sprintf("Resticrepositories for BSL %s should be deleted in Velero namespace", backupLocation_1), func() {
} else {
By(fmt.Sprintf("BackupRepositories for BSL %s should be deleted in Velero namespace", backupLocation1), func() {
Expect(BackupRepositoriesCountShouldBe(context.Background(),
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation_1, 0)).To(Succeed())
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation1, 0)).To(Succeed())
})
By(fmt.Sprintf("Resticrepositories for BSL %s should still exist in Velero namespace", backupLocation_2), func() {
By(fmt.Sprintf("BackupRepositories for BSL %s should still exist in Velero namespace", backupLocation2), func() {
Expect(BackupRepositoriesCountShouldBe(context.Background(),
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation_2, 1)).To(Succeed())
veleroCfg.VeleroNamespace, bslDeletionTestNs+"-"+backupLocation2, 1)).To(Succeed())
})
}
fmt.Printf("|| EXPECTED || - Backup deletion test completed successfully\n")

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
@@ -22,6 +22,7 @@ import (
"flag"
"fmt"
"slices"
"strings"
"testing"
"time"
@@ -29,7 +30,7 @@ import (
. "github.com/onsi/gomega"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
. "github.com/vmware-tanzu/velero/test"
"github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/backup"
. "github.com/vmware-tanzu/velero/test/e2e/backups"
. "github.com/vmware-tanzu/velero/test/e2e/basic"
@@ -48,61 +49,300 @@ import (
. "github.com/vmware-tanzu/velero/test/e2e/scale"
. "github.com/vmware-tanzu/velero/test/e2e/schedule"
. "github.com/vmware-tanzu/velero/test/e2e/upgrade"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
"github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
func init() {
test.VeleroCfg.Options = install.Options{}
flag.StringVar(
&test.VeleroCfg.CloudProvider,
"cloud-provider",
"",
"cloud that Velero will be installed into. Required.",
)
flag.StringVar(
&test.VeleroCfg.ObjectStoreProvider,
"object-store-provider",
"",
"provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.",
)
flag.StringVar(
&test.VeleroCfg.BSLBucket,
"bucket",
"",
"name of the object storage bucket where backups from e2e tests should be stored. Required.",
)
flag.StringVar(
&test.VeleroCfg.CloudCredentialsFile,
"credentials-file",
"",
"file containing credentials for backup and volume provider. Required.",
)
flag.StringVar(
&test.VeleroCfg.VeleroCLI,
"velerocli",
"velero",
"path to the velero application to use.",
)
flag.StringVar(
&test.VeleroCfg.VeleroImage,
"velero-image",
"velero/velero:main",
"image for the velero server to be tested.",
)
flag.StringVar(
&test.VeleroCfg.Plugins,
"plugins",
"",
"provider plugins to be tested.",
)
flag.StringVar(
&test.VeleroCfg.AddBSLPlugins,
"additional-bsl-plugins",
"",
"additional plugins to be tested.",
)
flag.StringVar(
&test.VeleroCfg.VeleroVersion,
"velero-version",
"main",
"image version for the velero server to be tested with.",
)
flag.StringVar(
&test.VeleroCfg.RestoreHelperImage,
"restore-helper-image",
"",
"image for the velero restore helper to be tested.",
)
flag.StringVar(
&test.VeleroCfg.UpgradeFromVeleroCLI,
"upgrade-from-velero-cli",
"",
"comma-separated list of velero application for the pre-upgrade velero server.",
)
flag.StringVar(
&test.VeleroCfg.UpgradeFromVeleroVersion,
"upgrade-from-velero-version",
"v1.7.1",
"comma-separated list of Velero version to be tested with for the pre-upgrade velero server.",
)
flag.StringVar(
&test.VeleroCfg.MigrateFromVeleroCLI,
"migrate-from-velero-cli",
"",
"comma-separated list of velero application on source cluster.",
)
flag.StringVar(
&test.VeleroCfg.MigrateFromVeleroVersion,
"migrate-from-velero-version",
"self",
"comma-separated list of Velero version to be tested with on source cluster.",
)
flag.StringVar(
&test.VeleroCfg.BSLConfig,
"bsl-config",
"", "configuration to use for the backup storage location. Format is key1=value1,key2=value2")
flag.StringVar(
&test.VeleroCfg.BSLPrefix,
"prefix",
"",
"prefix under which all Velero data should be stored within the bucket. Optional.",
)
flag.StringVar(
&test.VeleroCfg.VSLConfig,
"vsl-config",
"",
"configuration to use for the volume snapshot location. Format is key1=value1,key2=value2",
)
flag.StringVar(
&test.VeleroCfg.VeleroNamespace,
"velero-namespace",
"velero",
"namespace to install Velero into",
)
flag.BoolVar(
&test.InstallVelero,
"install-velero",
true,
"install/uninstall velero during the test. Optional.",
)
flag.BoolVar(
&test.VeleroCfg.UseNodeAgent,
"use-node-agent",
true,
"whether deploy node agent daemonset velero during the test. Optional.",
)
flag.BoolVar(
&test.VeleroCfg.UseVolumeSnapshots,
"use-volume-snapshots",
true,
"whether or not to create snapshot location automatically. Set to false if you do not plan to create volume snapshots via a storage provider.",
)
flag.StringVar(
&test.VeleroCfg.RegistryCredentialFile,
"registry-credential-file",
"",
"file containing credential for the image registry, follows the same format rules as the ~/.docker/config.json file. Optional.",
)
flag.StringVar(
&test.VeleroCfg.KibishiiDirectory,
"kibishii-directory",
"github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/",
"file directory or URL path to install Kibishii. Optional.",
)
// Flags to create an additional BSL for multiple credentials test
flag.StringVar(&VeleroCfg.AdditionalBSLProvider, "additional-bsl-object-store-provider", "", "provider of object store plugin for additional backup storage location. Required if testing multiple credentials support.")
flag.StringVar(&VeleroCfg.AdditionalBSLBucket, "additional-bsl-bucket", "", "name of the object storage bucket for additional backup storage location. Required if testing multiple credentials support.")
flag.StringVar(&VeleroCfg.AdditionalBSLPrefix, "additional-bsl-prefix", "", "prefix under which all Velero data should be stored within the bucket for additional backup storage location. Optional.")
flag.StringVar(&VeleroCfg.AdditionalBSLConfig, "additional-bsl-config", "", "configuration to use for the additional backup storage location. Format is key1=value1,key2=value2")
flag.StringVar(&VeleroCfg.AdditionalBSLCredentials, "additional-bsl-credentials-file", "", "file containing credentials for additional backup storage location provider. Required if testing multiple credentials support.")
flag.StringVar(&VeleroCfg.Features, "features", "", "comma-separated list of features to enable for this Velero process.")
flag.StringVar(&VeleroCfg.GCFrequency, "garbage-collection-frequency", "", "frequency of garbage collection.")
flag.StringVar(&VeleroCfg.DefaultClusterContext, "default-cluster-context", "", "default cluster's kube config context, it's for migration test.")
flag.StringVar(&VeleroCfg.StandbyClusterContext, "standby-cluster-context", "", "standby cluster's kube config context, it's for migration test.")
flag.StringVar(&VeleroCfg.UploaderType, "uploader-type", "", "type of uploader for persistent volume backup.")
flag.BoolVar(&VeleroCfg.VeleroServerDebugMode, "velero-server-debug-mode", false, "a switch for enable or disable having debug log of Velero server.")
flag.BoolVar(&VeleroCfg.SnapshotMoveData, "snapshot-move-data", false, "a Switch for taking backup with Velero's data mover, if data-mover-plugin is not provided, using built-in plugin")
flag.StringVar(&VeleroCfg.DataMoverPlugin, "data-mover-plugin", "", "customized plugin for data mover.")
flag.StringVar(&VeleroCfg.StandbyClusterCloudProvider, "standby-cluster-cloud-provider", "", "cloud provider for standby cluster.")
flag.StringVar(&VeleroCfg.StandbyClusterPlugins, "standby-cluster-plugins", "", "plugins provider for standby cluster.")
flag.StringVar(&VeleroCfg.StandbyClusterObjectStoreProvider, "standby-cluster-object-store-provider", "", "object store provider for standby cluster.")
flag.BoolVar(&VeleroCfg.DebugVeleroPodRestart, "debug-velero-pod-restart", false, "a switch for debugging velero pod restart.")
flag.BoolVar(&VeleroCfg.DisableInformerCache, "disable-informer-cache", false, "a switch for disable informer cache.")
flag.StringVar(&VeleroCfg.DefaultClusterName, "default-cluster-name", "", "default cluster's name in kube config file, it's for EKS IRSA test.")
flag.StringVar(&VeleroCfg.StandbyClusterName, "standby-cluster-name", "", "standby cluster's name in kube config file, it's for EKS IRSA test.")
flag.StringVar(&VeleroCfg.EKSPolicyARN, "eks-policy-arn", "", "EKS plicy ARN for creating AWS IAM service account.")
flag.StringVar(&VeleroCfg.DefaultCLSServiceAccountName, "default-cls-service-account-name", "", "default cluster service account name.")
flag.StringVar(&VeleroCfg.StandbyCLSServiceAccountName, "standby-cls-service-account-name", "", "standby cluster service account name.")
flag.BoolVar(&VeleroCfg.FailFast, "fail-fast", true, "a switch for failing fast on meeting error.")
flag.StringVar(
&test.VeleroCfg.AdditionalBSLProvider,
"additional-bsl-object-store-provider",
"",
"provider of object store plugin for additional backup storage location. Required if testing multiple credentials support.",
)
flag.StringVar(
&test.VeleroCfg.AdditionalBSLBucket,
"additional-bsl-bucket",
"",
"name of the object storage bucket for additional backup storage location. Required if testing multiple credentials support.",
)
flag.StringVar(
&test.VeleroCfg.AdditionalBSLPrefix,
"additional-bsl-prefix",
"",
"prefix under which all Velero data should be stored within the bucket for additional backup storage location. Optional.",
)
flag.StringVar(
&test.VeleroCfg.AdditionalBSLConfig,
"additional-bsl-config",
"",
"configuration to use for the additional backup storage location. Format is key1=value1,key2=value2",
)
flag.StringVar(
&test.VeleroCfg.AdditionalBSLCredentials,
"additional-bsl-credentials-file",
"",
"file containing credentials for additional backup storage location provider. Required if testing multiple credentials support.",
)
flag.StringVar(
&test.VeleroCfg.Features,
"features",
"",
"comma-separated list of features to enable for this Velero process.",
)
flag.StringVar(
&test.VeleroCfg.GCFrequency,
"garbage-collection-frequency",
"",
"frequency of garbage collection.",
)
flag.StringVar(
&test.VeleroCfg.DefaultClusterContext,
"default-cluster-context",
"",
"default cluster's kube config context, it's for migration test.",
)
flag.StringVar(
&test.VeleroCfg.StandbyClusterContext,
"standby-cluster-context",
"",
"standby cluster's kube config context, it's for migration test.",
)
flag.StringVar(
&test.VeleroCfg.UploaderType,
"uploader-type",
"",
"type of uploader for persistent volume backup.",
)
flag.BoolVar(
&test.VeleroCfg.VeleroServerDebugMode,
"velero-server-debug-mode",
false,
"a switch for enable or disable having debug log of Velero server.",
)
flag.BoolVar(
&test.VeleroCfg.SnapshotMoveData,
"snapshot-move-data",
false,
"a Switch for taking backup with Velero's data mover, if data-mover-plugin is not provided, using built-in plugin",
)
flag.StringVar(
&test.VeleroCfg.DataMoverPlugin,
"data-mover-plugin",
"",
"customized plugin for data mover.",
)
flag.StringVar(
&test.VeleroCfg.StandbyClusterCloudProvider,
"standby-cluster-cloud-provider",
"",
"cloud provider for standby cluster.",
)
flag.StringVar(
&test.VeleroCfg.StandbyClusterPlugins,
"standby-cluster-plugins",
"",
"plugins provider for standby cluster.",
)
flag.StringVar(
&test.VeleroCfg.StandbyClusterObjectStoreProvider,
"standby-cluster-object-store-provider",
"",
"object store provider for standby cluster.",
)
flag.BoolVar(
&test.VeleroCfg.DebugVeleroPodRestart,
"debug-velero-pod-restart",
false,
"a switch for debugging velero pod restart.",
)
flag.BoolVar(
&test.VeleroCfg.DisableInformerCache,
"disable-informer-cache",
false,
"a switch for disable informer cache.",
)
flag.StringVar(
&test.VeleroCfg.DefaultClusterName,
"default-cluster-name",
"",
"default cluster's name in kube config file, it's for EKS IRSA test.",
)
flag.StringVar(
&test.VeleroCfg.StandbyClusterName,
"standby-cluster-name",
"",
"standby cluster's name in kube config file, it's for EKS IRSA test.",
)
flag.StringVar(
&test.VeleroCfg.EKSPolicyARN,
"eks-policy-arn",
"",
"EKS plicy ARN for creating AWS IAM service account.",
)
flag.StringVar(
&test.VeleroCfg.DefaultCLSServiceAccountName,
"default-cls-service-account-name",
"",
"default cluster service account name.",
)
flag.StringVar(
&test.VeleroCfg.StandbyCLSServiceAccountName,
"standby-cls-service-account-name",
"",
"standby cluster service account name.",
)
flag.BoolVar(
&test.VeleroCfg.FailFast,
"fail-fast",
true,
"a switch for failing fast on meeting error.",
)
flag.BoolVar(
&test.VeleroCfg.HasVspherePlugin,
"has-vsphere-plugin",
false,
"a switch for installing vSphere plugin.",
)
}
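With every option registered on test.VeleroCfg in init(), the entire E2E matrix is driven from the go test command line. A hedged illustration of a typical invocation (flag values are placeholders, not taken from this change):

// go test ./test/e2e -v -timeout=5h \
//   -cloud-provider=aws \
//   -bucket=velero-e2e-bucket \
//   -credentials-file=$HOME/.aws/credentials \
//   -velero-image=velero/velero:main \
//   -plugins=velero/velero-plugin-for-aws:main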
// Add label [SkipVanillaZfs]:
@@ -112,147 +352,302 @@ func init() {
// caused by no expected snapshot found. If we use retain as reclaim policy, then this label can be ignored, all test
// cases can be executed as expected successful result.
var _ = Describe("Velero tests with various CRD API group versions",
Label("APIGroup", "APIVersion", "SKIP_KIND", "LongTime"), APIGroupVersionsTest)
var _ = Describe("CRD of apiextentions v1beta1 should be B/R successfully from cluster(k8s version < 1.22) to cluster(k8s version >= 1.22)",
Label("APIGroup", "APIExtensions", "SKIP_KIND"), APIExtensionsVersionsTest)
var _ = Describe(
"Velero tests with various CRD API group versions",
Label("APIGroup", "APIVersion", "SKIP_KIND", "LongTime"),
APIGroupVersionsTest,
)
var _ = Describe(
"CRD of apiextentions v1beta1 should be B/R successfully from cluster(k8s version < 1.22) to cluster(k8s version >= 1.22)",
Label("APIGroup", "APIExtensions", "SKIP_KIND"),
APIExtensionsVersionsTest,
)
// Test backup and restore of Kibishii using restic
var _ = Describe("Velero tests on cluster using the plugin provider for object storage and Restic for volume backups",
Label("Basic", "Restic"), BackupRestoreWithRestic)
var _ = Describe(
"Velero tests on cluster using the plugin provider for object storage and Restic for volume backups",
Label("Basic", "Restic"),
BackupRestoreWithRestic,
)
var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Snapshot", "SkipVanillaZfs"), BackupRestoreWithSnapshots)
var _ = Describe(
"Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Snapshot", "SkipVanillaZfs"),
BackupRestoreWithSnapshots,
)
var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Snapshot", "RetainPV"), BackupRestoreRetainedPVWithSnapshots)
var _ = Describe(
"Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Snapshot", "RetainPV"),
BackupRestoreRetainedPVWithSnapshots,
)
var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Restic", "RetainPV"), BackupRestoreRetainedPVWithRestic)
var _ = Describe(
"Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Basic", "Restic", "RetainPV"),
BackupRestoreRetainedPVWithRestic,
)
var _ = Describe("Backup/restore of cluster resources",
Label("Basic", "ClusterResource"), ResourcesCheckTest)
var _ = Describe(
"Backup/restore of cluster resources",
Label("Basic", "ClusterResource"),
ResourcesCheckTest,
)
var _ = Describe("Service NodePort reservation during restore is configurable",
Label("Basic", "NodePort"), NodePortTest)
var _ = Describe(
"Service NodePort reservation during restore is configurable",
Label("Basic", "NodePort"),
NodePortTest,
)
var _ = Describe("Storage class of persistent volumes and persistent volume claims can be changed during restores",
Label("Basic", "StorageClass"), StorageClasssChangingTest)
var _ = Describe(
"Storage class of persistent volumes and persistent volume claims can be changed during restores",
Label("Basic", "StorageClass"),
StorageClasssChangingTest,
)
var _ = Describe("Node selectors of persistent volume claims can be changed during restores",
Label("Basic", "SelectedNode", "SKIP_KIND"), PVCSelectedNodeChangingTest)
var _ = Describe(
"Node selectors of persistent volume claims can be changed during restores",
Label("Basic", "SelectedNode", "SKIP_KIND"),
PVCSelectedNodeChangingTest,
)
var _ = Describe("Backup/restore of 2500 namespaces",
Label("Scale", "LongTime"), MultiNSBackupRestore)
var _ = Describe(
"Backup/restore of 2500 namespaces",
Label("Scale", "LongTime"),
MultiNSBackupRestore,
)
// Upgrade test by Kibishii using Restic
var _ = Describe("Velero upgrade tests on cluster using the plugin provider for object storage and Restic for volume backups",
Label("Upgrade", "Restic"), BackupUpgradeRestoreWithRestic)
var _ = Describe("Velero upgrade tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Upgrade", "Snapshot", "SkipVanillaZfs"), BackupUpgradeRestoreWithSnapshots)
var _ = Describe(
"Velero upgrade tests on cluster using the plugin provider for object storage and Restic for volume backups",
Label("Upgrade", "Restic"),
BackupUpgradeRestoreWithRestic,
)
var _ = Describe(
"Velero upgrade tests on cluster using the plugin provider for object storage and snapshots for volume backups",
Label("Upgrade", "Snapshot", "SkipVanillaZfs"),
BackupUpgradeRestoreWithSnapshots,
)
// Test filtering objects by namespace, type, or labels during backup or restore.
var _ = Describe("Resources with the label velero.io/exclude-from-backup=true are not included in backup",
Label("ResourceFiltering", "ExcludeFromBackup"), ExcludeFromBackupTest)
var _ = Describe("Velero test on exclude namespace from the cluster backup",
Label("ResourceFiltering", "ExcludeNamespaces", "Backup"), BackupWithExcludeNamespaces)
var _ = Describe("Velero test on exclude namespace from the cluster restore",
Label("ResourceFiltering", "ExcludeNamespaces", "Restore"), RestoreWithExcludeNamespaces)
var _ = Describe("Velero test on exclude resources from the cluster backup",
Label("ResourceFiltering", "ExcludeResources", "Backup"), BackupWithExcludeResources)
var _ = Describe("Velero test on exclude resources from the cluster restore",
Label("ResourceFiltering", "ExcludeResources", "Restore"), RestoreWithExcludeResources)
var _ = Describe("Velero test on include namespace from the cluster backup",
Label("ResourceFiltering", "IncludeNamespaces", "Backup"), BackupWithIncludeNamespaces)
var _ = Describe("Velero test on include namespace from the cluster restore",
Label("ResourceFiltering", "IncludeNamespaces", "Restore"), RestoreWithIncludeNamespaces)
var _ = Describe("Velero test on include resources from the cluster backup",
Label("ResourceFiltering", "IncludeResources", "Backup"), BackupWithIncludeResources)
var _ = Describe("Velero test on include resources from the cluster restore",
Label("ResourceFiltering", "IncludeResources", "Restore"), RestoreWithIncludeResources)
var _ = Describe("Velero test on backup include resources matching the label selector",
Label("ResourceFiltering", "LabelSelector"), BackupWithLabelSelector)
var _ = Describe("Velero test on skip backup of volume by resource policies",
Label("ResourceFiltering", "ResourcePolicies", "Restic"), ResourcePoliciesTest)
var _ = Describe(
"Resources with the label velero.io/exclude-from-backup=true are not included in backup",
Label("ResourceFiltering", "ExcludeFromBackup"),
ExcludeFromBackupTest,
)
var _ = Describe(
"Velero test on exclude namespace from the cluster backup",
Label("ResourceFiltering", "ExcludeNamespaces", "Backup"),
BackupWithExcludeNamespaces,
)
var _ = Describe(
"Velero test on exclude namespace from the cluster restore",
Label("ResourceFiltering", "ExcludeNamespaces", "Restore"),
RestoreWithExcludeNamespaces,
)
var _ = Describe(
"Velero test on exclude resources from the cluster backup",
Label("ResourceFiltering", "ExcludeResources", "Backup"),
BackupWithExcludeResources,
)
var _ = Describe(
"Velero test on exclude resources from the cluster restore",
Label("ResourceFiltering", "ExcludeResources", "Restore"),
RestoreWithExcludeResources,
)
var _ = Describe(
"Velero test on include namespace from the cluster backup",
Label("ResourceFiltering", "IncludeNamespaces", "Backup"),
BackupWithIncludeNamespaces,
)
var _ = Describe(
"Velero test on include namespace from the cluster restore",
Label("ResourceFiltering", "IncludeNamespaces", "Restore"),
RestoreWithIncludeNamespaces,
)
var _ = Describe(
"Velero test on include resources from the cluster backup",
Label("ResourceFiltering", "IncludeResources", "Backup"),
BackupWithIncludeResources,
)
var _ = Describe(
"Velero test on include resources from the cluster restore",
Label("ResourceFiltering", "IncludeResources", "Restore"),
RestoreWithIncludeResources,
)
var _ = Describe(
"Velero test on backup include resources matching the label selector",
Label("ResourceFiltering", "LabelSelector"),
BackupWithLabelSelector,
)
var _ = Describe(
"Velero test on skip backup of volume by resource policies",
Label("ResourceFiltering", "ResourcePolicies", "Restic"),
ResourcePoliciesTest,
)
// backup VolumeInfo test
var _ = Describe("", Label("BackupVolumeInfo", "SkippedVolume"), SkippedVolumeInfoTest)
var _ = Describe("", Label("BackupVolumeInfo", "FilesystemUpload"), FilesystemUploadVolumeInfoTest)
var _ = Describe("", Label("BackupVolumeInfo", "CSIDataMover"), CSIDataMoverVolumeInfoTest)
var _ = Describe("", Label("BackupVolumeInfo", "CSISnapshot"), CSISnapshotVolumeInfoTest)
var _ = Describe("", Label("BackupVolumeInfo", "NativeSnapshot"), NativeSnapshotVolumeInfoTest)
var _ = Describe(
"",
Label("BackupVolumeInfo", "SkippedVolume"),
SkippedVolumeInfoTest,
)
var _ = Describe(
"",
Label("BackupVolumeInfo", "FilesystemUpload"),
FilesystemUploadVolumeInfoTest,
)
var _ = Describe(
"",
Label("BackupVolumeInfo", "CSIDataMover"),
CSIDataMoverVolumeInfoTest,
)
var _ = Describe(
"",
Label("BackupVolumeInfo", "CSISnapshot"),
CSISnapshotVolumeInfoTest,
)
var _ = Describe(
"",
Label("BackupVolumeInfo", "NativeSnapshot"),
NativeSnapshotVolumeInfoTest,
)
var _ = Describe("Velero test on resource modifiers from the cluster restore",
Label("ResourceModifier", "Restore"), ResourceModifiersTest)
var _ = Describe(
"Velero test on resource modifiers from the cluster restore",
Label("ResourceModifier", "Restore"),
ResourceModifiersTest,
)
var _ = Describe("Velero tests of Restic backup deletion",
Label("Backups", "Deletion", "Restic"), BackupDeletionWithRestic)
var _ = Describe("Velero tests of snapshot backup deletion",
Label("Backups", "Deletion", "Snapshot", "SkipVanillaZfs"), BackupDeletionWithSnapshots)
var _ = Describe("Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted",
Label("Backups", "TTL", "LongTime", "Snapshot", "SkipVanillaZfs"), TTLTest)
var _ = Describe("Backups in object storage are synced to a new Velero and deleted backups in object storage are synced to be deleted in Velero",
Label("Backups", "BackupsSync"), BackupsSyncTest)
var _ = Describe(
"Velero tests of Restic backup deletion",
Label("Backups", "Deletion", "Restic"),
BackupDeletionWithRestic,
)
var _ = Describe(
"Velero tests of snapshot backup deletion",
Label("Backups", "Deletion", "Snapshot", "SkipVanillaZfs"),
BackupDeletionWithSnapshots,
)
var _ = Describe(
"Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted",
Label("Backups", "TTL", "LongTime", "Snapshot", "SkipVanillaZfs"),
TTLTest,
)
var _ = Describe(
"Backups in object storage are synced to a new Velero and deleted backups in object storage are synced to be deleted in Velero",
Label("Backups", "BackupsSync"),
BackupsSyncTest,
)
var _ = Describe("Backup will be created periodically by schedule defined by a Cron expression",
Label("Schedule", "BR", "Pause", "LongTime"), ScheduleBackupTest)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("Schedule", "OrderedResources", "LongTime"), ScheduleOrderedResources)
var _ = Describe("Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
Label("Schedule", "BackupCreation", "SKIP_KIND", "LongTime"), ScheduleBackupCreationTest)
var _ = Describe(
"Backup will be created periodically by schedule defined by a Cron expression",
Label("Schedule", "Periodical", "Pause", "LongTime"),
SchedulePeriodicalTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("Schedule", "OrderedResources"),
ScheduleOrderedResources,
)
var _ = Describe(
"Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
Label("Schedule", "InProgress", "SKIP_KIND", "LongTime"),
ScheduleInProgressTest,
)
var _ = Describe("Velero test on ssr object when controller namespace mix-ups",
Label("PrivilegesMgmt", "SSR"), SSRTest)
var _ = Describe(
"Velero test on ssr object when controller namespace mix-ups",
Label("PrivilegesMgmt", "SSR"),
SSRTest,
)
var _ = Describe("Local backups will be deleted once the corresponding backup storage location is deleted",
Label("BSL", "Deletion", "Snapshot", "SkipVanillaZfs"), BslDeletionWithSnapshots)
var _ = Describe("Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted",
Label("BSL", "Deletion", "Restic"), BslDeletionWithRestic)
var _ = Describe(
"Local backups will be deleted once the corresponding backup storage location is deleted",
Label("BSL", "Deletion", "Snapshot", "SkipVanillaZfs"),
BslDeletionWithSnapshots,
)
var _ = Describe(
"Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted",
Label("BSL", "Deletion", "Restic"),
BslDeletionWithRestic,
)
var _ = Describe("Migrate resources between clusters by Restic",
Label("Migration", "Restic"), MigrationWithRestic)
var _ = Describe("Migrate resources between clusters by snapshot",
Label("Migration", "Snapshot", "SkipVanillaZfs"), MigrationWithSnapshots)
var _ = Describe(
"Migrate resources between clusters by FileSystem backup",
Label("Migration", "FSB"),
MigrationWithFS,
)
var _ = Describe(
"Migrate resources between clusters by snapshot",
Label("Migration", "Snapshot", "SkipVanillaZfs"),
MigrationWithSnapshots,
)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Single", "Restic"), OneNamespaceMappingResticTest)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Multiple", "Restic"), MultiNamespacesMappingResticTest)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Single", "Snapshot", "SkipVanillaZfs"), OneNamespaceMappingSnapshotTest)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Multiple", "Snapshot", "SkipVanillaZfs"), MultiNamespacesMappingSnapshotTest)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Single", "Restic"),
OneNamespaceMappingResticTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Multiple", "Restic"),
MultiNamespacesMappingResticTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Single", "Snapshot", "SkipVanillaZfs"),
OneNamespaceMappingSnapshotTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("NamespaceMapping", "Multiple", "Snapshot", "SkipVanillaZfs"),
MultiNamespacesMappingSnapshotTest,
)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptIn"), OptInPVBackupTest)
var _ = Describe("Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptOut"), OptOutPVBackupTest)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptIn"),
OptInPVBackupTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptOut"),
OptOutPVBackupTest,
)
var _ = Describe("Velero test on parallel files upload",
Label("UploaderConfig", "ParallelFilesUpload"), ParallelFilesUploadTest)
var _ = Describe("Velero test on parallel files download",
Label("UploaderConfig", "ParallelFilesDownload"), ParallelFilesDownloadTest)
var _ = Describe(
"Velero test on parallel files upload",
Label("UploaderConfig", "ParallelFilesUpload"),
ParallelFilesUploadTest,
)
var _ = Describe(
"Velero test on parallel files download",
Label("UploaderConfig", "ParallelFilesDownload"),
ParallelFilesDownloadTest,
)
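Every spec above is registered through Ginkgo Label(...) calls rather than naming conventions, so suite selection happens at run time with a label-filter expression. A sketch of such an invocation (the filter string is illustrative; the CI matrix composes similar expressions):

// ginkgo run --label-filter='Basic && (ClusterResource || NodePort || StorageClass)' ./test/e2e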
func GetKubeConfigContext() error {
var err error
var tcDefault, tcStandby k8s.TestClient
tcDefault, err = k8s.NewTestClient(test.VeleroCfg.DefaultClusterContext)
test.VeleroCfg.DefaultClient = &tcDefault
test.VeleroCfg.ClientToInstallVelero = test.VeleroCfg.DefaultClient
test.VeleroCfg.ClusterToInstallVelero = test.VeleroCfg.DefaultClusterName
test.VeleroCfg.ServiceAccountNameToInstall = test.VeleroCfg.DefaultCLSServiceAccountName
if err != nil {
return err
}
if VeleroCfg.DefaultClusterContext != "" {
err = KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultClusterContext)
if test.VeleroCfg.DefaultClusterContext != "" {
err = k8s.KubectlConfigUseContext(context.Background(), test.VeleroCfg.DefaultClusterContext)
if err != nil {
return err
}
if VeleroCfg.StandbyClusterContext != "" {
tcStandby, err = NewTestClient(VeleroCfg.StandbyClusterContext)
VeleroCfg.StandbyClient = &tcStandby
if test.VeleroCfg.StandbyClusterContext != "" {
tcStandby, err = k8s.NewTestClient(test.VeleroCfg.StandbyClusterContext)
test.VeleroCfg.StandbyClient = &tcStandby
if err != nil {
return err
}
@@ -274,14 +669,14 @@ func TestE2e(t *testing.T) {
t.Skip("Skipping E2E tests")
}
if !slices.Contains(test.LocalCloudProviders, test.VeleroCfg.CloudProvider) {
fmt.Println("For cloud platforms, object store plugin provider will be set as cloud provider")
// If ObjectStoreProvider is not provided, then using the value same as CloudProvider
if VeleroCfg.ObjectStoreProvider == "" {
VeleroCfg.ObjectStoreProvider = VeleroCfg.CloudProvider
if test.VeleroCfg.ObjectStoreProvider == "" {
test.VeleroCfg.ObjectStoreProvider = test.VeleroCfg.CloudProvider
}
} else {
if VeleroCfg.ObjectStoreProvider == "" {
if test.VeleroCfg.ObjectStoreProvider == "" {
t.Error(errors.New("No object store provider specified - must be specified when using kind as the cloud provider")) // Must have an object store provider
}
}
@@ -297,19 +692,67 @@ func TestE2e(t *testing.T) {
}
var _ = BeforeSuite(func() {
By("Install StorageClass for E2E.")
Expect(veleroutil.InstallStorageClasses(test.VeleroCfg.CloudProvider)).To(Succeed())
if strings.EqualFold(test.VeleroCfg.Features, test.FeatureCSI) &&
test.VeleroCfg.UseVolumeSnapshots {
By("Install VolumeSnapshotClass for E2E.")
Expect(
k8s.KubectlApplyByFile(
context.Background(),
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", test.VeleroCfg.CloudProvider),
),
).To(Succeed())
}
if test.InstallVelero {
By("Install test resources before testing")
Expect(
veleroutil.PrepareVelero(
context.Background(),
"install resource before testing",
test.VeleroCfg,
),
).To(Succeed())
}
})
var _ = AfterSuite(func() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
By("Delete StorageClasses created by E2E")
Expect(
k8s.DeleteStorageClass(
ctx,
*test.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
),
).To(Succeed())
Expect(
k8s.DeleteStorageClass(
ctx,
*test.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
),
).To(Succeed())
if strings.EqualFold(test.VeleroCfg.Features, test.FeatureCSI) &&
test.VeleroCfg.UseVolumeSnapshots {
By("Delete VolumeSnapshotClass created by E2E")
Expect(
k8s.KubectlDeleteByFile(
ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", test.VeleroCfg.CloudProvider),
),
).To(Succeed())
}
// If Velero was installed during the test and FailFast is not enabled, uninstall
// Velero. Otherwise, either Velero was not installed, or it is kept for debugging the failure.
if InstallVelero && (testSuitePassed || !VeleroCfg.FailFast) {
if test.InstallVelero && (testSuitePassed || !test.VeleroCfg.FailFast) {
By("release test resources after testing")
Expect(veleroutil.VeleroUninstall(ctx, test.VeleroCfg)).To(Succeed())
}
})
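The uninstall guard in AfterSuite is compact; spelled out (names as in the suite, the table is added for illustration):

// uninstall := test.InstallVelero && (testSuitePassed || !test.VeleroCfg.FailFast)
//   InstallVelero=true,  suite passed            -> uninstall (clean run)
//   InstallVelero=true,  failed, FailFast=false  -> uninstall anyway
//   InstallVelero=true,  failed, FailFast=true   -> keep Velero for debugging
//   InstallVelero=false                          -> the suite installed nothing to remove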

View File

@@ -17,347 +17,455 @@ package migration
import (
"context"
"flag"
"fmt"
"strings"
"time"
"github.com/google/uuid"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/vmware-tanzu/velero/test"
"github.com/vmware-tanzu/velero/test"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
util "github.com/vmware-tanzu/velero/test/util/csi"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/kibishii"
. "github.com/vmware-tanzu/velero/test/util/providers"
. "github.com/vmware-tanzu/velero/test/util/velero"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
"github.com/vmware-tanzu/velero/test/util/kibishii"
"github.com/vmware-tanzu/velero/test/util/providers"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
type migrationE2E struct {
framework.TestCase
useVolumeSnapshots bool
veleroCLI2Version test.VeleroCLI2Version
kibishiiData kibishii.KibishiiData
}
func MigrationWithSnapshots() {
for _, veleroCLI2Version := range veleroutil.GetVersionList(
test.VeleroCfg.MigrateFromVeleroCLI,
test.VeleroCfg.MigrateFromVeleroVersion) {
framework.TestFunc(
&migrationE2E{
useVolumeSnapshots: true,
veleroCLI2Version: veleroCLI2Version,
},
)()
}
}
func MigrationWithFS() {
for _, veleroCLI2Version := range veleroutil.GetVersionList(
test.VeleroCfg.MigrateFromVeleroCLI,
test.VeleroCfg.MigrateFromVeleroVersion) {
framework.TestFunc(
&migrationE2E{
useVolumeSnapshots: false,
veleroCLI2Version: veleroCLI2Version,
},
)()
}
}
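Both constructors wrap a migrationE2E value in framework.TestFunc, moving the per-case logic out of one long Ginkgo closure and into methods. A minimal sketch of the contract this implies (interface shape assumed for illustration; the real definition lives under test/e2e/test):

// The framework is assumed to drive these hooks in order, once per case:
//   Init()    -> derive names, namespaces, and restore args from the case UUID
//   Backup()  -> install Velero on the default cluster and take the backup
//   Restore() -> install Velero on the standby cluster and run the restore
type migrationCase interface {
	Init() error
	Backup() error
	Restore() error
}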
func (m *migrationE2E) Init() error {
By("Call the base E2E init", func() {
Expect(m.TestCase.Init()).To(Succeed())
})
if veleroCfg.DefaultClusterContext == "" && veleroCfg.StandbyClusterContext == "" {
By("Skip check", func() {
if m.VeleroCfg.DefaultClusterContext == "" || m.VeleroCfg.StandbyClusterContext == "" {
Skip("Migration test needs 2 clusters")
}
if m.useVolumeSnapshots && m.VeleroCfg.CloudProvider == test.Kind {
Skip(fmt.Sprintf("Volume snapshots not supported on %s", test.Kind))
}
if m.VeleroCfg.SnapshotMoveData && !m.useVolumeSnapshots {
Skip("FSB migration test is not needed in data mover scenario")
}
})
m.kibishiiData = *kibishii.DefaultKibishiiData
m.kibishiiData.ExpectedNodes = 3
m.CaseBaseName = "migration-" + m.UUIDgen
m.BackupName = m.CaseBaseName + "-backup"
m.RestoreName = m.CaseBaseName + "-restore"
m.NSIncluded = &[]string{m.CaseBaseName}
m.RestoreArgs = []string{
"create", "--namespace", m.VeleroCfg.VeleroNamespace,
"restore", m.RestoreName,
"--from-backup", m.BackupName, "--wait",
}
// Message output by ginkgo
m.TestMsg = &framework.TestMSG{
Desc: "Test migration workload on two clusters",
FailedMSG: "Fail to test migrate between two clusters",
Text: "Test back up on default cluster, restore on standby cluster",
}
// Need to uninstall Velero on the default cluster.
if test.InstallVelero {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(veleroutil.VeleroUninstall(ctx, m.VeleroCfg)).To(Succeed())
}
return nil
}
func (m *migrationE2E) Backup() error {
OriginVeleroCfg := m.VeleroCfg
var err error
if m.veleroCLI2Version.VeleroCLI == "" {
// Assume the tag of the Velero server image is identical to the Velero CLI version.
// Download the Velero CLI matching that version when no CLI path is provided.
By(
fmt.Sprintf("Install the expected version Velero CLI %s",
m.veleroCLI2Version.VeleroVersion),
func() {
// "self" represents 1.14.x and future versions
if m.veleroCLI2Version.VeleroVersion == "self" {
m.veleroCLI2Version.VeleroCLI = m.VeleroCfg.VeleroCLI
} else {
OriginVeleroCfg, err = veleroutil.SetImagesToDefaultValues(
OriginVeleroCfg,
m.veleroCLI2Version.VeleroVersion,
)
Expect(err).To(Succeed(),
"Fail to set images for the migrate-from Velero installation.")
m.veleroCLI2Version.VeleroCLI, err = veleroutil.InstallVeleroCLI(
m.veleroCLI2Version.VeleroVersion)
Expect(err).To(Succeed())
}
},
)
}
By(fmt.Sprintf("Install Velero on default cluster (%s)", m.VeleroCfg.DefaultClusterContext),
func() {
Expect(k8sutil.KubectlConfigUseContext(
m.Ctx, m.VeleroCfg.DefaultClusterContext)).To(Succeed())
OriginVeleroCfg.MigrateFromVeleroVersion = m.veleroCLI2Version.VeleroVersion
OriginVeleroCfg.VeleroCLI = m.veleroCLI2Version.VeleroCLI
OriginVeleroCfg.ClientToInstallVelero = OriginVeleroCfg.DefaultClient
OriginVeleroCfg.ClusterToInstallVelero = m.VeleroCfg.DefaultClusterName
OriginVeleroCfg.ServiceAccountNameToInstall = m.VeleroCfg.DefaultCLSServiceAccountName
OriginVeleroCfg.UseVolumeSnapshots = m.useVolumeSnapshots
OriginVeleroCfg.UseNodeAgent = !m.useVolumeSnapshots
version, err := veleroutil.GetVeleroVersion(m.Ctx, OriginVeleroCfg.VeleroCLI, true)
Expect(err).To(Succeed(), "Fail to get Velero version")
OriginVeleroCfg.VeleroVersion = version
if OriginVeleroCfg.SnapshotMoveData {
OriginVeleroCfg.UseNodeAgent = true
}
Expect(veleroutil.VeleroInstall(m.Ctx, &OriginVeleroCfg, false)).To(Succeed())
if m.veleroCLI2Version.VeleroVersion != "self" {
Expect(veleroutil.CheckVeleroVersion(
m.Ctx,
OriginVeleroCfg.VeleroCLI,
OriginVeleroCfg.MigrateFromVeleroVersion,
)).To(Succeed())
}
},
)
By("Create namespace for sample workload", func() {
Expect(k8sutil.CreateNamespace(
m.Ctx,
*m.VeleroCfg.DefaultClient,
m.CaseBaseName,
)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s to install Kibishii workload",
m.CaseBaseName))
})
By("Deploy sample workload of Kibishii", func() {
Expect(kibishii.KibishiiPrepareBeforeBackup(
m.Ctx,
*OriginVeleroCfg.DefaultClient,
OriginVeleroCfg.CloudProvider,
m.CaseBaseName,
OriginVeleroCfg.RegistryCredentialFile,
OriginVeleroCfg.Features,
OriginVeleroCfg.KibishiiDirectory,
OriginVeleroCfg.UseVolumeSnapshots,
&m.kibishiiData,
)).To(Succeed())
})
By(fmt.Sprintf("Backup namespace %s", m.CaseBaseName), func() {
m.BackupArgs = []string{
"create", "--namespace", m.VeleroCfg.VeleroNamespace,
"backup", m.BackupName,
"--include-namespaces", strings.Join(*m.NSIncluded, ","),
"--wait",
}
if m.useVolumeSnapshots {
m.BackupArgs = append(m.BackupArgs, "--snapshot-volumes=true")
} else {
By(fmt.Sprintf("Uninstall Velero on cluster %s", veleroCfg.DefaultClusterContext), func() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
DeleteNamespace(context.Background(), *veleroCfg.DefaultClient, migrationNamespace, true)
})
By(fmt.Sprintf("Uninstall Velero on cluster %s", veleroCfg.StandbyClusterContext), func() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyClusterContext)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
DeleteNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace, true)
})
if InstallVelero {
By(fmt.Sprintf("Delete sample workload namespace %s", migrationNamespace), func() {
DeleteNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace, true)
})
}
By(fmt.Sprintf("Switch to default kubeconfig context %s", veleroCfg.DefaultClusterContext), func() {
Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed())
veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient
veleroCfg.ClusterToInstallVelero = veleroCfg.DefaultClusterName
})
m.BackupArgs = append(m.BackupArgs, "--default-volumes-to-fs-backup")
}
if OriginVeleroCfg.SnapshotMoveData {
m.BackupArgs = append(m.BackupArgs, "--snapshot-move-data")
}
Expect(veleroutil.VeleroBackupExec(
m.Ctx,
OriginVeleroCfg.VeleroCLI,
OriginVeleroCfg.VeleroNamespace,
m.BackupName,
m.BackupArgs,
)).To(Succeed(), func() string {
veleroutil.RunDebug(
context.Background(),
OriginVeleroCfg.VeleroCLI,
OriginVeleroCfg.VeleroNamespace,
m.BackupName,
"",
)
return "Failed to backup resources"
})
})
if m.useVolumeSnapshots {
// Only wait for the snapshots.backupdriver.cnsdp.vmware.com
// when the vSphere plugin is used.
if OriginVeleroCfg.HasVspherePlugin {
By("Waiting for vSphere uploads to complete", func() {
Expect(
veleroutil.WaitForVSphereUploadCompletion(
context.Background(),
time.Hour,
m.CaseBaseName,
m.kibishiiData.ExpectedNodes,
),
).To(Succeed())
})
}
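// Verify the snapshot artifacts according to the data-movement mode: with
// data movement the VolumeSnapshotContents should already be cleaned up,
// otherwise the snapshots should exist in the cloud provider.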
var snapshotCheckPoint test.SnapshotCheckPoint
snapshotCheckPoint.NamespaceBackedUp = m.CaseBaseName
if OriginVeleroCfg.SnapshotMoveData {
//VolumeSnapshotContent should be deleted after data movement
_, err := util.CheckVolumeSnapshotCR(
*m.VeleroCfg.DefaultClient,
map[string]string{"namespace": m.CaseBaseName},
0,
)
By("Check the VSC account", func() {
Expect(err).NotTo(HaveOccurred(), "VSC count is not as expected 0")
})
} else {
// The snapshots on AWS may still be in pending status when the restore starts.
// Wait for a while to avoid hitting https://github.com/vmware-tanzu/velero/issues/1799
if OriginVeleroCfg.CloudProvider == test.Azure &&
strings.EqualFold(OriginVeleroCfg.Features, test.FeatureCSI) ||
OriginVeleroCfg.CloudProvider == test.AWS {
By("Sleep 5 minutes to avoid snapshot recreated by unknown reason ", func() {
time.Sleep(5 * time.Minute)
})
}
By("Snapshot should be created in cloud object store with retain policy", func() {
snapshotCheckPoint, err = veleroutil.GetSnapshotCheckPoint(
*OriginVeleroCfg.DefaultClient,
OriginVeleroCfg,
m.kibishiiData.ExpectedNodes,
m.CaseBaseName,
m.BackupName,
kibishii.GetKibishiiPVCNameList(m.kibishiiData.ExpectedNodes),
)
Expect(err).NotTo(HaveOccurred(), "Fail to get snapshot checkpoint")
Expect(providers.CheckSnapshotsInProvider(
OriginVeleroCfg,
m.BackupName,
snapshotCheckPoint,
false,
)).To(Succeed())
})
}
}
return nil
}
func (m *migrationE2E) Restore() error {
StandbyVeleroCfg := m.VeleroCfg
By("Install Velero in standby cluster.", func() {
// Ensure cluster-B is the target cluster
Expect(k8sutil.KubectlConfigUseContext(
m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
// Check that the workload namespace does not exist in the standby cluster.
_, err := k8sutil.GetNamespace(
m.Ctx, *m.VeleroCfg.StandbyClient, m.CaseBaseName)
Expect(err).To(HaveOccurred(), fmt.Sprintf(
"namespace %s should not exist in the destination cluster yet", m.CaseBaseName))
Expect(strings.Contains(fmt.Sprint(err), "namespaces \""+m.CaseBaseName+"\" not found")).
Should(BeTrue())
By("Install StorageClass for E2E.")
Expect(veleroutil.InstallStorageClasses(
m.VeleroCfg.StandbyClusterCloudProvider)).To(Succeed())
if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
m.VeleroCfg.UseVolumeSnapshots {
By("Install VolumeSnapshotClass for E2E.")
Expect(
k8sutil.KubectlApplyByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
),
).To(Succeed())
}
StandbyVeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
StandbyVeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName
StandbyVeleroCfg.ServiceAccountNameToInstall = m.VeleroCfg.StandbyCLSServiceAccountName
StandbyVeleroCfg.UseNodeAgent = !m.useVolumeSnapshots
if StandbyVeleroCfg.SnapshotMoveData {
StandbyVeleroCfg.UseNodeAgent = true
// For SnapshotMoveData pipelines, we should use standby cluster setting
// for Velero installation.
// In nightly CI, StandbyClusterPlugins is set properly
// if pipeline is for SnapshotMoveData.
StandbyVeleroCfg.Plugins = m.VeleroCfg.StandbyClusterPlugins
StandbyVeleroCfg.ObjectStoreProvider = m.VeleroCfg.StandbyClusterObjectStoreProvider
}
Expect(veleroutil.VeleroInstall(
context.Background(), &StandbyVeleroCfg, true)).To(Succeed())
})
By("Waiting for backups sync to Velero in standby cluster", func() {
Expect(veleroutil.WaitForBackupToBeCreated(
m.Ctx, m.BackupName, 5*time.Minute, &StandbyVeleroCfg)).To(Succeed())
})
By(fmt.Sprintf("Restore %s", m.CaseBaseName), func() {
// Use the e2e-storage-class SC instead of the kibishii-storage-class,
// because the standby cluster never gets a chance to create the kibishii StorageClass.
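// The velero.io/change-storage-class plugin config below remaps the
// workload's StorageClass to one that exists on the standby cluster.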
cmName := "datamover-storage-class-config"
labels := map[string]string{"velero.io/change-storage-class": "RestoreItemAction",
"velero.io/plugin-config": ""}
data := map[string]string{kibishii.KibishiiStorageClassName: test.StorageClassName}
By(fmt.Sprintf("Create ConfigMap %s in namespace %s",
cmName, StandbyVeleroCfg.VeleroNamespace), func() {
_, err := k8sutil.CreateConfigMap(
StandbyVeleroCfg.StandbyClient.ClientGo,
StandbyVeleroCfg.VeleroNamespace,
cmName,
labels,
data,
)
Expect(err).To(Succeed(), fmt.Sprintf(
"failed to create ConfigMap in the namespace %q",
StandbyVeleroCfg.VeleroNamespace))
})
Expect(veleroutil.VeleroRestore(
m.Ctx,
StandbyVeleroCfg.VeleroCLI,
StandbyVeleroCfg.VeleroNamespace,
m.RestoreName,
m.BackupName,
"",
)).To(Succeed(), func() string {
veleroutil.RunDebug(
m.Ctx, StandbyVeleroCfg.VeleroCLI,
StandbyVeleroCfg.VeleroNamespace, "", m.RestoreName)
return "Fail to restore workload"
})
})
return nil
}
func (m *migrationE2E) Verify() error {
By(fmt.Sprintf("Verify workload %s after restore on standby cluster", m.CaseBaseName), func() {
Expect(kibishii.KibishiiVerifyAfterRestore(
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
m.Ctx,
&m.kibishiiData,
"",
)).To(Succeed(), "Fail to verify workload after restore")
})
return nil
}
func (m *migrationE2E) Clean() error {
By("Clean resource on default cluster.", func() {
Expect(m.TestCase.Clean()).To(Succeed())
})
By("Clean resource on standby cluster.", func() {
Expect(k8sutil.KubectlConfigUseContext(
m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
m.VeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
m.VeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName
By("Delete StorageClasses created by E2E")
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
),
).To(Succeed())
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
),
).To(Succeed())
if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
m.VeleroCfg.UseVolumeSnapshots {
By("Delete VolumeSnapshotClass created by E2E")
Expect(
k8sutil.KubectlDeleteByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
),
).To(Succeed())
}
Expect(veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg)).To(Succeed())
Expect(
k8sutil.DeleteNamespace(
m.Ctx,
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
true,
),
).To(Succeed())
})
By("Switch to default KubeConfig context", func() {
Expect(k8sutil.KubectlConfigUseContext(
m.Ctx,
m.VeleroCfg.DefaultClusterContext,
)).To(Succeed())
})
return nil
}

View File

@@ -21,7 +21,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
@@ -90,11 +89,6 @@ func (p *ParallelFilesDownload) Init() error {
}
func (p *ParallelFilesDownload) CreateResources() error {
err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))
if err != nil {
return errors.Wrapf(err, "failed to install storage class for pv backup filtering test")
}
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))

View File

@@ -21,7 +21,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
@@ -81,11 +80,6 @@ func (p *ParallelFilesUpload) Init() error {
}
func (p *ParallelFilesUpload) CreateResources() error {
err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))
if err != nil {
return errors.Wrapf(err, "failed to install storage class for pv backup filtering test")
}
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))

View File

@@ -63,11 +63,6 @@ func (p *PVBackupFiltering) Init() error {
}
func (p *PVBackupFiltering) CreateResources() error {
err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))
if err != nil {
return errors.Wrapf(err, "failed to install storage class for pv backup filtering test")
}
for _, ns := range *p.NSIncluded {
By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() {
Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))

View File

@@ -144,7 +144,7 @@ func (e *ExcludeFromBackup) Verify() error {
Expect(apierrors.IsNotFound(err)).To(BeTrue())
//Check configmap: should be included
_, err = GetConfigmap(e.Client.ClientGo, namespace, e.CaseBaseName)
_, err = GetConfigMap(e.Client.ClientGo, namespace, e.CaseBaseName)
Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q", namespace))
})
return nil

View File

@@ -134,7 +134,7 @@ func (r *ResourceModifiersCase) Clean() error {
if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
if err := DeleteConfigmap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
return err
}

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package filtering
package resourcepolicies
import (
"fmt"
@@ -29,7 +29,6 @@ import (
. "github.com/vmware-tanzu/velero/test"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
const FileName = "test-data.txt"
@@ -101,10 +100,6 @@ func (r *ResourcePoliciesCase) Init() error {
}
func (r *ResourcePoliciesCase) CreateResources() error {
By(("Installing storage class..."), func() {
Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", r.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class")
})
By(fmt.Sprintf("Create configmap %s in namespaces %s for workload\n", r.cmName, r.VeleroCfg.VeleroNamespace), func() {
Expect(CreateConfigMapFromYAMLData(r.Client.ClientGo, r.yamlConfig, r.cmName, r.VeleroCfg.VeleroNamespace)).To(Succeed(), fmt.Sprintf("Failed to create configmap %s in namespaces %s for workload\n", r.cmName, r.VeleroCfg.VeleroNamespace))
})
@@ -181,11 +176,7 @@ func (r *ResourcePoliciesCase) Clean() error {
if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
if err := r.deleteTestStorageClassList([]string{StorageClassName, StorageClassName2}); err != nil {
return err
}
if err := DeleteConfigmap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
return err
}
@@ -248,12 +239,3 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro
}
return nil
}
func (r *ResourcePoliciesCase) deleteTestStorageClassList(scList []string) error {
for _, v := range scList {
if err := DeleteStorageClass(r.Ctx, r.Client, v); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,196 @@
package schedule
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/test"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
var ScheduleInProgressTest func() = framework.TestFunc(&InProgressCase{})
type InProgressCase struct {
framework.TestCase
namespace string
ScheduleName string
ScheduleArgs []string
volume string
podName string
pvcName string
podAnn map[string]string
podSleepDuration time.Duration
}
func (s *InProgressCase) Init() error {
Expect(s.TestCase.Init()).To(Succeed())
s.CaseBaseName = "schedule-backup-creation-test" + s.UUIDgen
s.ScheduleName = "schedule-" + s.CaseBaseName
s.namespace = s.CaseBaseName
podSleepDurationStr := "60s"
s.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
s.TestMsg = &framework.TestMSG{
Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
FailedMSG: "Failed to verify schedule back creation behavior",
Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
}
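// The pre-hook makes each backup sleep inside the pod, keeping the
// triggered backup InProgress long enough to overlap the next schedule tick.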
s.volume = "volume-1"
s.podName = "pod-1"
s.pvcName = "pvc-1"
// Assign the pod name before building the annotations so the pre-hook
// references the correct container.
s.podAnn = map[string]string{
"pre.hook.backup.velero.io/container": s.podName,
"pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]",
"pre.hook.backup.velero.io/timeout": "120s",
}
s.ScheduleArgs = []string{
"--include-namespaces", s.namespace,
"--schedule=@every 1m",
}
return nil
}
func (s *InProgressCase) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
Expect(
k8sutil.CreateNamespace(
s.Ctx,
s.Client,
s.namespace,
),
).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
_, err := k8sutil.CreatePod(
s.Client,
s.namespace,
s.podName,
test.StorageClassName,
s.pvcName,
[]string{s.volume},
nil,
s.podAnn,
)
Expect(err).To(Succeed())
err = k8sutil.WaitForPods(
s.Ctx,
s.Client,
s.namespace,
[]string{s.podName},
)
Expect(err).To(Succeed())
})
return nil
}
func (s *InProgressCase) Backup() error {
By(fmt.Sprintf("Creating schedule %s\n", s.ScheduleName), func() {
Expect(
veleroutil.VeleroScheduleCreate(
s.Ctx,
s.VeleroCfg.VeleroCLI,
s.VeleroCfg.VeleroNamespace,
s.ScheduleName,
s.ScheduleArgs,
),
).To(
Succeed(),
func() string {
veleroutil.RunDebug(
context.Background(),
s.VeleroCfg.VeleroCLI,
s.VeleroCfg.VeleroNamespace,
"",
"",
)
return "Fail to create schedule"
})
})
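// Poll the backups created by the schedule: the pre-hook sleep should keep
// at most one backup InProgress at any point in time.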
By("Get backup every half minute.", func() {
err := wait.PollUntilContextTimeout(
s.Ctx,
30*time.Second,
5*time.Minute,
true,
func(ctx context.Context) (bool, error) {
backupList := new(velerov1api.BackupList)
if err := s.Client.Kubebuilder.List(
s.Ctx,
backupList,
&kbclient.ListOptions{
Namespace: s.VeleroCfg.VeleroNamespace,
LabelSelector: labels.SelectorFromSet(map[string]string{
velerov1api.ScheduleNameLabel: s.ScheduleName,
}),
},
); err != nil {
return false, fmt.Errorf("failed to list backup in %s namespace for schedule %s: %s",
s.VeleroCfg.VeleroNamespace, s.ScheduleName, err.Error())
}
if len(backupList.Items) == 0 {
fmt.Println("No backup is found yet. Continue query on the next turn.")
return false, nil
}
inProgressBackupCount := 0
for _, backup := range backupList.Items {
if backup.Status.Phase == velerov1api.BackupPhaseInProgress {
inProgressBackupCount++
}
}
// There should be at most one in-progress backup per schedule.
Expect(inProgressBackupCount).Should(BeNumerically("<=", 1))
// Once the schedule has triggered two backups, we have already verified
// there was at most one in-progress backup at a time. Succeed.
if len(backupList.Items) >= 2 {
return true, nil
}
fmt.Println("Wait until the schedule triggers two backups.")
return false, nil
},
)
Expect(err).To(Succeed())
})
return nil
}
func (s *InProgressCase) Clean() error {
if CurrentSpecReport().Failed() && s.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
Expect(
veleroutil.VeleroScheduleDelete(
s.Ctx,
s.VeleroCfg.VeleroCLI,
s.VeleroCfg.VeleroNamespace,
s.ScheduleName,
),
).To(Succeed())
Expect(s.TestCase.Clean()).To(Succeed())
}
return nil
}

View File

@@ -18,6 +18,7 @@ limitations under the License.
//the ordered resources test related to https://github.com/vmware-tanzu/velero/issues/4561
import (
"context"
"fmt"
"strings"
"time"
@@ -25,129 +26,189 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
waitutil "k8s.io/apimachinery/pkg/util/wait"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
var ScheduleOrderedResources func() = TestFunc(&OrderedResources{})
var ScheduleOrderedResources func() = framework.TestFunc(&OrderedResources{})
type OrderedResources struct {
Namespace string
ScheduleName string
OrderMap map[string]string
ScheduleArgs []string
TestCase
Namespace string
ScheduleName string
OrderResource map[string]string
ScheduleArgs []string
framework.TestCase
}
func (o *OrderedResources) Init() error {
o.TestCase.Init()
Expect(o.TestCase.Init()).To(Succeed())
o.CaseBaseName = "ordered-resources-" + o.UUIDgen
o.ScheduleName = "schedule-" + o.CaseBaseName
o.Namespace = o.CaseBaseName + "-" + o.UUIDgen
o.OrderMap = map[string]string{
o.OrderResource = map[string]string{
"deployments": fmt.Sprintf("deploy-%s", o.CaseBaseName),
"secrets": fmt.Sprintf("secret-%s", o.CaseBaseName),
"configmaps": fmt.Sprintf("configmap-%s", o.CaseBaseName),
}
o.TestMsg = &TestMSG{
orderResourceArray := make([]string, 0)
for k, v := range o.OrderResource {
orderResourceArray = append(
orderResourceArray,
fmt.Sprintf("%s=%s", k, v),
)
}
orderResourceStr := strings.Join(orderResourceArray, ";")
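// Example result: "deployments=deploy-<case>;secrets=secret-<case>;configmaps=configmap-<case>".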
o.TestMsg = &framework.TestMSG{
Desc: "Create a schedule to backup resources in a specific order should be successful",
FailedMSG: "Failed to verify schedule backup resources in a specific order",
Text: "Create a schedule to backup resources in a specific order should be successful",
}
o.ScheduleArgs = []string{"--schedule", "@every 1m",
"--include-namespaces", o.Namespace, "--default-volumes-to-fs-backup", "--ordered-resources"}
var orderStr string
for kind, resource := range o.OrderMap {
orderStr += fmt.Sprintf("%s=%s;", kind, resource)
o.ScheduleArgs = []string{
"--schedule",
"@every 1m",
"--include-namespaces",
o.Namespace,
"--default-volumes-to-fs-backup",
"--ordered-resources",
orderResourceStr,
}
o.ScheduleArgs = append(o.ScheduleArgs, strings.TrimRight(orderStr, ";"))
return nil
}
func (o *OrderedResources) CreateResources() error {
label := map[string]string{
"orderedresources": "true",
}
fmt.Printf("Creating resources in %s namespace ...\n", o.Namespace)
if err := CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil {
if err := k8sutil.CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil {
return errors.Wrapf(err, "failed to create namespace %s", o.Namespace)
}
//Create deployment
deploymentName := fmt.Sprintf("deploy-%s", o.CaseBaseName)
fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace)
deployment := NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result()
deployment, err := CreateDeployment(o.Client.ClientGo, o.Namespace, deployment)
deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result()
_, err := k8sutil.CreateDeployment(o.Client.ClientGo, o.Namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err))
}
err = WaitForReadyDeployment(o.Client.ClientGo, o.Namespace, deployment.Name)
err = k8sutil.WaitForReadyDeployment(o.Client.ClientGo, o.Namespace, deployment.Name)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to ensure job completion in namespace: %q", o.Namespace))
}
//Create Secret
secretName := fmt.Sprintf("secret-%s", o.CaseBaseName)
fmt.Printf("Creating secret %s in %s namespaces ...\n", secretName, o.Namespace)
_, err = CreateSecret(o.Client.ClientGo, o.Namespace, secretName, label)
_, err = k8sutil.CreateSecret(o.Client.ClientGo, o.Namespace, secretName, label)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create secret in the namespace %q", o.Namespace))
}
err = WaitForSecretsComplete(o.Client.ClientGo, o.Namespace, secretName)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", o.Namespace))
}
//Create Configmap
configmapName := fmt.Sprintf("configmap-%s", o.CaseBaseName)
fmt.Printf("Creating configmap %s in %s namespaces ...\n", configmapName, o.Namespace)
_, err = CreateConfigMap(o.Client.ClientGo, o.Namespace, configmapName, label, nil)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create configmap in the namespace %q", o.Namespace))
}
err = WaitForConfigMapComplete(o.Client.ClientGo, o.Namespace, configmapName)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", o.Namespace))
//Create ConfigMap
cmName := fmt.Sprintf("configmap-%s", o.CaseBaseName)
fmt.Printf("Creating ConfigMap %s in %s namespaces ...\n", cmName, o.Namespace)
if _, err := k8sutil.CreateConfigMap(
o.Client.ClientGo,
o.Namespace,
cmName,
label,
nil,
); err != nil {
return errors.Wrap(
err,
fmt.Sprintf("failed to create ConfigMap in the namespace %q", o.Namespace),
)
}
return nil
}
func (o *OrderedResources) Backup() error {
By(fmt.Sprintf("Create schedule the workload in %s namespace", o.Namespace), func() {
err := VeleroScheduleCreate(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName, o.ScheduleArgs)
err := veleroutil.VeleroScheduleCreate(
o.Ctx,
o.VeleroCfg.VeleroCLI,
o.VeleroCfg.VeleroNamespace,
o.ScheduleName,
o.ScheduleArgs,
)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to create schedule %s with err %v", o.ScheduleName, err))
})
return nil
}
func (o *OrderedResources) Destroy() error {
return nil
}
func (o *OrderedResources) Verify() error {
By(fmt.Sprintf("Checking resource order in %s schedule cr", o.ScheduleName), func() {
err := CheckScheduleWithResourceOrder(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName, o.OrderMap)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to check schedule %s with err %v", o.ScheduleName, err))
By(fmt.Sprintf("Checking resource order in %s schedule CR", o.ScheduleName), func() {
err := veleroutil.CheckScheduleWithResourceOrder(
o.Ctx,
o.VeleroCfg.VeleroCLI,
o.VeleroCfg.VeleroNamespace,
o.ScheduleName,
o.OrderResource,
)
Expect(err).To(
Succeed(),
fmt.Sprintf("Failed to check schedule %s with err %v", o.ScheduleName, err),
)
})
By("Checking resource order in backup cr", func() {
backupList := new(velerov1api.BackupList)
err := waitutil.PollImmediate(10*time.Second, time.Minute*5, func() (bool, error) {
if err := o.Client.Kubebuilder.List(o.Ctx, backupList, &kbclient.ListOptions{Namespace: o.VeleroCfg.VeleroNamespace}); err != nil {
return false, fmt.Errorf("failed to list backup object in %s namespace with err %v", o.VeleroCfg.VeleroNamespace, err)
}
err := waitutil.PollUntilContextTimeout(
o.Ctx,
30*time.Second,
time.Minute*5,
true,
func(ctx context.Context) (bool, error) {
backupList := new(velerov1api.BackupList)
for _, backup := range backupList.Items {
if err := CheckBackupWithResourceOrder(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, backup.Name, o.OrderMap); err == nil {
return true, nil
if err := o.Client.Kubebuilder.List(
o.Ctx,
backupList,
&kbclient.ListOptions{
Namespace: o.VeleroCfg.VeleroNamespace,
LabelSelector: labels.SelectorFromSet(map[string]string{
velerov1api.ScheduleNameLabel: o.ScheduleName,
}),
},
); err != nil {
return false, fmt.Errorf("failed to list backup in %s namespace for schedule %s: %s",
o.VeleroCfg.VeleroNamespace, o.ScheduleName, err.Error())
}
}
fmt.Printf("still finding backup created by schedule %s ...\n", o.ScheduleName)
return false, nil
})
Expect(err).To(Succeed(), fmt.Sprintf("Failed to check schedule %s created backup with err %v", o.ScheduleName, err))
for _, backup := range backupList.Items {
if err := veleroutil.CheckBackupWithResourceOrder(
o.Ctx,
o.VeleroCfg.VeleroCLI,
o.VeleroCfg.VeleroNamespace,
backup.Name,
o.OrderResource,
); err == nil {
// After schedule successfully triggers a backup,
// the workload namespace is deleted.
// It's possible the following backup may fail.
// As a result, as long as there is one backup in Completed state,
// the case assumes test pass.
return true, nil
}
}
fmt.Printf("still finding backup created by schedule %s ...\n", o.ScheduleName)
return false, nil
})
Expect(err).To(
Succeed(),
fmt.Sprintf("Failed to check schedule %s created backup with err %v",
o.ScheduleName, err),
)
})
return nil
}
@@ -156,22 +217,15 @@ func (o *OrderedResources) Clean() error {
if CurrentSpecReport().Failed() && o.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
Expect(VeleroScheduleDelete(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName)).To(Succeed())
Expect(veleroutil.VeleroScheduleDelete(
o.Ctx,
o.VeleroCfg.VeleroCLI,
o.VeleroCfg.VeleroNamespace,
o.ScheduleName,
)).To(Succeed())
Expect(o.TestCase.Clean()).To(Succeed())
}
return nil
}
func (o *OrderedResources) DeleteAllBackups() error {
backupList := new(velerov1api.BackupList)
if err := o.Client.Kubebuilder.List(o.Ctx, backupList, &kbclient.ListOptions{Namespace: o.VeleroCfg.VeleroNamespace}); err != nil {
return fmt.Errorf("failed to list backup object in %s namespace with err %v", o.VeleroCfg.VeleroNamespace, err)
}
for _, backup := range backupList.Items {
if err := VeleroBackupDelete(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, backup.Name); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,210 @@
package schedule
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
)
type PeriodicalCase struct {
framework.TestCase
ScheduleName string
ScheduleArgs []string
Period int // The minimum unit is minute.
}
var SchedulePeriodicalTest func() = framework.TestFunc(&PeriodicalCase{})
func (n *PeriodicalCase) Init() error {
Expect(n.TestCase.Init()).To(Succeed())
n.CaseBaseName = "schedule-backup-" + n.UUIDgen
n.NSIncluded = &[]string{n.CaseBaseName}
n.ScheduleName = "schedule-" + n.CaseBaseName
n.RestoreName = "restore-" + n.CaseBaseName
n.TestMsg = &framework.TestMSG{
Desc: "Set up a scheduled backup defined by a Cron expression",
FailedMSG: "Failed to schedule a backup",
Text: "Should backup periodically according to the schedule",
}
n.ScheduleArgs = []string{
"--include-namespaces", strings.Join(*n.NSIncluded, ","),
"--schedule=@every 1m",
}
return nil
}
func (n *PeriodicalCase) CreateResources() error {
for _, ns := range *n.NSIncluded {
By(fmt.Sprintf("Creating namespaces %s ......\n", ns), func() {
Expect(
k8sutil.CreateNamespace(
n.Ctx,
n.Client,
ns,
),
).To(
Succeed(),
fmt.Sprintf("Failed to create namespace %s", ns),
)
})
cmName := n.CaseBaseName
fmt.Printf("Creating ConfigMap %s in namespaces ...%s\n", cmName, ns)
_, err := k8sutil.CreateConfigMap(
n.Client.ClientGo,
ns,
cmName,
nil,
nil,
)
Expect(err).To(Succeed(), fmt.Sprintf("failed to create ConfigMap in the namespace %q", ns))
}
return nil
}
func (n *PeriodicalCase) Backup() error {
By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
Expect(
veleroutil.VeleroScheduleCreate(
n.Ctx,
n.VeleroCfg.VeleroCLI,
n.VeleroCfg.VeleroNamespace,
n.ScheduleName,
n.ScheduleArgs,
),
).To(Succeed())
})
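// The schedule is expected to wait for its first cron tick rather than
// backing up immediately at creation time.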
By(fmt.Sprintf("No immediate backup is created by schedule %s\n", n.ScheduleName), func() {
backups, err := veleroutil.GetBackupsForSchedule(
n.Ctx,
n.Client.Kubebuilder,
n.ScheduleName,
n.VeleroCfg.Namespace,
)
Expect(err).To(Succeed())
Expect(backups).To(BeEmpty())
})
By("Wait until schedule triggers backup.", func() {
err := wait.PollUntilContextTimeout(
n.Ctx,
30*time.Second,
5*time.Minute,
true,
func(ctx context.Context) (bool, error) {
backups, err := veleroutil.GetBackupsForSchedule(
n.Ctx,
n.Client.Kubebuilder,
n.ScheduleName,
n.VeleroCfg.Namespace,
)
if err != nil {
fmt.Println("Fail to get backups for schedule.")
return false, err
}
// The triggered backup completed.
if len(backups) == 1 &&
backups[0].Status.Phase == velerov1api.BackupPhaseCompleted {
n.BackupName = backups[0].Name
return true, nil
}
return false, nil
},
)
Expect(err).To(Succeed())
})
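// Restore later from the backup produced by the schedule's first completed run.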
n.RestoreArgs = []string{
"create", "--namespace", n.VeleroCfg.VeleroNamespace, "restore", n.RestoreName,
"--from-backup", n.BackupName,
"--wait",
}
By(fmt.Sprintf("Pause schedule %s ......\n", n.ScheduleName), func() {
Expect(
veleroutil.VeleroSchedulePause(
n.Ctx,
n.VeleroCfg.VeleroCLI,
n.VeleroCfg.VeleroNamespace,
n.ScheduleName,
),
).To(Succeed())
})
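// With an @every 1m schedule, sleeping for 2 minutes guarantees at least
// one missed tick while the schedule is paused.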
By(("Sleep 2 minutes"), func() {
time.Sleep(2 * time.Minute)
})
backups, err := veleroutil.GetBackupsForSchedule(
n.Ctx,
n.Client.Kubebuilder,
n.ScheduleName,
n.VeleroCfg.Namespace,
)
Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName))
backupCountPostPause := len(backups)
fmt.Printf("After pause, backups count is %d\n", backupCountPostPause)
By(fmt.Sprintf("Verify no new backups from %s ......\n", n.ScheduleName), func() {
Expect(backupCountPostPause).To(Equal(1))
})
By(fmt.Sprintf("Unpause schedule %s ......\n", n.ScheduleName), func() {
Expect(
veleroutil.VeleroScheduleUnpause(
n.Ctx,
n.VeleroCfg.VeleroCLI,
n.VeleroCfg.VeleroNamespace,
n.ScheduleName,
),
).To(Succeed())
})
return nil
}
func (n *PeriodicalCase) Verify() error {
By("Namespaces were restored", func() {
for _, ns := range *n.NSIncluded {
_, err := k8sutil.GetConfigMap(n.Client.ClientGo, ns, n.CaseBaseName)
Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list CM in namespace: %s\n", ns))
}
})
return nil
}
func (n *PeriodicalCase) Clean() error {
if CurrentSpecReport().Failed() && n.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
Expect(
veleroutil.VeleroScheduleDelete(
n.Ctx,
n.VeleroCfg.VeleroCLI,
n.VeleroCfg.VeleroNamespace,
n.ScheduleName,
),
).To(Succeed())
Expect(n.TestCase.Clean()).To(Succeed())
}
return nil
}

View File

@@ -1,137 +0,0 @@
package schedule
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
type ScheduleBackupCreation struct {
TestCase
namespace string
ScheduleName string
ScheduleArgs []string
Period int // Limitation: the unit is minutes only, and it must divide 60 evenly
randBackupName string
verifyTimes int
volume string
podName string
pvcName string
podAnn map[string]string
podSleepDuration time.Duration
}
var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{})
func (s *ScheduleBackupCreation) Init() error {
s.TestCase.Init()
s.CaseBaseName = "schedule-backup-creation-test" + s.UUIDgen
s.ScheduleName = "schedule-" + s.CaseBaseName
s.namespace = s.GetTestCase().CaseBaseName
s.Period = 3 // Unit is minute
s.verifyTimes = 5 // More verification rounds give higher confidence
podSleepDurationStr := "300s"
s.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
s.TestMsg = &TestMSG{
Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
FailedMSG: "Failed to verify schedule back creation behavior",
Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
}
s.podAnn = map[string]string{
"pre.hook.backup.velero.io/container": s.podName,
"pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]",
"pre.hook.backup.velero.io/timeout": "600s",
}
s.volume = "volume-1"
s.podName = "pod-1"
s.pvcName = "pvc-1"
s.ScheduleArgs = []string{
"--include-namespaces", s.namespace,
"--schedule=*/" + fmt.Sprintf("%v", s.Period) + " * * * *",
}
Expect(s.Period).To(BeNumerically("<", 30))
return nil
}
func (s *ScheduleBackupCreation) CreateResources() error {
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
_, err := CreatePod(s.Client, s.namespace, s.podName, "default", s.pvcName, []string{s.volume}, nil, s.podAnn)
Expect(err).To(Succeed())
err = WaitForPods(s.Ctx, s.Client, s.namespace, []string{s.podName})
Expect(err).To(Succeed())
})
return nil
}
func (s *ScheduleBackupCreation) Backup() error {
// Wait until the beginning of the given period to create the schedule. This gives us
// a predictable window to wait for the first scheduled backup, and lets us verify that
// no scheduled backup is created between schedule creation and the first tick.
By(fmt.Sprintf("Creating schedule %s ......\n", s.ScheduleName), func() {
for i := 0; i < s.Period*60/30; i++ {
time.Sleep(30 * time.Second)
now := time.Now().Minute()
triggerNow := now % s.Period
if triggerNow == 0 {
Expect(VeleroScheduleCreate(s.Ctx, s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, s.ScheduleName, s.ScheduleArgs)).To(Succeed(), func() string {
RunDebug(context.Background(), s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, "", "")
return "Fail to create schedule"
})
break
}
}
})
By("Delay one more minute to make sure the new backup was created in the given period", func() {
time.Sleep(1 * time.Minute)
})
By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", s.Period), func() {
for i := 1; i <= s.verifyTimes; i++ {
fmt.Printf("Start to sleep %d minute #%d time...\n", s.podSleepDuration, i)
mi, _ := time.ParseDuration("60s")
time.Sleep(s.podSleepDuration + mi)
bMap := make(map[string]string)
backupsInfo, err := GetScheduledBackupsCreationTime(s.Ctx, s.VeleroCfg.VeleroCLI, "default", s.ScheduleName)
Expect(err).To(Succeed())
Expect(backupsInfo).To(HaveLen(i))
for index, bi := range backupsInfo {
bList := strings.Split(bi, ",")
fmt.Printf("Backup %d: %v\n", index, bList)
bMap[bList[0]] = bList[1]
_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
Expect(err).To(Succeed())
}
if i == s.verifyTimes-1 {
backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
s.randBackupName = strings.Split(backupInfo, ",")[0]
}
}
})
return nil
}
func (s *ScheduleBackupCreation) Clean() error {
if CurrentSpecReport().Failed() && s.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
Expect(VeleroScheduleDelete(s.Ctx, s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, s.ScheduleName)).To(Succeed())
Expect(s.TestCase.Clean()).To(Succeed())
}
return nil
}

View File

@@ -1,214 +0,0 @@
package schedule
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/util/k8s"
. "github.com/vmware-tanzu/velero/test/util/velero"
)
type ScheduleBackup struct {
TestCase
ScheduleName string
ScheduleArgs []string
Period int // Limitation: the unit is minutes only, and it must divide 60 evenly
randBackupName string
verifyTimes int
}
var ScheduleBackupTest func() = TestFunc(&ScheduleBackup{})
func (n *ScheduleBackup) Init() error {
n.TestCase.Init()
n.CaseBaseName = "schedule-backup-" + n.UUIDgen
n.NSIncluded = &[]string{n.CaseBaseName}
n.ScheduleName = "schedule-" + n.CaseBaseName
n.RestoreName = "restore-" + n.CaseBaseName
n.Period = 3 // Unit is minute
n.verifyTimes = 5 // More verification rounds give higher confidence
n.TestMsg = &TestMSG{
Desc: "Set up a scheduled backup defined by a Cron expression",
FailedMSG: "Failed to schedule a backup",
Text: "should backup periodly according to the schedule",
}
n.ScheduleArgs = []string{
"--include-namespaces", strings.Join(*n.NSIncluded, ","),
"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
}
Expect(n.Period).To(BeNumerically("<", 30))
return nil
}
func (n *ScheduleBackup) CreateResources() error {
for _, ns := range *n.NSIncluded {
By(fmt.Sprintf("Creating namespaces %s ......\n", ns), func() {
Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
})
configMapName := n.CaseBaseName
fmt.Printf("Creating ConfigMap %s in namespace %s ...\n", configMapName, ns)
_, err := CreateConfigMap(n.Client.ClientGo, ns, configMapName, nil, nil)
Expect(err).To(Succeed(), fmt.Sprintf("failed to create ConfigMap in the namespace %q", ns))
Expect(WaitForConfigMapComplete(n.Client.ClientGo, ns, configMapName)).To(Succeed(),
fmt.Sprintf("failed to ensure ConfigMap completion in namespace: %q", ns))
}
return nil
}
func (n *ScheduleBackup) Backup() error {
// Wait until the beginning of the given period to create the schedule. This gives us
// a predictable window to wait for the first scheduled backup, and lets us verify that
// no scheduled backup is created between schedule creation and the first tick.
By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
for i := 0; i < n.Period*60/30; i++ {
time.Sleep(30 * time.Second)
now := time.Now().Minute()
triggerNow := now % n.Period
if triggerNow == 0 {
Expect(VeleroScheduleCreate(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "")
return "Fail to create schedule"
})
break
}
}
})
By(fmt.Sprintf("Schedule %s is created without any delay\n", n.ScheduleName), func() {
creationTimestamp, err := GetSchedule(n.Ctx, n.VeleroCfg.VeleroNamespace, n.ScheduleName)
Expect(err).To(Succeed())
creationTime, err := time.Parse(time.RFC3339, strings.Replace(creationTimestamp, "'", "", -1))
Expect(err).To(Succeed())
fmt.Printf("Schedule %s created at %s\n", n.ScheduleName, creationTime)
now := time.Now()
diff := creationTime.Sub(now)
Expect(diff.Minutes()).To(BeNumerically("<", 1))
})
By(fmt.Sprintf("No immediate backup is created by schedule %s\n", n.ScheduleName), func() {
for i := 0; i < n.Period; i++ {
time.Sleep(1 * time.Minute)
now := time.Now()
fmt.Printf("Get backup for #%d time at %v\n", i, now)
//Skip the last minute of the period to avoid racing with the 1st backup by schedule
if i != n.Period-1 {
backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed())
Expect(backupsInfo).To(BeEmpty())
}
}
})
By("Delay one more minute to make sure the new backup was created in the given period", func() {
time.Sleep(time.Minute)
})
By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() {
for i := 0; i < n.verifyTimes; i++ {
fmt.Printf("Start to sleep %d minute #%d time...\n", n.Period, i+1)
time.Sleep(time.Duration(n.Period) * time.Minute)
bMap := make(map[string]string)
backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed())
Expect(backupsInfo).To(HaveLen(i + 2))
for index, bi := range backupsInfo {
bList := strings.Split(bi, ",")
fmt.Printf("Backup %d: %v\n", index, bList)
bMap[bList[0]] = bList[1]
_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
Expect(err).To(Succeed())
}
if i == n.verifyTimes-1 {
backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
n.randBackupName = strings.Split(backupInfo, ",")[0]
}
}
})
n.BackupName = strings.Replace(n.randBackupName, " ", "", -1)
n.RestoreArgs = []string{
"create", "--namespace", n.VeleroCfg.VeleroNamespace, "restore", n.RestoreName,
"--from-backup", n.BackupName,
"--wait",
}
backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName))
fmt.Println(backupsInfo)
backupCount := len(backupsInfo)
By(fmt.Sprintf("Pause schedule %s ......\n", n.ScheduleName), func() {
Expect(VeleroSchedulePause(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string {
RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "")
return "Fail to pause schedule"
})
})
periodCount := 3
sleepDuration := time.Duration(n.Period*periodCount) * time.Minute
By(fmt.Sprintf("Sleep for %s ......\n", sleepDuration), func() {
time.Sleep(sleepDuration)
})
backupsInfo, err = GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName))
backupCountPostPause := len(backupsInfo)
fmt.Printf("After pause, backkups count is %d\n", backupCountPostPause)
By(fmt.Sprintf("Verify no new backups from %s ......\n", n.ScheduleName), func() {
Expect(backupCountPostPause).To(Equal(backupCount))
})
By(fmt.Sprintf("Unpause schedule %s ......\n", n.ScheduleName), func() {
Expect(VeleroScheduleUnpause(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string {
RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "")
return "Fail to unpause schedule"
})
})
By(fmt.Sprintf("Sleep for %s ......\n", sleepDuration), func() {
time.Sleep(sleepDuration)
})
backupsInfo, err = GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName))
fmt.Println(backupsInfo)
backupCountPostUnpause := len(backupsInfo)
fmt.Printf("After unpause, backkups count is %d\n", backupCountPostUnpause)
By(fmt.Sprintf("Verify no new backups by schedule %s ......\n", n.ScheduleName), func() {
Expect(backupCountPostUnpause - backupCount).To(BeNumerically(">=", periodCount-1))
})
return nil
}
func (n *ScheduleBackup) Verify() error {
By("Namespaces were restored", func() {
for _, ns := range *n.NSIncluded {
configmap, err := GetConfigmap(n.Client.ClientGo, ns, n.CaseBaseName)
fmt.Printf("Restored configmap is %v\n", configmap)
Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q\n", ns))
}
})
return nil
}
func (n *ScheduleBackup) Clean() error {
if CurrentSpecReport().Failed() && n.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
Expect(VeleroScheduleDelete(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed())
Expect(n.TestCase.Clean()).To(Succeed())
}
return nil
}

View File

@@ -91,8 +91,9 @@ func TestFuncWithMultiIt(tests []VeleroBackupRestoreTest) func() {
}
func TestIt(test VeleroBackupRestoreTest) error {
test.Init()
It(test.GetTestMsg().Text, func() {
It("Run E2E test case", func() {
Expect(test.Init()).To(Succeed())
Expect(RunTestCase(test)).To(Succeed(), test.GetTestMsg().FailedMSG)
})
return nil
@@ -213,6 +214,7 @@ func RunTestCase(test VeleroBackupRestoreTest) error {
if test == nil {
return errors.New("No case should be tested")
}
fmt.Println("Running case: ", test.GetTestMsg().Text)
test.Start()
defer test.GetTestCase().CtxCancel()

View File

@@ -78,8 +78,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
By("Uninstall Velero", func() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
})
}
})
@@ -97,8 +96,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
By("Uninstall Velero", func() {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI,
veleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed())
})
}
}
@@ -124,30 +122,23 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
veleroCfg.GCFrequency = ""
By(fmt.Sprintf("Install the expected old version Velero (%s) for upgrade",
veleroCLI2Version.VeleroVersion), func() {
//Set VeleroImage and RestoreHelperImage to blank
//VeleroImage and RestoreHelperImage should be the default value in originalCli
tmpCfgForOldVeleroInstall := veleroCfg
tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion = veleroCLI2Version.VeleroVersion
tmpCfgForOldVeleroInstall.VeleroCLI = veleroCLI2Version.VeleroCLI
tmpCfgForOldVeleroInstall.VeleroImage = ""
tmpCfgForOldVeleroInstall.RestoreHelperImage = ""
tmpCfgForOldVeleroInstall.Plugins = ""
tmpCfgForOldVeleroInstall, err = SetImagesToDefaultValues(
tmpCfgForOldVeleroInstall,
veleroCLI2Version.VeleroVersion,
)
Expect(err).To(Succeed(), "Fail to set the images for upgrade-from Velero installation.")
tmpCfgForOldVeleroInstall.UploaderType = ""
version, err := GetVeleroVersion(oneHourTimeout, tmpCfgForOldVeleroInstall.VeleroCLI, true)
Expect(err).To(Succeed(), "Fail to get Velero version")
tmpCfgForOldVeleroInstall.VeleroVersion = version
tmpCfgForOldVeleroInstall.UseVolumeSnapshots = useVolumeSnapshots
if supportUploaderType {
tmpCfgForOldVeleroInstall.UseRestic = false
tmpCfgForOldVeleroInstall.UseNodeAgent = !useVolumeSnapshots
} else {
tmpCfgForOldVeleroInstall.UseRestic = !useVolumeSnapshots
tmpCfgForOldVeleroInstall.UseNodeAgent = false
}
//TODO: Remove this setting when upgrade path is from 1.13 to higher
//TODO: version, or self version 1.12 and older versions have no this parameter.
tmpCfgForOldVeleroInstall.WithoutDisableInformerCacheParam = true
tmpCfgForOldVeleroInstall.UseNodeAgent = !useVolumeSnapshots
Expect(VeleroInstall(context.Background(), &tmpCfgForOldVeleroInstall, false)).To(Succeed())
Expect(CheckVeleroVersion(context.Background(), tmpCfgForOldVeleroInstall.VeleroCLI,
@@ -190,8 +181,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
})
if useVolumeSnapshots {
if veleroCfg.CloudProvider == Vsphere {
// TODO - remove after upload progress monitoring is implemented
if veleroCfg.HasVspherePlugin {
By("Waiting for vSphere uploads to complete", func() {
Expect(WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour,
upgradeNamespace, 2)).To(Succeed())
@@ -203,9 +193,12 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
snapshotCheckPoint, err := GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 2,
upgradeNamespace, backupName, KibishiiPVCNameList)
Expect(err).NotTo(HaveOccurred(), "Fail to get snapshot checkpoint")
Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider,
veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket,
veleroCfg.BSLConfig, backupName, snapshotCheckPoint)).To(Succeed())
Expect(CheckSnapshotsInProvider(
veleroCfg,
backupName,
snapshotCheckPoint,
false,
)).To(Succeed())
})
}
@@ -231,7 +224,6 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
By(fmt.Sprintf("Upgrade Velero by CLI %s", tmpCfg.VeleroCLI), func() {
tmpCfg.GCFrequency = ""
tmpCfg.UseRestic = false
tmpCfg.UseNodeAgent = !useVolumeSnapshots
Expect(err).To(Succeed())
if supportUploaderType {

View File

@@ -139,7 +139,7 @@ var _ = AfterSuite(func() {
By("release test resources after testing")
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
Expect(VeleroUninstall(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed())
Expect(VeleroUninstall(ctx, VeleroCfg)).To(Succeed())
}
}
})

test/testdata/storage-class/README.md
View File

@@ -0,0 +1,12 @@
The `test/testdata/storage-class` directory contains the StorageClass YAMLs used for E2E.
Each public cloud provider (AWS, Azure, and GCP) has two StorageClasses.
* The `provider-name`.yaml contains the default StorageClass for the provider. It uses the CSI provisioner.
* The `provider-name`-legacy.yaml contains the legacy StorageClass for the provider. It uses the in-tree volume plugin as the provisioner. So far, no E2E case uses them.
The vSphere environment also has two StorageClass files.
* The vsphere-legacy.yaml is used for the TKGm environment.
* The vsphere.yaml is used for the VKS environment.
ZFS has only the default StorageClass; no StorageClass backed by an in-tree volume plugin is used in E2E.
The kind StorageClass uses the local-path provisioner. A CSI provisioner may be added when there is a need.

View File

@@ -1,8 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-csi-storage-class
provisioner: ebs.csi.aws.com
name: e2e-storage-class
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
reclaimPolicy: Delete

View File

@@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-storage-class
provisioner: kubernetes.io/aws-ebs
provisioner: ebs.csi.aws.com
parameters:
type: gp2
reclaimPolicy: Delete

View File

@@ -1,8 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-csi-storage-class
provisioner: disk.csi.azure.com
name: e2e-storage-class
provisioner: kubernetes.io/azure-disk
parameters:
cachingmode: ReadOnly
kind: Managed

View File

@@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-storage-class
provisioner: kubernetes.io/azure-disk
provisioner: disk.csi.azure.com
parameters:
cachingmode: ReadOnly
kind: Managed

View File

@@ -4,10 +4,10 @@ kind: StorageClass
metadata:
labels:
addonmanager.kubernetes.io/mode: EnsureExists
name: e2e-csi-storage-class
name: e2e-storage-class
parameters:
type: pd-standard
provisioner: pd.csi.storage.gke.io
provisioner: kubernetes.io/gce-pd
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -7,7 +7,7 @@ metadata:
name: e2e-storage-class
parameters:
type: pd-standard
provisioner: kubernetes.io/gce-pd
provisioner: pd.csi.storage.gke.io
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -1,13 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-csi-storage-class
parameters:
recordsize: "128k"
compression: "off"
dedup: "off"
fstype: "zfs"
poolname: "zfspv-pool"
provisioner: zfs.csi.openebs.io
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Retain

View File

@@ -9,4 +9,4 @@ parameters:
fstype: "zfs"
poolname: "zfspv-pool"
provisioner: zfs.csi.openebs.io
volumeBindingMode: WaitForFirstConsumer
volumeBindingMode: WaitForFirstConsumer

View File

@@ -1,11 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-csi-storage-class
name: e2e-storage-class
annotations:
storageclass.kubernetes.io/is-default-class: "false"
parameters:
StoragePolicyName: "vSAN Default Storage Policy"
StoragePolicyName: "vSAN Default Storage Policy" # This is used for TKGm environment.
provisioner: csi.vsphere.vmware.com
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -5,7 +5,7 @@ metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
parameters:
StoragePolicyName: "vSAN Default Storage Policy"
svStorageClass: worker-storagepolicy
provisioner: csi.vsphere.vmware.com
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -1,7 +1,7 @@
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
metadata:
name: zfspv-snapclass
name: e2e-volume-snapshot-class
annotations:
snapshot.storage.kubernetes.io/is-default-class: "true"
labels:

View File

@@ -0,0 +1,13 @@
---
apiVersion: snapshot.storage.k8s.io/v1
deletionPolicy: Delete
driver: csi.vsphere.vmware.com
kind: VolumeSnapshotClass
metadata:
annotations:
snapshot.storage.kubernetes.io/is-default-class: "true"
labels:
velero.io/csi-volumesnapshot-class: "true"
name: e2e-volume-snapshot-class
parameters:
svVolumeSnapshotClass: volumesnapshotclass-delete

View File

@@ -22,12 +22,15 @@ import (
"github.com/google/uuid"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
. "github.com/vmware-tanzu/velero/test/util/k8s"
"github.com/vmware-tanzu/velero/test/util/k8s"
)
// e2e-storage-class is the default StorageClass for E2E.
const StorageClassName = "e2e-storage-class"
// e2e-storage-class-2 is used for the StorageClass mapping test case.
const StorageClassName2 = "e2e-storage-class-2"
const CSIStorageClassName = "e2e-csi-storage-class"
const FeatureCSI = "EnableCSI"
const VanillaZFS = "vanilla-zfs"
const Kind = "kind"
@@ -35,12 +38,22 @@ const Azure = "azure"
const AzureCSI = "azure-csi"
const AwsCSI = "aws-csi"
const AWS = "aws"
const Gcp = "gcp"
const GCP = "gcp"
const Vsphere = "vsphere"
const CSI = "csi"
const Velero = "velero"
const VeleroRestoreHelper = "velero-restore-helper"
const UploaderTypeRestic = "restic"
var PublicCloudProviders = []string{AWS, Azure, Gcp, Vsphere}
const (
KubeSystemNamespace = "kube-system"
VSphereCSIControllerNamespace = "vmware-system-csi"
VeleroVSphereSecretName = "velero-vsphere-config-secret"
VeleroVSphereConfigMapName = "velero-vsphere-plugin-config"
)
var PublicCloudProviders = []string{AWS, Azure, GCP, Vsphere}
var LocalCloudProviders = []string{Kind, VanillaZFS}
var CloudProviders = append(PublicCloudProviders, LocalCloudProviders...)
@@ -87,9 +100,9 @@ type VeleroConfig struct {
GCFrequency string
DefaultClusterContext string
StandbyClusterContext string
ClientToInstallVelero *TestClient
DefaultClient *TestClient
StandbyClient *TestClient
ClientToInstallVelero *k8s.TestClient
DefaultClient *k8s.TestClient
StandbyClient *k8s.TestClient
ClusterToInstallVelero string
DefaultClusterName string
StandbyClusterName string
@@ -110,6 +123,7 @@ type VeleroConfig struct {
ServiceAccountNameToInstall string
EKSPolicyARN string
FailFast bool
HasVspherePlugin bool
}
type VeleroCfgInPerf struct {

View File

@@ -113,8 +113,8 @@ func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, ba
if len(pvList) != 1 {
return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], podName, namespace))
}
pv_value, err := GetPersistentVolume(context.Background(), client, "", pvList[0])
fmt.Println(pv_value.Annotations["pv.kubernetes.io/provisioned-by"])
pvValue, err := GetPersistentVolume(context.Background(), client, "", pvList[0])
fmt.Println(pvValue.Annotations["pv.kubernetes.io/provisioned-by"])
if err != nil {
return "", err
}
@@ -148,14 +148,10 @@ func CheckVolumeSnapshotCR(client TestClient, index map[string]string, expectedC
if len(apiVersion) == 0 {
return nil, errors.New("Fail to get APIVersion")
}
// if apiVersion[0] == "v1beta1" {
// if snapshotContentNameList, err = GetCsiSnapshotHandle(client, apiVersion[0], index); err != nil {
// return nil, errors.Wrap(err, "Fail to get Azure CSI snapshot content")
// }
// } else
if apiVersion[0] == "v1" {
if snapshotContentNameList, err = GetCsiSnapshotHandle(client, apiVersion[0], index); err != nil {
return nil, errors.Wrap(err, "Fail to get Azure CSI snapshot content")
return nil, errors.Wrap(err, "Fail to get CSI snapshot content")
}
} else {
return nil, errors.New("API version is invalid")

Some files were not shown because too many files have changed in this diff.