mirror of https://github.com/vmware-tanzu/velero.git
synced 2026-03-21 09:05:05 +00:00

Compare commits: copilot/de...jxun/main/ (72 commits)

SHA1: 29a9f80f10, 66ac235e1f, afe7df17d4, a31f4abcb3, 2145c57642, a9b3cfa062,
bca6afada7, d1cc303553, befa61cee1, 245525c26b, 55737b9cf1, ffea850522,
b3aff97684, 23a3c242fa, b7bc16f190, bbec46f6ee, 475050108b, b5f7cd92c7,
ab31b811ee, 19360622e7, 932d27541c, b0642b3078, 9cada8fc11, 25d5fa1b88,
1c08af8461, 6c3d81a146, 8f32696449, 3f15e9219f, 544b184d6c, 250c4db158,
f0d81c56e2, 8b5559274d, 7235180de4, ba5e7681ff, fc0a16d734, bcdee1b116,
2a696a4431, 991bf1b000, 4d47471932, 0bf968d24d, 05c9a8d8f8, bc957a22b7,
7e3d66adc7, 710ebb9d92, 1315399f35, 7af688fbf5, 41fa774844, 5121417457,
ece04e6e39, 71ddeefcd6, e159992f48, 48b14194df, 556d5826a8, 62939cec18,
7d6a10d3ea, 1c0cf6c51d, 58f0b29091, 5cb4cdba61, 325eb50480, 993b80a350,
a909bd1f85, 62a47b9fc5, 31e9dcbb87, f824c3ca3b, 18c32ed29c, 598c8c528b,
8f9beb04f0, bb518e6d89, 89c5182c3c, d17435542e, e3b501d0d9, 060b3364f2
@@ -17,6 +17,7 @@ If you're using Velero and want to add your organization to this list,
<a href="https://www.replicated.com/" border="0" target="_blank"><img alt="replicated.com" src="site/static/img/adopters/replicated-logo-red.svg" height="50"></a>
<a href="https://cloudcasa.io/" border="0" target="_blank"><img alt="cloudcasa.io" src="site/static/img/adopters/cloudcasa.svg" height="50"></a>
<a href="https://azure.microsoft.com/" border="0" target="_blank"><img alt="azure.com" src="site/static/img/adopters/azure.svg" height="50"></a>
<a href="https://www.broadcom.com/" border="0" target="_blank"><img alt="broadcom.com" src="site/static/img/adopters/broadcom.svg" height="50"></a>

## Success Stories

Below is a list of adopters of Velero in **production environments** that have

@@ -68,6 +69,9 @@ Replicated uses the Velero open source project to enable snapshots in [KOTS][101

**[Microsoft Azure][105]**<br>
[Azure Backup for AKS][106] is an Azure-native, Kubernetes-aware, enterprise-ready backup for containerized applications deployed on Azure Kubernetes Service (AKS). AKS Backup utilizes Velero to perform backup and restore operations to protect stateful applications in AKS clusters.<br>

**[Broadcom][107]**<br>
[VMware Cloud Foundation][108] (VCF) offers the built-in [vSphere Kubernetes Service][109] (VKS), a Kubernetes runtime that includes a CNCF-certified Kubernetes distribution, to deploy and manage containerized workloads. VCF empowers platform engineers with native [Kubernetes multi-cluster management][110] capability for managing Kubernetes (K8s) infrastructure at scale. VCF utilizes Velero for Kubernetes data protection, enabling platform engineers to back up and restore containerized workloads' manifests and persistent volumes, helping to increase the resiliency of stateful applications in VKS clusters.

## Adding your organization to the list of Velero Adopters

If you are using Velero and would like to be included in the list of `Velero Adopters`, add an SVG version of your logo to the `site/static/img/adopters` directory in this repo and submit a [pull request][3] with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.svg). See this example [PR][4].

@@ -125,3 +129,8 @@ If you would like to add your logo to a future `Adopters of Velero` section on [

[105]: https://azure.microsoft.com/
[106]: https://learn.microsoft.com/azure/backup/backup-overview

[107]: https://www.broadcom.com/
[108]: https://www.vmware.com/products/cloud-infrastructure/vmware-cloud-foundation
[109]: https://www.vmware.com/products/cloud-infrastructure/vsphere-kubernetes-service
[110]: https://blogs.vmware.com/cloud-foundation/2025/09/29/empowering-platform-engineers-with-native-kubernetes-multi-cluster-management-in-vmware-cloud-foundation/
@@ -7,11 +7,11 @@

| Maintainer | GitHub ID | Affiliation |
|---------------------|---------------------------------------------------------------|--------------------------------------------------|
| Scott Seago | [sseago](https://github.com/sseago) | [OpenShift](https://github.com/openshift) |
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | [VMware](https://www.github.com/vmware/) |
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | [VMware](https://www.github.com/vmware/) |
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | Broadcom |
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | Broadcom |
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | Broadcom |
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift) |
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) |
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | Broadcom |
| Anshul Ahuja | [anshulahuja98](https://github.com/anshulahuja98) | [Microsoft Azure](https://www.github.com/azure/) |
| Tiger Kaovilai | [kaovilai](https://github.com/kaovilai) | [OpenShift](https://github.com/openshift) |

@@ -27,14 +27,3 @@

* JenTing Hsiao ([jenting](https://github.com/jenting))
* Dave Smith-Uchida ([dsu-igeek](https://github.com/dsu-igeek))
* Ming Qiu ([qiuming-best](https://github.com/qiuming-best))

## Velero Contributors & Stakeholders

| Feature Area | Lead |
|------------------------|:------------------------------------------------------------------------------------:|
| Technical Lead | Daniel Jiang [reasonerjt](https://github.com/reasonerjt) |
| Kubernetes CSI Liaison | |
| Deployment | |
| Community Management | Orlin Vasilev [OrlinVasilev](https://github.com/OrlinVasilev) |
| Product Management | Pradeep Kumar Chaturvedi [pradeepkchaturvedi](https://github.com/pradeepkchaturvedi) |
@@ -42,13 +42,11 @@ The following is a list of the supported Kubernetes versions for each Velero ver

| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|----------------|-------------------------------------------|-------------------------------------|
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
| 1.18 | 1.18-latest | 1.33.7, 1.34.1, and 1.35.0 |
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4, and 1.31.1 |
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
| 1.13 | 1.18-latest | 1.26.5, 1.27.3, 1.27.8, and 1.28.3 |
| 1.12 | 1.18-latest | 1.25.7, 1.26.5, 1.26.7, and 1.27.3 |
| 1.11 | 1.18-latest | 1.23.10, 1.24.9, 1.25.5, and 1.26.1 |

Velero supports IPv4, IPv6, and dual-stack environments. Support for this was tested against Velero v1.8.
changelogs/CHANGELOG-1.18.md (new file, 109 lines)
@@ -0,0 +1,109 @@
## v1.18

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.18.0

### Container Image
`velero/velero:v1.18.0`

### Documentation
https://velero.io/docs/v1.18/

### Upgrading
https://velero.io/docs/v1.18/upgrade-to-1.18/

### Highlights
#### Concurrent backup
In v1.18, Velero can process multiple backups concurrently. This is a significant usability improvement, especially for multi-tenant and multi-user cases: backups submitted by different users can now run simultaneously without interfering with each other.

Check the design at https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/concurrent-backup-processing.md for more details; a conceptual sketch follows.
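The processing model is, at its core, a bounded set of workers draining a queue of pending backups instead of a single serial loop. The Go sketch below illustrates only that idea; the worker count, names, and queueing are invented for the example and are not Velero's actual controller code:

```go
package main

import (
	"fmt"
	"sync"
)

// process stands in for reconciling one backup; in Velero this work is done
// by the backup controller, not by this simplified pool.
func process(backup string) { fmt.Println("processing", backup) }

func main() {
	backups := []string{"tenant-a-daily", "tenant-b-daily", "tenant-c-adhoc"}
	queue := make(chan string)
	const workers = 2 // illustrative concurrency bound

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for b := range queue {
				process(b) // backups from different users proceed in parallel
			}
		}()
	}
	for _, b := range backups {
		queue <- b
	}
	close(queue)
	wg.Wait()
}
```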
#### Cache volume for data movers
In v1.18, Velero allows users to configure cache volumes for data mover pods during restore, for both CSI snapshot data movement and fs-backup. This brings the following benefits:
- It solves the problem of data mover pods failing to run when the pod's ephemeral disk space is limited
- It solves the problem of multiple data mover pods failing to run concurrently on one node when the node's ephemeral disk space is limited
- Together with the backup repository's cache limit configuration, a cache volume of appropriate size helps improve restore throughput

Check the design at https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/backup-repo-cache-volume.md for more details.

#### Incremental size for data movers
In v1.18, Velero reports the incremental size of data mover backups for CSI snapshot data movement and fs-backup, so users can see at a glance the data reduction achieved by incremental backups.

#### Wildcard support for namespaces
In v1.18, Velero supports glob patterns in the namespace filters used during backup and restore, so users can select namespaces in batches; a sketch of the expansion idea follows.
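As a rough sketch of the expansion idea (this example assumes a glob matcher such as `github.com/gobwas/glob`; Velero's actual implementation may differ), patterns are expanded against the cluster's active namespaces before the backup proceeds:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// expandNamespaces returns the active namespaces matched by each pattern.
// The pattern forms shown in the changelog entry below are *, ?, [abc], {a,b,c}.
func expandNamespaces(patterns, active []string) ([]string, error) {
	var out []string
	for _, p := range patterns {
		g, err := glob.Compile(p)
		if err != nil {
			return nil, fmt.Errorf("invalid namespace pattern %q: %w", p, err)
		}
		for _, ns := range active {
			if g.Match(ns) {
				out = append(out, ns)
			}
		}
	}
	return out, nil
}

func main() {
	active := []string{"app-dev", "app-prod", "kube-system"}
	matched, err := expandNamespaces([]string{"app-*"}, active)
	if err != nil {
		panic(err)
	}
	fmt.Println(matched) // [app-dev app-prod]
}
```

A pattern that matches nothing yields an empty include list and therefore an empty backup, consistent with the wildcard changelog entry further down.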
#### VolumePolicy for PVC phase
In v1.18, Velero VolumePolicy supports actions conditioned on PVC phase, which lets users apply special handling to PVCs in a specific phase, e.g., skipping PVCs in Pending/Lost status from the backup, as sketched below.
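For illustration, the struct form below mirrors the tests added later in this diff; it builds a policy that skips Pending/Lost PVCs. The import path is assumed from the repo layout, and in practice users supply the equivalent YAML in a ConfigMap referenced when creating the backup:

```go
package main

import (
	"fmt"

	// Path assumed from the Velero repo layout; the tests below use these same types.
	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
)

func main() {
	// Skip PVCs that are Pending or Lost; an unbound PVC has no PV to
	// evaluate, so the condition matches on the PVC phase alone.
	rp := &resourcepolicies.ResourcePolicies{
		Version: "v1",
		VolumePolicies: []resourcepolicies.VolumePolicy{
			{
				Conditions: map[string]any{
					"pvcPhase": []string{"Pending", "Lost"},
				},
				Action: resourcepolicies.Action{Type: resourcepolicies.Skip},
			},
		},
	}

	// BuildPolicy validates the policy, exactly as the tests in this diff do.
	p := &resourcepolicies.Policies{}
	if err := p.BuildPolicy(rp); err != nil {
		fmt.Println("invalid policy:", err)
	}
}
```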
#### Scalability and resiliency improvements
##### Prevent Velero server OOM kill for large backup repositories
In v1.18, some backup repository operations are deferred and executed outside the Velero server process, so the Velero server won't be OOM killed.

#### Performance improvement for VolumePolicy
In v1.18, VolumePolicy evaluation is optimized for large numbers of pods/PVCs, significantly improving performance.

#### Events for data mover pod diagnostics
In v1.18, events are recorded in the data mover pod diagnostics, giving users more information for troubleshooting when a data mover pod fails.

### Runtime and dependencies
Golang runtime: 1.25.7
kopia: 0.22.3

### Limitations/Known issues

### Breaking changes
#### Deprecation of the PVC selected-node feature
Per the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/main/GOVERNANCE.md#deprecation-policy), the PVC selected-node feature is deprecated in v1.18. Velero handles the PVC's selected-node annotation appropriately, so no particular user action is required.

### All Changes
* Remove backup from running list when backup fails validation (#9498, @sseago)
* Maintenance Job only uses the first element of the LoadAffinity array (#9494, @blackpiglet)
* Fix issue #9478, add diagnose info on expose peek fails (#9481, @Lyndon-Li)
* Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence. (#9474, @blackpiglet)
* Add maintenance job and data mover pod's labels and annotations setting. (#9452, @blackpiglet)
* Fix plugin init container names exceeding DNS-1123 limit (#9445, @mpryc)
* Add PVC-to-Pod cache to improve volume policy performance (#9441, @shubham-pampattiwar)
* Remove VolumeSnapshotClass from CSI B/R process. (#9431, @blackpiglet)
* Use hookIndex for recording multiple restore exec hooks. (#9366, @blackpiglet)
* Sanitize Azure HTTP responses in BSL status messages (#9321, @shubham-pampattiwar)
* Remove labels associated with previous backups (#9206, @Joeavaikath)
* Add VolumePolicy support for PVC Phase conditions to allow skipping Pending PVCs (#9166, @claude)
* feat: Enhance BackupStorageLocation with Secret-based CA certificate support (#9141, @kaovilai)
* Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs (#9132, @mjnagel)
* Fix issue #9194, add doc for GOMAXPROCS behavior change (#9420, @Lyndon-Li)
* Apply volume policies to VolumeGroupSnapshot PVC filtering (#9419, @shubham-pampattiwar)
* Fix issue #9276, add doc for cache volume support (#9418, @Lyndon-Li)
* Add Prometheus metrics for maintenance jobs (#9414, @shubham-pampattiwar)
* Fix issue #9400, connect repo first time after creation so that init params could be written (#9407, @Lyndon-Li)
* Cache volume for PVR (#9397, @Lyndon-Li)
* Cache volume support for DataDownload (#9391, @Lyndon-Li)
* don't copy securitycontext from first container if configmap found (#9389, @sseago)
* Refactor repo provider interface for static configuration (#9379, @Lyndon-Li)
* Fix issue #9365, prevent fake completion notification due to multiple update of single PVR (#9375, @Lyndon-Li)
* Add cache volume configuration (#9370, @Lyndon-Li)
* Track actual resource names for GenerateName in restore status (#9368, @shubham-pampattiwar)
* Fix managed fields patch for resources using GenerateName (#9367, @shubham-pampattiwar)
* Support cache volume for generic restore exposer and pod volume exposer (#9362, @Lyndon-Li)
* Add incrementalSize to DU/PVB for reporting new/changed size (#9357, @sseago)
* Add snapshotSize for DataDownload, PodVolumeRestore (#9354, @Lyndon-Li)
* Add cache dir configuration for udmrepo (#9353, @Lyndon-Li)
* Fix the Job build error when the BackupRepository name is longer than 63 characters. (#9350, @blackpiglet)
* Add cache configuration to VGDP (#9342, @Lyndon-Li)
* Fix issue #9332, add bytesDone for cache files (#9333, @Lyndon-Li)
* Fix typos in documentation (#9329, @T4iFooN-IX)
* Concurrent backup processing (#9307, @sseago)
* VerifyJSONConfigs verifies every element in Data. (#9302, @blackpiglet)
* Fix issue #9267, add events to data mover prepare diagnostic (#9296, @Lyndon-Li)
* Add option for privileged fs-backup pod (#9295, @sseago)
* Fix issue #9193, don't connect repo in repo controller (#9291, @Lyndon-Li)
* Implement concurrency control for cache of native VolumeSnapshotter plugin. (#9281, @0xLeo258)
* Fix issue #7904, remove the code and doc for PVC node selection (#9269, @Lyndon-Li)
* Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases (#9264, @shubham-pampattiwar)
* Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment (#9256, @shubham-pampattiwar)
* Implement wildcard namespace pattern expansion for backup namespace includes/excludes. This change adds support for wildcard patterns (*, ?, [abc], {a,b,c}) in namespace includes and excludes during backup operations (#9255, @Joeavaikath)
* Protect VolumeSnapshot field from race condition during multi-thread backup (#9248, @0xLeo258)
* Update AzureAD Microsoft Authentication Library to v1.5.0 (#9244, @priyansh17)
* Get pod list once per namespace in pvc IBA (#9226, @sseago)
* Fix issue #7725, add design for backup repo cache configuration (#9148, @Lyndon-Li)
* Fix issue #9229, don't attach backupPVC to the source node (#9233, @Lyndon-Li)
* feat: Permit specifying annotations for the BackupPVC (#9173, @clementnuss)
@@ -1 +0,0 @@
Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs

@@ -1 +0,0 @@
feat: Enhance BackupStorageLocation with Secret-based CA certificate support

@@ -1 +0,0 @@
Fix issue #7725, add design for backup repo cache configuration

@@ -1 +0,0 @@
Add VolumePolicy support for PVC Phase conditions to allow skipping Pending PVCs

@@ -1 +0,0 @@
feat: Permit specifying annotations for the BackupPVC

@@ -1 +0,0 @@
Remove labels associated with previous backups

@@ -1 +0,0 @@
Get pod list once per namespace in pvc IBA

@@ -1 +0,0 @@
Fix issue #9229, don't attach backupPVC to the source node

@@ -1 +0,0 @@
Update AzureAD Microsoft Authentication Library to v1.5.0

@@ -1 +0,0 @@
Protect VolumeSnapshot field from race condition during multi-thread backup

@@ -1,10 +0,0 @@
Implement wildcard namespace pattern expansion for backup namespace includes/excludes.

This change adds support for wildcard patterns (*, ?, [abc], {a,b,c}) in namespace includes and excludes during backup operations.
When wildcard patterns are detected, they are expanded against the list of active namespaces in the cluster before the backup proceeds.

Key features:
- Wildcard patterns in namespace includes/excludes are automatically detected and expanded
- Pattern validation ensures unsupported patterns (regex, consecutive asterisks) are rejected
- Empty wildcard results (e.g., "invalid*" matching no namespaces) correctly result in empty backups
- Exact namespace names and "*" continue to work as before (no expansion needed)

@@ -1 +0,0 @@
Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment

@@ -1 +0,0 @@
Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases

@@ -1 +0,0 @@
Fix issue #7904, remove the code and doc for PVC node selection

@@ -1 +0,0 @@
Implement concurrency control for cache of native VolumeSnapshotter plugin.

@@ -1 +0,0 @@
Fix issue #9193, don't connect repo in repo controller

@@ -1 +0,0 @@
Add option for privileged fs-backup pod

@@ -1 +0,0 @@
Fix issue #9267, add events to data mover prepare diagnostic

@@ -1 +0,0 @@
VerifyJSONConfigs verify every elements in Data.

@@ -1 +0,0 @@
Concurrent backup processing

@@ -1 +0,0 @@
Sanitize Azure HTTP responses in BSL status messages

@@ -1 +0,0 @@
Fix typos in documentation

@@ -1 +0,0 @@
Fix issue #9332, add bytesDone for cache files

@@ -1 +0,0 @@
Add cache configuration to VGDP

@@ -1 +0,0 @@
Fix the Job build error when BackupReposiotry name longer than 63.

@@ -1 +0,0 @@
Add cache dir configuration for udmrepo

@@ -1 +0,0 @@
Add snapshotSize for DataDownload, PodVolumeRestore

@@ -1 +0,0 @@
Add incrementalSize to DU/PVB for reporting new/changed size

@@ -1 +0,0 @@
Support cache volume for generic restore exposer and pod volume exposer

@@ -1 +0,0 @@
Use hookIndex for recording multiple restore exec hooks.

@@ -1 +0,0 @@
Fix managed fields patch for resources using GenerateName

@@ -1 +0,0 @@
Track actual resource names for GenerateName in restore status

@@ -1 +0,0 @@
Add cache volume configuration

@@ -1 +0,0 @@
Fix issue #9365, prevent fake completion notification due to multiple update of single PVR

@@ -1 +0,0 @@
Refactor repo provider interface for static configuration

@@ -1 +0,0 @@
don't copy securitycontext from first container if configmap found

@@ -1 +0,0 @@
Cache volume support for DataDownload

@@ -1 +0,0 @@
Cache volume for PVR

@@ -1 +0,0 @@
Fix issue #9400, connect repo first time after creation so that init params could be written

@@ -1 +0,0 @@
Add Prometheus metrics for maintenance jobs

@@ -1 +0,0 @@
Fix issue #9276, add doc for cache volume support

@@ -1 +0,0 @@
Apply volume policies to VolumeGroupSnapshot PVC filtering

@@ -1 +0,0 @@
Fix issue #9194, add doc for GOMAXPROCS behavior change

@@ -1 +0,0 @@
Remove VolumeSnapshotClass from CSI B/R process.

@@ -1 +0,0 @@
Add PVC-to-Pod cache to improve volume policy performance

@@ -1 +0,0 @@
Fix plugin init container names exceeding DNS-1123 limit

@@ -1 +0,0 @@
Add maintenance job and data mover pod's labels and annotations setting.

@@ -1 +0,0 @@
Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence.

@@ -1 +0,0 @@
Fix issue #9478, add diagnose info on expose peek fails

@@ -1 +0,0 @@
Maintenance Job only uses the first element of the LoadAffinity array

@@ -1 +0,0 @@
Remove backup from running list when backup fails validation
changelogs/unreleased/9502-Joeavaikath (new file, 1 line)
@@ -0,0 +1 @@
Support all glob wildcard characters in namespace validation

changelogs/unreleased/9508-kaovilai (new file, 1 line)
@@ -0,0 +1 @@
Fix VolumePolicy PVC phase condition filter for unbound PVCs (#9507)

changelogs/unreleased/9532-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
Fix issue #9343, include PV topology to data mover pod affinities

changelogs/unreleased/9533-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
Fix issue #9496, support customized host OS

changelogs/unreleased/9547-blackpiglet (new file, 1 line)
@@ -0,0 +1 @@
If a BIA returns an updated object with SkipFromBackupAnnotation, treat it as skipping the resource from backup.

changelogs/unreleased/9554-testsabirweb (new file, 1 line)
@@ -0,0 +1 @@
Issue #9544: Add test coverage for S3 bucket name in MRAP ARN notation and fix bucket validation to accept ARN format

changelogs/unreleased/9560-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
Fix issue #9475, use node-selector instead of nodeName for generic restore

changelogs/unreleased/9561-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
Fix issue #9460, flush buffer before data mover completes

changelogs/unreleased/9570-H-M-Quang-Ngo (new file, 1 line)
@@ -0,0 +1 @@
Add schedule_expected_interval_seconds metric for dynamic backup alerting thresholds (#9559)

changelogs/unreleased/9574-blackpiglet (new file, 1 line)
@@ -0,0 +1 @@
Add ephemeral storage limit and request support for data mover and maintenance job

changelogs/unreleased/9581-shubham-pampattiwar (new file, 1 line)
@@ -0,0 +1 @@
Fix DBR stuck when CSI snapshot no longer exists in cloud provider
go.mod (12 lines changed)
@@ -43,6 +43,7 @@ require (
	go.uber.org/zap v1.27.1
	golang.org/x/mod v0.30.0
	golang.org/x/oauth2 v0.33.0
	golang.org/x/sys v0.40.0
	golang.org/x/text v0.31.0
	google.golang.org/api v0.256.0
	google.golang.org/grpc v1.77.0

@@ -171,11 +172,11 @@ require (
	go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
	go.opentelemetry.io/otel v1.38.0 // indirect
	go.opentelemetry.io/otel/metric v1.38.0 // indirect
	go.opentelemetry.io/otel/sdk v1.38.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
	go.opentelemetry.io/otel/trace v1.38.0 // indirect
	go.opentelemetry.io/otel v1.40.0 // indirect
	go.opentelemetry.io/otel/metric v1.40.0 // indirect
	go.opentelemetry.io/otel/sdk v1.40.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
	go.opentelemetry.io/otel/trace v1.40.0 // indirect
	go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.yaml.in/yaml/v2 v2.4.3 // indirect

@@ -183,7 +184,6 @@ require (
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/net v0.47.0 // indirect
	golang.org/x/sync v0.18.0 // indirect
	golang.org/x/sys v0.38.0 // indirect
	golang.org/x/term v0.37.0 // indirect
	golang.org/x/time v0.14.0 // indirect
	golang.org/x/tools v0.38.0 // indirect
go.sum (24 lines changed)
@@ -748,18 +748,18 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.starlark.net v0.0.0-20201006213952-227f4aabceb5/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=

@@ -969,8 +969,8 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -21,9 +21,11 @@ ENV GO111MODULE=on
ENV GOPROXY=${GOPROXY}

# kubebuilder test bundle is separated from kubebuilder. Need to setup it for CI test.
RUN curl -sSLo envtest-bins.tar.gz https://go.kubebuilder.io/test-tools/1.22.1/linux/$(go env GOARCH) && \
    mkdir /usr/local/kubebuilder && \
    tar -C /usr/local/kubebuilder --strip-components=1 -zvxf envtest-bins.tar.gz
# Using setup-envtest to download envtest binaries
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
    mkdir -p /usr/local/kubebuilder/bin && \
    ENVTEST_ASSETS_DIR=$(setup-envtest use 1.33.0 --bin-dir /usr/local/kubebuilder/bin -p path) && \
    cp -r ${ENVTEST_ASSETS_DIR}/* /usr/local/kubebuilder/bin/

RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.2.0/kubebuilder_linux_$(go env GOARCH) && \
    mv kubebuilder_linux_$(go env GOARCH) /usr/local/kubebuilder/bin/kubebuilder && \
@@ -1,5 +1,5 @@
diff --git a/go.mod b/go.mod
index 5f939c481..6ae17f4a1 100644
index 5f939c481..f6205aa3c 100644
--- a/go.mod
+++ b/go.mod
@@ -24,32 +24,31 @@ require (
@@ -14,13 +14,13 @@ index 5f939c481..6ae17f4a1 100644
- golang.org/x/term v0.4.0
- golang.org/x/text v0.6.0
- google.golang.org/api v0.106.0
+ golang.org/x/crypto v0.36.0
+ golang.org/x/net v0.38.0
+ golang.org/x/crypto v0.45.0
+ golang.org/x/net v0.47.0
+ golang.org/x/oauth2 v0.28.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/term v0.30.0
+ golang.org/x/text v0.23.0
+ golang.org/x/sync v0.18.0
+ golang.org/x/sys v0.38.0
+ golang.org/x/term v0.37.0
+ golang.org/x/text v0.31.0
+ google.golang.org/api v0.114.0
)

@@ -64,11 +64,11 @@ index 5f939c481..6ae17f4a1 100644
)

-go 1.18
+go 1.23.0
+go 1.24.0
+
+toolchain go1.23.7
+toolchain go1.24.11
diff --git a/go.sum b/go.sum
index 026e1d2fa..805792055 100644
index 026e1d2fa..4a37e7ac7 100644
--- a/go.sum
+++ b/go.sum
@@ -1,23 +1,24 @@
@@ -170,8 +170,8 @@ index 026e1d2fa..805792055 100644
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -181,8 +181,8 @@ index 026e1d2fa..805792055 100644
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
@@ -194,8 +194,8 @@ index 026e1d2fa..805792055 100644
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -205,21 +205,21 @@ index 026e1d2fa..805792055 100644
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -137,6 +137,10 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
			return checkVSCReadiness(ctx, &snapCont, p.crClient)
		},
	); err != nil {
		// Clean up the VSC we created since it can't become ready
		if deleteErr := p.crClient.Delete(context.TODO(), &snapCont); deleteErr != nil && !apierrors.IsNotFound(deleteErr) {
			p.log.WithError(deleteErr).Errorf("Failed to clean up VolumeSnapshotContent %s", snapCont.Name)
		}
		return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
	}

@@ -167,6 +171,13 @@ var checkVSCReadiness = func(
		return true, nil
	}

	// Fail fast on permanent CSI driver errors (e.g., InvalidSnapshot.NotFound)
	if tmpVSC.Status != nil && tmpVSC.Status.Error != nil && tmpVSC.Status.Error.Message != nil {
		return false, errors.Errorf(
			"VolumeSnapshotContent %s has error: %s", vsc.Name, *tmpVSC.Status.Error.Message,
		)
	}

	return false, nil
}
@@ -94,6 +94,19 @@ func TestVSCExecute(t *testing.T) {
				return false, errors.Errorf("test error case")
			},
		},
		{
			name:      "Error case with CSI error, dangling VSC should be cleaned up",
			vsc:       builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
			backup:    builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
			expectErr: true,
			function: func(
				ctx context.Context,
				vsc *snapshotv1api.VolumeSnapshotContent,
				client crclient.Client,
			) (bool, error) {
				return false, errors.Errorf("VolumeSnapshotContent %s has error: InvalidSnapshot.NotFound", vsc.Name)
			},
		},
	}

	for _, test := range tests {

@@ -190,6 +203,24 @@ func TestCheckVSCReadiness(t *testing.T) {
			expectErr: false,
			ready:     false,
		},
		{
			name: "VSC has error from CSI driver",
			vsc: &snapshotv1api.VolumeSnapshotContent{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "vsc-1",
					Namespace: "velero",
				},
				Status: &snapshotv1api.VolumeSnapshotContentStatus{
					ReadyToUse: boolPtr(false),
					Error: &snapshotv1api.VolumeSnapshotError{
						Message: stringPtr("InvalidSnapshot.NotFound: The snapshot 'snap-0abc123' does not exist."),
					},
				},
			},
			createVSC: true,
			expectErr: true,
			ready:     false,
		},
	}

	for _, test := range tests {

@@ -207,3 +238,11 @@ func TestCheckVSCReadiness(t *testing.T) {
		})
	}
}

func boolPtr(b bool) *bool {
	return &b
}

func stringPtr(s string) *string {
	return &s
}
@@ -134,6 +134,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
	pv := new(corev1api.PersistentVolume)
	var err error

	var pvNotFoundErr error
	if groupResource == kuberesource.PersistentVolumeClaims {
		if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
			v.logger.WithError(err).Error("fail to convert unstructured into PVC")

@@ -142,8 +143,10 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group

		pv, err = kubeutil.GetPVForPVC(pvc, v.client)
		if err != nil {
			v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
			return false, err
			// Any error means PV not available - save to return later if no policy matches
			v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
			pvNotFoundErr = err
			pv = nil
		}
	}

@@ -158,7 +161,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
		vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
		action, err := v.volumePolicy.GetMatchAction(vfd)
		if err != nil {
			v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
			v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for %+v", vfd)
			return false, err
		}

@@ -167,15 +170,21 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
		// If there is no match action, go on to the next check.
		if action != nil {
			if action.Type == resourcepolicies.Snapshot {
				v.logger.Infof(fmt.Sprintf("performing snapshot action for pv %s", pv.Name))
				v.logger.Infof("performing snapshot action for %+v", vfd)
				return true, nil
			} else {
				v.logger.Infof("Skip snapshot action for pv %s as the action type is %s", pv.Name, action.Type)
				v.logger.Infof("Skip snapshot action for %+v as the action type is %s", vfd, action.Type)
				return false, nil
			}
		}
	}

	// If resource is PVC, and PV is nil (e.g., Pending/Lost PVC with no matching policy), return the original error
	if groupResource == kuberesource.PersistentVolumeClaims && pv == nil && pvNotFoundErr != nil {
		v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
		return false, pvNotFoundErr
	}

	// If this PV is claimed, see if we've already taken a (pod volume backup)
	// snapshot of the contents of this PV. If so, don't take a snapshot.
	if pv.Spec.ClaimRef != nil {

@@ -209,7 +218,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
		return true, nil
	}

	v.logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name))
	v.logger.Infof("skipping snapshot action for pv %s possibly due to no volume policy setting or snapshotVolumes is false", pv.Name)
	return false, nil
}

@@ -219,6 +228,7 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
		return false, nil
	}

	var pvNotFoundErr error
	if v.volumePolicy != nil {
		var resource any
		var err error

@@ -230,10 +240,13 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
				v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
				return false, err
			}
			resource, err = kubeutil.GetPVForPVC(pvc, v.client)
			pvResource, err := kubeutil.GetPVForPVC(pvc, v.client)
			if err != nil {
				v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
				return false, err
				// Any error means PV not available - save to return later if no policy matches
				v.logger.Debugf("PV not found for PVC %s: %v", pvc.Namespace+"/"+pvc.Name, err)
				pvNotFoundErr = err
			} else {
				resource = pvResource
			}
		}

@@ -260,6 +273,12 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
				return false, nil
			}
		}

		// If no policy matched and PV was not found, return the original error
		if pvNotFoundErr != nil {
			v.logger.WithError(pvNotFoundErr).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
			return false, pvNotFoundErr
		}
	}

	if v.shouldPerformFSBackupLegacy(volume, pod) {
@@ -286,7 +286,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
			expectedErr: false,
		},
		{
			name:          "PVC not having PV, return false and error case PV not found",
			name:          "PVC not having PV, return false and error when no matching policy",
			inputObj:      builder.ForPersistentVolumeClaim("default", "example-pvc").StorageClass("gp2-csi").Result(),
			groupResource: kuberesource.PersistentVolumeClaims,
			resourcePolicies: &resourcepolicies.ResourcePolicies{

@@ -1234,3 +1234,312 @@ func TestNewVolumeHelperImplWithCache_UsesCache(t *testing.T) {
	require.NoError(t, err)
	require.False(t, shouldSnapshot, "Expected snapshot to be skipped due to fs-backup selection via cache")
}

// TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC tests that Pending and Lost PVCs with
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
func TestVolumeHelperImpl_ShouldPerformSnapshot_UnboundPVC(t *testing.T) {
	testCases := []struct {
		name             string
		inputPVC         *corev1api.PersistentVolumeClaim
		resourcePolicies *resourcepolicies.ResourcePolicies
		shouldSnapshot   bool
		expectedErr      bool
	}{
		{
			name: "Pending PVC with phase-based skip policy should not error and return false",
			inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
				StorageClass("non-existent-class").
				Phase(corev1api.ClaimPending).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Pending"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldSnapshot: false,
			expectedErr:    false,
		},
		{
			name: "Pending PVC without matching skip policy should error (no PV)",
			inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
				StorageClass("non-existent-class").
				Phase(corev1api.ClaimPending).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldSnapshot: false,
			expectedErr:    true,
		},
		{
			name: "Lost PVC with phase-based skip policy should not error and return false",
			inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
				StorageClass("some-class").
				Phase(corev1api.ClaimLost).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Lost"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldSnapshot: false,
			expectedErr:    false,
		},
		{
			name: "Lost PVC with policy for Pending and Lost should not error and return false",
			inputPVC: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
				StorageClass("some-class").
				Phase(corev1api.ClaimLost).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Pending", "Lost"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldSnapshot: false,
			expectedErr:    false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t)

			var p *resourcepolicies.Policies
			if tc.resourcePolicies != nil {
				p = &resourcepolicies.Policies{}
				err := p.BuildPolicy(tc.resourcePolicies)
				require.NoError(t, err)
			}

			vh := NewVolumeHelperImpl(
				p,
				ptr.To(true),
				logrus.StandardLogger(),
				fakeClient,
				false,
				false,
			)

			obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputPVC)
			require.NoError(t, err)

			actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumeClaims)
			if tc.expectedErr {
				require.Error(t, actualError, "Want error; Got nil error")
				return
			}

			require.NoError(t, actualError)
			require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
		})
	}
}
// TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC tests that Pending and Lost PVCs with
// phase-based skip policies don't cause errors when GetPVForPVC would fail.
func TestVolumeHelperImpl_ShouldPerformFSBackup_UnboundPVC(t *testing.T) {
	testCases := []struct {
		name             string
		pod              *corev1api.Pod
		pvc              *corev1api.PersistentVolumeClaim
		resourcePolicies *resourcepolicies.ResourcePolicies
		shouldFSBackup   bool
		expectedErr      bool
	}{
		{
			name: "Pending PVC with phase-based skip policy should not error and return false",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-pending",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-pending",
							},
						},
					}).Result(),
			pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending").
				StorageClass("non-existent-class").
				Phase(corev1api.ClaimPending).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Pending"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldFSBackup: false,
			expectedErr:    false,
		},
		{
			name: "Pending PVC without matching skip policy should error (no PV)",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-pending",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-pending-no-policy",
							},
						},
					}).Result(),
			pvc: builder.ForPersistentVolumeClaim("ns", "pvc-pending-no-policy").
				StorageClass("non-existent-class").
				Phase(corev1api.ClaimPending).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldFSBackup: false,
			expectedErr:    true,
		},
		{
			name: "Lost PVC with phase-based skip policy should not error and return false",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-lost",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-lost",
							},
						},
					}).Result(),
			pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
				StorageClass("some-class").
				Phase(corev1api.ClaimLost).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Lost"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldFSBackup: false,
			expectedErr:    false,
		},
		{
			name: "Lost PVC with policy for Pending and Lost should not error and return false",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-lost",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-lost",
							},
						},
					}).Result(),
			pvc: builder.ForPersistentVolumeClaim("ns", "pvc-lost").
				StorageClass("some-class").
				Phase(corev1api.ClaimLost).
				Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"pvcPhase": []string{"Pending", "Lost"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Skip,
						},
					},
				},
			},
			shouldFSBackup: false,
			expectedErr:    false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.pvc)
			require.NoError(t, fakeClient.Create(t.Context(), tc.pod))

			var p *resourcepolicies.Policies
			if tc.resourcePolicies != nil {
				p = &resourcepolicies.Policies{}
				err := p.BuildPolicy(tc.resourcePolicies)
				require.NoError(t, err)
			}

			vh := NewVolumeHelperImpl(
				p,
				ptr.To(true),
				logrus.StandardLogger(),
				fakeClient,
				false,
				false,
			)

			actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
			if tc.expectedErr {
				require.Error(t, actualError, "Want error; Got nil error")
				return
			}

			require.NoError(t, actualError)
			require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
		})
	}
}
@@ -102,6 +102,15 @@ const (
    // even if the resource contains a matching selector label.
    ExcludeFromBackupLabel = "velero.io/exclude-from-backup"

    // SkipFromBackupAnnotation is the annotation used by internal BackupItemActions
    // to indicate that a resource should be skipped from backup,
    // even if it doesn't have the ExcludeFromBackupLabel.
    // This is used in cases where we want to skip backup of a resource based on some logic in a plugin.
    //
    // Notice: SkipFromBackupAnnotation's priority is higher than MustIncludeAdditionalItemAnnotation.
    // If SkipFromBackupAnnotation is set, the resource will be skipped even if MustIncludeAdditionalItemAnnotation is set.
    SkipFromBackupAnnotation = "velero.io/skip-from-backup"

    // DefaultVGSLabelKey is the default label key used to group PVCs under a VolumeGroupSnapshot.
    DefaultVGSLabelKey = "velero.io/volume-group"

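For context, here is a minimal sketch of how an internal BackupItemAction could make use of this annotation. The helper and its skip predicate are hypothetical (a real plugin would set the annotation inside its Execute method); only the annotation key itself comes from the diff above:

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// SkipFromBackupAnnotation mirrors the constant introduced above.
const SkipFromBackupAnnotation = "velero.io/skip-from-backup"

// markSkipped stamps the annotation on an item so that executeActions
// later drops it from the backup. shouldSkip stands in for whatever
// plugin-specific logic decides the item is not worth backing up.
func markSkipped(obj *unstructured.Unstructured, shouldSkip func(*unstructured.Unstructured) bool) {
    if !shouldSkip(obj) {
        return
    }
    annotations := obj.GetAnnotations()
    if annotations == nil {
        annotations = map[string]string{}
    }
    annotations[SkipFromBackupAnnotation] = "true"
    obj.SetAnnotations(annotations)
}

func main() {
    obj := &unstructured.Unstructured{Object: map[string]any{
        "apiVersion": "v1",
        "kind":       "ConfigMap",
        "metadata":   map[string]any{"name": "demo", "namespace": "ns"},
    }}
    markSkipped(obj, func(*unstructured.Unstructured) bool { return true })
    fmt.Println(obj.GetAnnotations()) // map[velero.io/skip-from-backup:true]
}
```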
@@ -98,6 +98,14 @@ func (m *backedUpItemsMap) AddItem(key itemKey) {
    m.totalItems[key] = struct{}{}
}

func (m *backedUpItemsMap) DeleteItem(key itemKey) {
    m.Lock()
    defer m.Unlock()

    delete(m.backedUpItems, key)
    delete(m.totalItems, key)
}

func (m *backedUpItemsMap) AddItemToTotal(key itemKey) {
    m.Lock()
    defer m.Unlock()

@@ -244,6 +244,14 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
    return false, itemFiles, kubeerrs.NewAggregate(backupErrs)
}

// If err is nil and updatedObj is nil, it means the item is skipped by a plugin action;
// return here to avoid backing up the item and to avoid a potential NPE in the following code.
if updatedObj == nil {
    log.Infof("Remove item from the backup's backupItems list and totalItems list because it's skipped by plugin action.")
    ib.backupRequest.BackedUpItems.DeleteItem(key)
    return false, itemFiles, nil
}

itemFiles = append(itemFiles, additionalItemFiles...)
obj = updatedObj
if metadata, err = meta.Accessor(obj); err != nil {
@@ -398,6 +406,13 @@ func (ib *itemBackupper) executeActions(
    }

    u := &unstructured.Unstructured{Object: updatedItem.UnstructuredContent()}

    if _, ok := u.GetAnnotations()[velerov1api.SkipFromBackupAnnotation]; ok {
        log.Infof("Resource (groupResource=%s, namespace=%s, name=%s) is skipped from backup by action %s.",
            groupResource.String(), namespace, name, actionName)
        return nil, itemFiles, nil
    }

    if actionName == csiBIAPluginName {
        if additionalItemIdentifiers == nil && u.GetAnnotations()[velerov1api.SkippedNoCSIPVAnnotation] == "true" {
            // snapshot was skipped by CSI plugin
@@ -687,15 +702,14 @@ func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource
        return nil, errors.WithStack(err)
    }

    pvName := pvc.Spec.VolumeName
    if pvName == "" {
        return nil, errors.Errorf("PVC has no volume backing this claim")
    }

    pv := &corev1api.PersistentVolume{}
    if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
        return nil, errors.WithStack(err)
    var pv *corev1api.PersistentVolume
    if pvName := pvc.Spec.VolumeName; pvName != "" {
        pv = &corev1api.PersistentVolume{}
        if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
            return nil, errors.WithStack(err)
        }
    }
    // If pv is nil for unbound PVCs - policy matching will use PVC-only conditions
    vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
    return ib.backupRequest.ResPolicies.GetMatchAction(vfd)
}
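The comment above is the heart of this change: with an unbound PVC the PV is nil, so only PVC-level conditions can match. A minimal sketch of that path, using the same resourcepolicies calls that appear in this diff (the policy content is a hypothetical example):

```go
package backup

import (
    corev1api "k8s.io/api/core/v1"

    "github.com/vmware-tanzu/velero/internal/resourcepolicies"
)

// matchUnboundPVC shows PVC-only policy matching: with a nil PV, only
// conditions such as pvcPhase can match the volume policy.
func matchUnboundPVC(pvc *corev1api.PersistentVolumeClaim) (*resourcepolicies.Action, error) {
    resPolicies := &resourcepolicies.ResourcePolicies{
        Version: "v1",
        VolumePolicies: []resourcepolicies.VolumePolicy{
            {
                Conditions: map[string]any{"pvcPhase": []string{"Pending", "Lost"}},
                Action:     resourcepolicies.Action{Type: resourcepolicies.Skip},
            },
        },
    }
    policies := &resourcepolicies.Policies{}
    if err := policies.BuildPolicy(resPolicies); err != nil {
        return nil, err
    }
    // pv is nil here, exactly as in the unbound-PVC branch above.
    vfd := resourcepolicies.NewVolumeFilterData(nil, nil, pvc)
    return policies.GetMatchAction(vfd)
}
```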
@@ -709,7 +723,10 @@ func (ib *itemBackupper) trackSkippedPV(obj runtime.Unstructured, groupResource
    if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
        ib.backupRequest.SkippedPVTracker.Track(name, approach, reason)
    } else if err != nil {
        log.WithError(err).Warnf("unable to get PV name, skip tracking.")
        // Log at info level for tracking purposes. This is not an error because
        // it's expected for some resources (e.g., PVCs in Pending or Lost phase)
        // to not have a PV name. This occurs when volume policy skips unbound PVCs.
        log.WithError(err).Infof("unable to get PV name, skip tracking.")
    }
}

@@ -719,6 +736,17 @@ func (ib *itemBackupper) unTrackSkippedPV(obj runtime.Unstructured, groupResourc
    if name, err := getPVName(obj, groupResource); len(name) > 0 && err == nil {
        ib.backupRequest.SkippedPVTracker.Untrack(name)
    } else if err != nil {
        // For PVCs in Pending or Lost phase, it's expected that there's no PV name.
        // Log at debug level instead of warning to reduce noise.
        if groupResource == kuberesource.PersistentVolumeClaims {
            pvc := new(corev1api.PersistentVolumeClaim)
            if convErr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); convErr == nil {
                if pvc.Status.Phase == corev1api.ClaimPending || pvc.Status.Phase == corev1api.ClaimLost {
                    log.WithError(err).Debugf("unable to get PV name for %s PVC, skip untracking.", pvc.Status.Phase)
                    return
                }
            }
        }
        log.WithError(err).Warnf("unable to get PV name, skip untracking.")
    }
}

@@ -17,12 +17,15 @@ limitations under the License.
package backup

import (
    "bytes"
    "testing"

    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/require"
    "k8s.io/apimachinery/pkg/runtime/schema"
    ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"

    "github.com/vmware-tanzu/velero/internal/resourcepolicies"
    "github.com/vmware-tanzu/velero/pkg/kuberesource"

    "github.com/stretchr/testify/assert"
@@ -269,3 +272,225 @@ func TestAddVolumeInfo(t *testing.T) {
        })
    }
}

func TestGetMatchAction_PendingLostPVC(t *testing.T) {
    scheme := runtime.NewScheme()
    require.NoError(t, corev1api.AddToScheme(scheme))

    // Create resource policies that skip Pending/Lost PVCs
    resPolicies := &resourcepolicies.ResourcePolicies{
        Version: "v1",
        VolumePolicies: []resourcepolicies.VolumePolicy{
            {
                Conditions: map[string]any{
                    "pvcPhase": []string{"Pending", "Lost"},
                },
                Action: resourcepolicies.Action{
                    Type: resourcepolicies.Skip,
                },
            },
        },
    }
    policies := &resourcepolicies.Policies{}
    err := policies.BuildPolicy(resPolicies)
    require.NoError(t, err)

    testCases := []struct {
        name           string
        pvc            *corev1api.PersistentVolumeClaim
        pv             *corev1api.PersistentVolume
        expectedAction *resourcepolicies.Action
        expectError    bool
    }{
        {
            name: "Pending PVC with no VolumeName should match pvcPhase policy",
            pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
                StorageClass("test-sc").
                Phase(corev1api.ClaimPending).
                Result(),
            pv:             nil,
            expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
            expectError:    false,
        },
        {
            name: "Lost PVC with no VolumeName should match pvcPhase policy",
            pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
                StorageClass("test-sc").
                Phase(corev1api.ClaimLost).
                Result(),
            pv:             nil,
            expectedAction: &resourcepolicies.Action{Type: resourcepolicies.Skip},
            expectError:    false,
        },
        {
            name: "Bound PVC with VolumeName and matching PV should not match pvcPhase policy",
            pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
                StorageClass("test-sc").
                VolumeName("test-pv").
                Phase(corev1api.ClaimBound).
                Result(),
            pv:             builder.ForPersistentVolume("test-pv").StorageClass("test-sc").Result(),
            expectedAction: nil,
            expectError:    false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Build fake client with PV if present
            clientBuilder := ctrlfake.NewClientBuilder().WithScheme(scheme)
            if tc.pv != nil {
                clientBuilder = clientBuilder.WithObjects(tc.pv)
            }
            fakeClient := clientBuilder.Build()

            ib := &itemBackupper{
                kbClient: fakeClient,
                backupRequest: &Request{
                    ResPolicies: policies,
                },
            }

            // Convert PVC to unstructured
            pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
            require.NoError(t, err)
            obj := &unstructured.Unstructured{Object: pvcData}

            action, err := ib.getMatchAction(obj, kuberesource.PersistentVolumeClaims, csiBIAPluginName)
            if tc.expectError {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }

            if tc.expectedAction == nil {
                assert.Nil(t, action)
            } else {
                require.NotNil(t, action)
                assert.Equal(t, tc.expectedAction.Type, action.Type)
            }
        })
    }
}

func TestTrackSkippedPV_PendingLostPVC(t *testing.T) {
    testCases := []struct {
        name string
        pvc  *corev1api.PersistentVolumeClaim
    }{
        {
            name: "Pending PVC should log at info level",
            pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
                Phase(corev1api.ClaimPending).
                Result(),
        },
        {
            name: "Lost PVC should log at info level",
            pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
                Phase(corev1api.ClaimLost).
                Result(),
        },
        {
            name: "Bound PVC without VolumeName should log at info level",
            pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
                Phase(corev1api.ClaimBound).
                Result(),
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            ib := &itemBackupper{
                backupRequest: &Request{
                    SkippedPVTracker: NewSkipPVTracker(),
                },
            }

            // Set up log capture
            logOutput := &bytes.Buffer{}
            logger := logrus.New()
            logger.SetOutput(logOutput)
            logger.SetLevel(logrus.DebugLevel)

            // Convert PVC to unstructured
            pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
            require.NoError(t, err)
            obj := &unstructured.Unstructured{Object: pvcData}

            ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, "", "test reason", logger)

            logStr := logOutput.String()
            assert.Contains(t, logStr, "level=info")
            assert.Contains(t, logStr, "unable to get PV name, skip tracking.")
        })
    }
}

func TestUnTrackSkippedPV_PendingLostPVC(t *testing.T) {
    testCases := []struct {
        name               string
        pvc                *corev1api.PersistentVolumeClaim
        expectWarningLog   bool
        expectDebugMessage string
    }{
        {
            name: "Pending PVC should log at debug level, not warning",
            pvc: builder.ForPersistentVolumeClaim("ns", "pending-pvc").
                Phase(corev1api.ClaimPending).
                Result(),
            expectWarningLog:   false,
            expectDebugMessage: "unable to get PV name for Pending PVC, skip untracking.",
        },
        {
            name: "Lost PVC should log at debug level, not warning",
            pvc: builder.ForPersistentVolumeClaim("ns", "lost-pvc").
                Phase(corev1api.ClaimLost).
                Result(),
            expectWarningLog:   false,
            expectDebugMessage: "unable to get PV name for Lost PVC, skip untracking.",
        },
        {
            name: "Bound PVC without VolumeName should log warning",
            pvc: builder.ForPersistentVolumeClaim("ns", "bound-pvc").
                Phase(corev1api.ClaimBound).
                Result(),
            expectWarningLog:   true,
            expectDebugMessage: "",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            ib := &itemBackupper{
                backupRequest: &Request{
                    SkippedPVTracker: NewSkipPVTracker(),
                },
            }

            // Set up log capture
            logOutput := &bytes.Buffer{}
            logger := logrus.New()
            logger.SetOutput(logOutput)
            logger.SetLevel(logrus.DebugLevel)

            // Convert PVC to unstructured
            pvcData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
            require.NoError(t, err)
            obj := &unstructured.Unstructured{Object: pvcData}

            ib.unTrackSkippedPV(obj, kuberesource.PersistentVolumeClaims, logger)

            logStr := logOutput.String()
            if tc.expectWarningLog {
                assert.Contains(t, logStr, "level=warning")
                assert.Contains(t, logStr, "unable to get PV name, skip untracking.")
            } else {
                assert.NotContains(t, logStr, "level=warning")
                if tc.expectDebugMessage != "" {
                    assert.Contains(t, logStr, "level=debug")
                    assert.Contains(t, logStr, tc.expectDebugMessage)
                }
            }
        })
    }
}

@@ -275,11 +275,21 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
            return nil, err
        }
    }
    veleroPodResources, err := kubeutil.ParseResourceRequirements(o.VeleroPodCPURequest, o.VeleroPodMemRequest, o.VeleroPodCPULimit, o.VeleroPodMemLimit)
    veleroPodResources, err := kubeutil.ParseCPUAndMemoryResources(
        o.VeleroPodCPURequest,
        o.VeleroPodMemRequest,
        o.VeleroPodCPULimit,
        o.VeleroPodMemLimit,
    )
    if err != nil {
        return nil, err
    }
    nodeAgentPodResources, err := kubeutil.ParseResourceRequirements(o.NodeAgentPodCPURequest, o.NodeAgentPodMemRequest, o.NodeAgentPodCPULimit, o.NodeAgentPodMemLimit)
    nodeAgentPodResources, err := kubeutil.ParseCPUAndMemoryResources(
        o.NodeAgentPodCPURequest,
        o.NodeAgentPodMemRequest,
        o.NodeAgentPodCPULimit,
        o.NodeAgentPodMemLimit,
    )
    if err != nil {
        return nil, err
    }

@@ -323,7 +323,25 @@ func (s *nodeAgentServer) run() {

    podResources := corev1api.ResourceRequirements{}
    if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
        if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
        // To keep a PodResources ConfigMap without an ephemeral storage request/limit backward compatible,
        // avoid passing an empty value, because an empty string would cause a parsing error.
        ephemeralStorageRequest := constant.DefaultEphemeralStorageRequest
        if s.dataPathConfigs.PodResources.EphemeralStorageRequest != "" {
            ephemeralStorageRequest = s.dataPathConfigs.PodResources.EphemeralStorageRequest
        }
        ephemeralStorageLimit := constant.DefaultEphemeralStorageLimit
        if s.dataPathConfigs.PodResources.EphemeralStorageLimit != "" {
            ephemeralStorageLimit = s.dataPathConfigs.PodResources.EphemeralStorageLimit
        }

        if res, err := kube.ParseResourceRequirements(
            s.dataPathConfigs.PodResources.CPURequest,
            s.dataPathConfigs.PodResources.MemoryRequest,
            ephemeralStorageRequest,
            s.dataPathConfigs.PodResources.CPULimit,
            s.dataPathConfigs.PodResources.MemoryLimit,
            ephemeralStorageLimit,
        ); err != nil {
            s.logger.WithError(err).Warn("Pod resource requirements are invalid, ignore")
        } else {
            podResources = res

@@ -23,4 +23,7 @@ const (

    PluginCSIPVCRestoreRIA            = "velero.io/csi-pvc-restorer"
    PluginCsiVolumeSnapshotRestoreRIA = "velero.io/csi-volumesnapshot-restorer"

    DefaultEphemeralStorageRequest = "0"
    DefaultEphemeralStorageLimit   = "0"
)

@@ -129,6 +129,13 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
    } else {
        schedule.Status.Phase = velerov1.SchedulePhaseEnabled
        schedule.Status.ValidationErrors = nil

        // Compute expected interval between consecutive scheduled backup runs.
        // Only meaningful when the cron expression is valid.
        now := c.clock.Now()
        nextRun := cronSchedule.Next(now)
        nextNextRun := cronSchedule.Next(nextRun)
        c.metrics.SetScheduleExpectedIntervalSeconds(schedule.Name, nextNextRun.Sub(nextRun).Seconds())
    }

    scheduleNeedsPatch := false

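The interval here is simply the gap between the next two fires of the cron schedule. A small standalone illustration of the same computation (assuming the robfig/cron/v3 package for the Schedule implementation; Velero's actual cron dependency may differ):

```go
package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    sched, err := cron.ParseStandard("0 1 * * *") // daily at 01:00
    if err != nil {
        panic(err)
    }
    // Same computation as the reconciler above: the distance between the
    // next two runs approximates the schedule's expected interval.
    now := time.Now()
    nextRun := sched.Next(now)
    nextNextRun := sched.Next(nextRun)
    fmt.Printf("expected interval: %.0fs\n", nextNextRun.Sub(nextRun).Seconds()) // typically 86400s
}
```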
@@ -124,6 +124,15 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
        "owner": ownerObject.Name,
    })

    volumeTopology, err := kube.GetVolumeTopology(ctx, e.kubeClient.CoreV1(), e.kubeClient.StorageV1(), csiExposeParam.SourcePVName, csiExposeParam.StorageClass)
    if err != nil {
        return errors.Wrapf(err, "error getting volume topology for PV %s, storage class %s", csiExposeParam.SourcePVName, csiExposeParam.StorageClass)
    }

    if volumeTopology != nil {
        curLog.Infof("Using volume topology %v", volumeTopology)
    }

    curLog.Info("Exposing CSI snapshot")

    volumeSnapshot, err := csi.WaitVolumeSnapshotReady(ctx, e.csiSnapshotClient, csiExposeParam.SnapshotName, csiExposeParam.SourceNamespace, csiExposeParam.ExposeTimeout, curLog)

@@ -254,6 +263,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
        csiExposeParam.NodeOS,
        csiExposeParam.PriorityClassName,
        intoleratableNodes,
        volumeTopology,
    )
    if err != nil {
        return errors.Wrap(err, "error to create backup pod")

@@ -320,7 +330,8 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1a
    curLog.WithField("pod", pod.Name).Infof("Backup volume is found in pod at index %v", i)

    var nodeOS *string
    if os, found := pod.Spec.NodeSelector[kube.NodeOSLabel]; found {
    if pod.Spec.OS != nil {
        os := string(pod.Spec.OS.Name)
        nodeOS = &os
    }

@@ -588,6 +599,7 @@ func (e *csiSnapshotExposer) createBackupPod(
    nodeOS string,
    priorityClassName string,
    intoleratableNodes []string,
    volumeTopology *corev1api.NodeSelector,
) (*corev1api.Pod, error) {
    podName := ownerObject.Name

@@ -643,6 +655,10 @@ func (e *csiSnapshotExposer) createBackupPod(
    args = append(args, podInfo.logFormatArgs...)
    args = append(args, podInfo.logLevelArgs...)

    if affinity == nil {
        affinity = &kube.LoadAffinity{}
    }

    var securityCtx *corev1api.PodSecurityContext
    nodeSelector := map[string]string{}
    podOS := corev1api.PodOS{}

@@ -654,9 +670,14 @@ func (e *csiSnapshotExposer) createBackupPod(
            },
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
        podOS.Name = kube.NodeOSWindows

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpIn,
        })

        toleration = append(toleration, []corev1api.Toleration{
            {
                Key: "os",

@@ -683,11 +704,15 @@ func (e *csiSnapshotExposer) createBackupPod(
            }
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
        podOS.Name = kube.NodeOSLinux

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpNotIn,
        })
    }

    var podAffinity *corev1api.Affinity
    if len(intoleratableNodes) > 0 {
        if affinity == nil {
            affinity = &kube.LoadAffinity{}

@@ -700,9 +725,7 @@ func (e *csiSnapshotExposer) createBackupPod(
        })
    }

    if affinity != nil {
        podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity})
    }
    podAffinity := kube.ToSystemAffinity(affinity, volumeTopology)

    pod := &corev1api.Pod{
        ObjectMeta: metav1.ObjectMeta{

@@ -154,6 +154,7 @@ func TestCreateBackupPodWithPriorityClass(t *testing.T) {
        kube.NodeOSLinux,
        tc.expectedPriorityClass,
        nil,
        nil,
    )

    require.NoError(t, err, tc.description)

@@ -239,6 +240,7 @@ func TestCreateBackupPodWithMissingConfigMap(t *testing.T) {
        kube.NodeOSLinux,
        "", // empty priority class since config map is missing
        nil,
        nil,
    )

    // Should succeed even when config map is missing

@@ -68,6 +68,12 @@ func TestExpose(t *testing.T) {

    var restoreSize int64 = 123456

    scObj := &storagev1api.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name: "fake-sc",
        },
    }

    snapshotClass := "fake-snapshot-class"
    vsObject := &snapshotv1api.VolumeSnapshot{
        ObjectMeta: metav1.ObjectMeta{

@@ -199,6 +205,18 @@ func TestExpose(t *testing.T) {
        expectedAffinity      *corev1api.Affinity
        expectedPVCAnnotation map[string]string
    }{
        {
            name:        "get volume topology fail",
            ownerBackup: backup,
            exposeParam: CSISnapshotExposeParam{
                SnapshotName:     "fake-vs",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            err: "error getting volume topology for PV fake-pv, storage class fake-sc: error getting storage class fake-sc: storageclasses.storage.k8s.io \"fake-sc\" not found",
        },
        {
            name:        "wait vs ready fail",
            ownerBackup: backup,

@@ -206,6 +224,11 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error wait volume snapshot ready: error to get VolumeSnapshot /fake-vs: volumesnapshots.snapshot.storage.k8s.io \"fake-vs\" not found",
        },

@@ -217,10 +240,15 @@ func TestExpose(t *testing.T) {
                SourceNamespace:  "fake-ns",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to get volume snapshot content: error getting volume snapshot content from API: volumesnapshotcontents.snapshot.storage.k8s.io \"fake-vsc\" not found",
        },
        {

@@ -231,6 +259,8 @@ func TestExpose(t *testing.T) {
                SourceNamespace:  "fake-ns",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -245,6 +275,9 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to delete volume snapshot: error to delete volume snapshot: fake-delete-error",
        },
        {

@@ -255,6 +288,8 @@ func TestExpose(t *testing.T) {
                SourceNamespace:  "fake-ns",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -269,6 +304,9 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to delete volume snapshot content: error to delete volume snapshot content: fake-delete-error",
        },
        {

@@ -279,6 +317,8 @@ func TestExpose(t *testing.T) {
                SourceNamespace:  "fake-ns",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -293,6 +333,9 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to create backup volume snapshot: fake-create-error",
        },
        {

@@ -303,6 +346,8 @@ func TestExpose(t *testing.T) {
                SourceNamespace:  "fake-ns",
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -317,6 +362,9 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to create backup volume snapshot content: fake-create-error",
        },
        {

@@ -326,11 +374,16 @@ func TestExpose(t *testing.T) {
                SnapshotName:    "fake-vs",
                SourceNamespace: "fake-ns",
                AccessMode:      "fake-mode",
                StorageClass:    "fake-sc",
                SourcePVName:    "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,
                vscObj,
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to create backup pvc: unsupported access mode fake-mode",
        },
        {

@@ -342,6 +395,8 @@ func TestExpose(t *testing.T) {
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                AccessMode:       AccessModeFileSystem,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -356,6 +411,9 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            kubeClientObj: []runtime.Object{
                scObj,
            },
            err: "error to create backup pvc: error to create pvc: fake-create-error",
        },
        {

@@ -367,6 +425,8 @@ func TestExpose(t *testing.T) {
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -374,6 +434,7 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            kubeReactors: []reactor{
                {

@@ -395,6 +456,8 @@ func TestExpose(t *testing.T) {
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -402,6 +465,24 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {

@@ -413,6 +494,8 @@ func TestExpose(t *testing.T) {
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObject,

@@ -420,6 +503,24 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {

@@ -432,6 +533,8 @@ func TestExpose(t *testing.T) {
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,
                VolumeSize:       *resource.NewQuantity(567890, ""),
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
            },
            snapshotClientObj: []runtime.Object{
                vsObjectWithoutRestoreSize,

@@ -439,8 +542,26 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedVolumeSize: resource.NewQuantity(567890, ""),
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "backupPod mounts read only backupPVC",

@@ -449,6 +570,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -465,8 +587,26 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedReadOnlyPVC: true,
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "backupPod mounts read only backupPVC and storageClass specified in backupPVC config",

@@ -475,6 +615,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -491,9 +632,27 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedReadOnlyPVC:           true,
            expectedBackupPVCStorageClass: "fake-sc-read-only",
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "backupPod mounts backupPVC with storageClass specified in backupPVC config",

@@ -502,6 +661,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -517,8 +677,26 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedBackupPVCStorageClass: "fake-sc-read-only",
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "Affinity per StorageClass",

@@ -527,6 +705,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -551,6 +730,7 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{

@@ -563,6 +743,11 @@ func TestExpose(t *testing.T) {
                                        Operator: corev1api.NodeSelectorOpIn,
                                        Values:   []string{"Linux"},
                                    },
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },

@@ -577,6 +762,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -606,6 +792,7 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedBackupPVCStorageClass: "fake-sc-read-only",
            expectedAffinity: &corev1api.Affinity{

@@ -619,6 +806,11 @@ func TestExpose(t *testing.T) {
                                        Operator: corev1api.NodeSelectorOpIn,
                                        Values:   []string{"amd64"},
                                    },
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },

@@ -633,6 +825,7 @@ func TestExpose(t *testing.T) {
                SnapshotName:     "fake-vs",
                SourceNamespace:  "fake-ns",
                StorageClass:     "fake-sc",
                SourcePVName:     "fake-pv",
                AccessMode:       AccessModeFileSystem,
                OperationTimeout: time.Millisecond,
                ExposeTimeout:    time.Millisecond,

@@ -649,9 +842,26 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedBackupPVCStorageClass: "fake-sc-read-only",
            expectedAffinity: nil,
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "IntolerateSourceNode, get source node fail",

@@ -677,6 +887,7 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            kubeReactors: []reactor{
                {

@@ -687,7 +898,23 @@ func TestExpose(t *testing.T) {
                },
                },
            },
            expectedAffinity: nil,
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedPVCAnnotation: nil,
        },
        {

@@ -714,8 +941,25 @@ func TestExpose(t *testing.T) {
            },
            kubeClientObj: []runtime.Object{
                daemonSet,
                scObj,
            },
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                        NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                            {
                                MatchExpressions: []corev1api.NodeSelectorRequirement{
                                    {
                                        Key:      "kubernetes.io/os",
                                        Operator: corev1api.NodeSelectorOpNotIn,
                                        Values:   []string{"windows"},
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedAffinity:      nil,
            expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
        },
        {

@@ -744,6 +988,7 @@ func TestExpose(t *testing.T) {
                daemonSet,
                volumeAttachement1,
                volumeAttachement2,
                scObj,
            },
            expectedAffinity: &corev1api.Affinity{
                NodeAffinity: &corev1api.NodeAffinity{

@@ -751,6 +996,11 @@ func TestExpose(t *testing.T) {
                    NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                        {
                            MatchExpressions: []corev1api.NodeSelectorRequirement{
                                {
                                    Key:      "kubernetes.io/os",
                                    Operator: corev1api.NodeSelectorOpNotIn,
                                    Values:   []string{"windows"},
                                },
                                {
                                    Key:      "kubernetes.io/hostname",
                                    Operator: corev1api.NodeSelectorOpNotIn,

@@ -844,6 +1094,8 @@ func TestExpose(t *testing.T) {

            if test.expectedAffinity != nil {
                assert.Equal(t, test.expectedAffinity, backupPod.Spec.Affinity)
            } else {
                assert.Nil(t, backupPod.Spec.Affinity)
            }

            if test.expectedPVCAnnotation != nil {

@@ -493,13 +493,15 @@ func (e *genericRestoreExposer) createRestorePod(
    containerName := string(ownerObject.UID)
    volumeName := string(ownerObject.UID)

    var podAffinity *corev1api.Affinity
    if selectedNode == "" {
        e.log.Infof("No selected node for restore pod. Try to get affinity from the node-agent config.")
    nodeSelector := map[string]string{}
    if selectedNode != "" {
        affinity = nil
        nodeSelector["kubernetes.io/hostname"] = selectedNode
        e.log.Infof("Selected node for restore pod. Ignore affinity from the node-agent config.")
    }

    if affinity != nil {
        podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity})
    }
    if affinity == nil {
        affinity = &kube.LoadAffinity{}
    }

    podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, nodeOS)

@@ -566,7 +568,6 @@ func (e *genericRestoreExposer) createRestorePod(
    args = append(args, podInfo.logLevelArgs...)

    var securityCtx *corev1api.PodSecurityContext
    nodeSelector := map[string]string{}
    podOS := corev1api.PodOS{}
    if nodeOS == kube.NodeOSWindows {
        userID := "ContainerAdministrator"

@@ -576,9 +577,14 @@ func (e *genericRestoreExposer) createRestorePod(
            },
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
        podOS.Name = kube.NodeOSWindows

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpIn,
        })

        toleration = append(toleration, []corev1api.Toleration{
            {
                Key: "os",

@@ -599,10 +605,17 @@ func (e *genericRestoreExposer) createRestorePod(
            RunAsUser: &userID,
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
        podOS.Name = kube.NodeOSLinux

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpNotIn,
        })
    }

    podAffinity := kube.ToSystemAffinity(affinity, nil)

    pod := &corev1api.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: restorePodName,

@@ -656,7 +669,6 @@ func (e *genericRestoreExposer) createRestorePod(
            ServiceAccountName:            podInfo.serviceAccount,
            TerminationGracePeriodSeconds: &gracePeriod,
            Volumes:                       volumes,
            NodeName:                      selectedNode,
            RestartPolicy:                 corev1api.RestartPolicyNever,
            SecurityContext:               securityCtx,
            Tolerations:                   toleration,

@@ -434,6 +434,8 @@ func (e *podVolumeExposer) createHostingPod(
    args = append(args, podInfo.logFormatArgs...)
    args = append(args, podInfo.logLevelArgs...)

    affinity := &kube.LoadAffinity{}

    var securityCtx *corev1api.PodSecurityContext
    var containerSecurityCtx *corev1api.SecurityContext
    nodeSelector := map[string]string{}

@@ -446,9 +448,14 @@ func (e *podVolumeExposer) createHostingPod(
            },
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
        podOS.Name = kube.NodeOSWindows

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpIn,
        })

        toleration = append(toleration, []corev1api.Toleration{
            {
                Key: "os",

@@ -472,10 +479,17 @@ func (e *podVolumeExposer) createHostingPod(
            Privileged: &privileged,
        }

        nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
        podOS.Name = kube.NodeOSLinux

        affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
            Key:      kube.NodeOSLabel,
            Values:   []string{kube.NodeOSWindows},
            Operator: metav1.LabelSelectorOpNotIn,
        })
    }

    podAffinity := kube.ToSystemAffinity(affinity, nil)

    pod := &corev1api.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: hostingPodName,

@@ -495,6 +509,7 @@ func (e *podVolumeExposer) createHostingPod(
        Spec: corev1api.PodSpec{
            NodeSelector: nodeSelector,
            OS:           &podOS,
            Affinity:     podAffinity,
            Containers: []corev1api.Container{
                {
                    Name: containerName,

@@ -235,12 +235,28 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1api.DaemonSet
    if c.forWindows {
        daemonSet.Spec.Template.Spec.SecurityContext = nil
        daemonSet.Spec.Template.Spec.Containers[0].SecurityContext = nil
        daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
            "kubernetes.io/os": "windows",
        }
        daemonSet.Spec.Template.Spec.OS = &corev1api.PodOS{
            Name: "windows",
        }

        daemonSet.Spec.Template.Spec.Affinity = &corev1api.Affinity{
            NodeAffinity: &corev1api.NodeAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                    NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                        {
                            MatchExpressions: []corev1api.NodeSelectorRequirement{
                                {
                                    Key:      "kubernetes.io/os",
                                    Values:   []string{"windows"},
                                    Operator: corev1api.NodeSelectorOpIn,
                                },
                            },
                        },
                    },
                },
            },
        }

        daemonSet.Spec.Template.Spec.Tolerations = []corev1api.Toleration{
            {
                Key: "os",

@@ -256,11 +272,22 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1api.DaemonSet
            },
        }
    } else {
        daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
            "kubernetes.io/os": "linux",
        }
        daemonSet.Spec.Template.Spec.OS = &corev1api.PodOS{
            Name: "linux",
        daemonSet.Spec.Template.Spec.Affinity = &corev1api.Affinity{
            NodeAffinity: &corev1api.NodeAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                    NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                        {
                            MatchExpressions: []corev1api.NodeSelectorRequirement{
                                {
                                    Key:      "kubernetes.io/os",
                                    Values:   []string{"windows"},
                                    Operator: corev1api.NodeSelectorOpNotIn,
                                },
                            },
                        },
                    },
                },
            },
        }
    }

@@ -34,8 +34,23 @@ func TestDaemonSet(t *testing.T) {
    assert.Equal(t, "velero", ds.ObjectMeta.Namespace)
    assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["name"])
    assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
    assert.Equal(t, "linux", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
    assert.Equal(t, "linux", string(ds.Spec.Template.Spec.OS.Name))
    assert.Equal(t, &corev1api.Affinity{
        NodeAffinity: &corev1api.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                    {
                        MatchExpressions: []corev1api.NodeSelectorRequirement{
                            {
                                Key:      "kubernetes.io/os",
                                Values:   []string{"windows"},
                                Operator: corev1api.NodeSelectorOpNotIn,
                            },
                        },
                    },
                },
            },
        },
    }, ds.Spec.Template.Spec.Affinity)
    assert.Equal(t, corev1api.PodSecurityContext{RunAsUser: &userID}, *ds.Spec.Template.Spec.SecurityContext)
    assert.Equal(t, corev1api.SecurityContext{Privileged: &boolFalse}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)
    assert.Len(t, ds.Spec.Template.Spec.Volumes, 3)

@@ -80,8 +95,24 @@ func TestDaemonSet(t *testing.T) {
    assert.Equal(t, "velero", ds.ObjectMeta.Namespace)
    assert.Equal(t, "node-agent-windows", ds.Spec.Template.ObjectMeta.Labels["name"])
    assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
    assert.Equal(t, "windows", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
    assert.Equal(t, "windows", string(ds.Spec.Template.Spec.OS.Name))
    assert.Equal(t, &corev1api.Affinity{
        NodeAffinity: &corev1api.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                    {
                        MatchExpressions: []corev1api.NodeSelectorRequirement{
                            {
                                Key:      "kubernetes.io/os",
                                Values:   []string{"windows"},
                                Operator: corev1api.NodeSelectorOpIn,
                            },
                        },
                    },
                },
            },
        },
    }, ds.Spec.Template.Spec.Affinity)
    assert.Equal(t, (*corev1api.PodSecurityContext)(nil), ds.Spec.Template.Spec.SecurityContext)
    assert.Equal(t, (*corev1api.SecurityContext)(nil), ds.Spec.Template.Spec.Containers[0].SecurityContext)
}

@@ -364,12 +364,26 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1api.Deployme
    Spec: corev1api.PodSpec{
        RestartPolicy:      corev1api.RestartPolicyAlways,
        ServiceAccountName: c.serviceAccountName,
        NodeSelector: map[string]string{
            "kubernetes.io/os": "linux",
        },
        OS: &corev1api.PodOS{
            Name: "linux",
        },
        Affinity: &corev1api.Affinity{
            NodeAffinity: &corev1api.NodeAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                    NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                        {
                            MatchExpressions: []corev1api.NodeSelectorRequirement{
                                {
                                    Key:      "kubernetes.io/os",
                                    Values:   []string{"windows"},
                                    Operator: corev1api.NodeSelectorOpNotIn,
                                },
                            },
                        },
                    },
                },
            },
        },
        Containers: []corev1api.Container{
            {
                Name: "velero",

@@ -100,8 +100,23 @@ func TestDeployment(t *testing.T) {
    assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2)
    assert.Equal(t, "--repo-maintenance-job-configmap=test-repo-maintenance-config", deploy.Spec.Template.Spec.Containers[0].Args[1])

    assert.Equal(t, "linux", deploy.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
    assert.Equal(t, "linux", string(deploy.Spec.Template.Spec.OS.Name))
    assert.Equal(t, &corev1api.Affinity{
        NodeAffinity: &corev1api.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
                NodeSelectorTerms: []corev1api.NodeSelectorTerm{
                    {
                        MatchExpressions: []corev1api.NodeSelectorRequirement{
                            {
                                Key:      "kubernetes.io/os",
                                Values:   []string{"windows"},
                                Operator: corev1api.NodeSelectorOpNotIn,
                            },
                        },
                    },
                },
            },
        },
    }, deploy.Spec.Template.Spec.Affinity)
}

func TestDeploymentWithPriorityClassName(t *testing.T) {

@@ -80,6 +80,9 @@ const (
    DataDownloadFailureTotal = "data_download_failure_total"
    DataDownloadCancelTotal  = "data_download_cancel_total"

    // schedule metrics
    scheduleExpectedIntervalSeconds = "schedule_expected_interval_seconds"

    // repo maintenance metrics
    repoMaintenanceSuccessTotal = "repo_maintenance_success_total"
    repoMaintenanceFailureTotal = "repo_maintenance_failure_total"

@@ -347,6 +350,14 @@ func NewServerMetrics() *ServerMetrics {
        },
        []string{scheduleLabel, backupNameLabel},
    ),
    scheduleExpectedIntervalSeconds: prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Namespace: metricNamespace,
            Name:      scheduleExpectedIntervalSeconds,
            Help:      "Expected interval between consecutive scheduled backups, in seconds",
        },
        []string{scheduleLabel},
    ),
    repoMaintenanceSuccessTotal: prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: metricNamespace,

@@ -644,6 +655,9 @@ func (m *ServerMetrics) RemoveSchedule(scheduleName string) {
    if c, ok := m.metrics[csiSnapshotFailureTotal].(*prometheus.CounterVec); ok {
        c.DeleteLabelValues(scheduleName, "")
    }
    if g, ok := m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec); ok {
        g.DeleteLabelValues(scheduleName)
    }
}

// InitMetricsForNode initializes counter metrics for a node.

@@ -758,6 +772,14 @@ func (m *ServerMetrics) SetBackupLastSuccessfulTimestamp(backupSchedule string,
    }
}

// SetScheduleExpectedIntervalSeconds records the expected interval in seconds
// between consecutive backups for a schedule.
func (m *ServerMetrics) SetScheduleExpectedIntervalSeconds(scheduleName string, seconds float64) {
    if g, ok := m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec); ok {
        g.WithLabelValues(scheduleName).Set(seconds)
    }
}

// SetBackupTotal records the current number of existent backups.
func (m *ServerMetrics) SetBackupTotal(numberOfBackups int64) {
    if g, ok := m.metrics[backupTotal].(prometheus.Gauge); ok {

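As a usage sketch, the gauge can also be exercised directly with client_golang's testutil helpers, independently of the ServerMetrics wrapper; the metric name and label below simply mirror the registration above:

```go
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
    // Mirrors the GaugeVec registered in NewServerMetrics above.
    g := prometheus.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: "velero",
        Name:      "schedule_expected_interval_seconds",
        Help:      "Expected interval between consecutive scheduled backups, in seconds",
    }, []string{"schedule"})

    g.WithLabelValues("daily-backup").Set(86400)
    fmt.Println(testutil.ToFloat64(g.WithLabelValues("daily-backup"))) // 86400

    // RemoveSchedule's cleanup corresponds to deleting the label values.
    g.DeleteLabelValues("daily-backup")
    fmt.Println(testutil.CollectAndCount(g)) // 0
}
```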
@@ -259,6 +259,90 @@ func TestMultipleAdhocBackupsShareMetrics(t *testing.T) {
    assert.Equal(t, float64(1), validationFailureMetric, "All adhoc validation failures should be counted together")
}

// TestSetScheduleExpectedIntervalSeconds verifies that the expected interval metric
// is properly recorded for schedules.
func TestSetScheduleExpectedIntervalSeconds(t *testing.T) {
    tests := []struct {
        name            string
        scheduleName    string
        intervalSeconds float64
        description     string
    }{
        {
            name:            "every 5 minutes schedule",
            scheduleName:    "frequent-backup",
            intervalSeconds: 300,
            description:     "Expected interval should be 5m in seconds",
        },
        {
            name:            "daily schedule",
            scheduleName:    "daily-backup",
            intervalSeconds: 86400,
            description:     "Expected interval should be 24h in seconds",
        },
        {
            name:            "monthly schedule",
            scheduleName:    "monthly-backup",
            intervalSeconds: 2678400, // 31 days in seconds
            description:     "Expected interval should be 31 days in seconds",
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            m := NewServerMetrics()
            m.SetScheduleExpectedIntervalSeconds(tc.scheduleName, tc.intervalSeconds)

            metric := getMetricValue(t, m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec), tc.scheduleName)
            assert.Equal(t, tc.intervalSeconds, metric, tc.description)
        })
    }
}

// TestScheduleExpectedIntervalNotInitializedByDefault verifies that the expected
// interval metric is not initialized by InitSchedule, so it only appears for
// schedules with a valid cron expression.
func TestScheduleExpectedIntervalNotInitializedByDefault(t *testing.T) {
    m := NewServerMetrics()
    m.InitSchedule("test-schedule")

    // The metric should not have any values after InitSchedule
    ch := make(chan prometheus.Metric, 1)
    m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec).Collect(ch)
    close(ch)

    count := 0
    for range ch {
        count++
    }
    assert.Equal(t, 0, count, "scheduleExpectedIntervalSeconds should not be initialized by InitSchedule")
}

// TestRemoveScheduleCleansUpExpectedInterval verifies that RemoveSchedule
// cleans up the expected interval metric.
func TestRemoveScheduleCleansUpExpectedInterval(t *testing.T) {
    m := NewServerMetrics()
    m.InitSchedule("test-schedule")
    m.SetScheduleExpectedIntervalSeconds("test-schedule", 3600)

    // Verify metric exists
    metric := getMetricValue(t, m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec), "test-schedule")
    assert.Equal(t, float64(3600), metric)

    // Remove schedule and verify metric is cleaned up
    m.RemoveSchedule("test-schedule")

    ch := make(chan prometheus.Metric, 1)
    m.metrics[scheduleExpectedIntervalSeconds].(*prometheus.GaugeVec).Collect(ch)
    close(ch)

    count := 0
    for range ch {
        count++
    }
    assert.Equal(t, 0, count, "scheduleExpectedIntervalSeconds should be removed after RemoveSchedule")
}

// TestInitScheduleWithEmptyName verifies that InitSchedule works correctly
// with an empty schedule name (for adhoc backups).
func TestInitScheduleWithEmptyName(t *testing.T) {

@@ -149,7 +149,8 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
    // if there are any slashes in the middle of 'bucket', the user
    // probably put <bucket>/<prefix> in the bucket field, which we
    // don't support.
    if strings.Contains(bucket, "/") {
    // Exception: MRAP ARNs (arn:aws:s3::...) legitimately contain slashes.
    if strings.Contains(bucket, "/") && !strings.HasPrefix(bucket, "arn:aws:s3:") {
        return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket)
    }

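To make the rule explicit, here is the same check as a standalone predicate (the ARN is the made-up Multi-Region Access Point alias used by the tests below):

```go
package main

import (
    "fmt"
    "strings"
)

// validBucket reproduces the check above: a plain bucket name must not
// contain '/', but an S3 MRAP ARN (arn:aws:s3::...) legitimately may.
func validBucket(bucket string) bool {
    return !strings.Contains(bucket, "/") || strings.HasPrefix(bucket, "arn:aws:s3:")
}

func main() {
    fmt.Println(validBucket("my-bucket"))        // true
    fmt.Println(validBucket("my-bucket/prefix")) // false: the prefix belongs in the Prefix field
    fmt.Println(validBucket("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap")) // true
}
```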
@@ -943,6 +943,24 @@ func TestNewObjectBackupStoreGetter(t *testing.T) {
        wantBucket: "bucket",
        wantPrefix: "prefix/",
    },
    {
        name:     "when the Bucket field is an MRAP ARN, it should be valid",
        location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap").Result(),
        objectStoreGetter: objectStoreGetter{
            "provider-1": newInMemoryObjectStore("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap"),
        },
        credFileStore: velerotest.NewFakeCredentialsFileStore("", nil),
        wantBucket:    "arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap",
    },
    {
        name:     "when the Bucket field is an MRAP ARN with trailing slash, it should be valid and trimmed",
        location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap/").Result(),
        objectStoreGetter: objectStoreGetter{
            "provider-1": newInMemoryObjectStore("arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap"),
        },
        credFileStore: velerotest.NewFakeCredentialsFileStore("", nil),
        wantBucket:    "arn:aws:s3::123456789012:accesspoint/abcdef0123456.mrap",
    },
}

for _, tc := range tests {

@@ -210,11 +210,9 @@ func resultsKey(ns, name string) string {

func (b *backupper) getMatchAction(resPolicies *resourcepolicies.Policies, pvc *corev1api.PersistentVolumeClaim, volume *corev1api.Volume) (*resourcepolicies.Action, error) {
    if pvc != nil {
        pv := new(corev1api.PersistentVolume)
        err := b.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv)
        if err != nil {
            return nil, errors.Wrapf(err, "error getting pv for pvc %s", pvc.Spec.VolumeName)
        }
        // Ignore the error: if the PV is not available (Pending/Lost PVC or PV fetch failure), try matching with PVC only.
        // GetPVForPVC returns nil for all error cases.
        pv, _ := kube.GetPVForPVC(pvc, b.crClient)
        vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
        return resPolicies.GetMatchAction(vfd)
    }

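For reference, a minimal sketch of a GetPVForPVC-style helper that behaves the way the call site above relies on; the real helper lives in Velero's kube utility package, and its exact signature is inferred from this diff, so treat the details as assumptions:

```go
package kube

import (
    "context"
    "fmt"

    corev1api "k8s.io/api/core/v1"
    ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// GetPVForPVC returns the PV bound to the given PVC, or (nil, error) when
// the PVC is unbound (no Spec.VolumeName, e.g. Pending or Lost) or the PV
// cannot be fetched. The caller above deliberately ignores the error and
// falls back to PVC-only policy matching.
func GetPVForPVC(pvc *corev1api.PersistentVolumeClaim, cli ctrlclient.Client) (*corev1api.PersistentVolume, error) {
    if pvc.Spec.VolumeName == "" {
        return nil, fmt.Errorf("PVC %s/%s is not bound to a PV", pvc.Namespace, pvc.Name)
    }
    pv := new(corev1api.PersistentVolume)
    if err := cli.Get(context.TODO(), ctrlclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
        return nil, err
    }
    return pv, nil
}
```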