Compare commits

...

44 Commits

Author SHA1 Message Date
lyndon-li
8afe3cea8b Merge pull request #8124 from Lyndon-Li/release-1.14
Change log for 1.14.1
2024-08-20 09:51:03 +08:00
Lyndon-Li
6ae1582113 change log for 1.14.1
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-08-19 13:35:48 +08:00
lyndon-li
69d950ed77 Merge pull request #8118 from blackpiglet/fix_cve_for_1.14
Bump base image and golang version to fix CVEs.
2024-08-16 10:10:29 +08:00
Xun Jiang
fb146cd2dd Bump base image and golang version to fix CVEs.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2024-08-15 20:42:09 +08:00
Shubham Pampattiwar
cd601cafdf Merge pull request #8037 from mrnold/pod-volume-message-7857-1.14
[release-1.14] Avoid wrapping failed PVB status with empty message.
2024-07-24 08:08:11 -07:00
Matthew Arnold
76a3ecc999 Add changelog file.
Signed-off-by: Matthew Arnold <marnold@redhat.com>
2024-07-24 09:21:20 -04:00
Matthew Arnold
3dde086435 Avoid wrapping failed PVB status with empty message.
Also change "get" to "found" as requested in issue #7857.

Signed-off-by: Matthew Arnold <marnold@redhat.com>
2024-07-24 09:21:20 -04:00
lyndon-li
ebafe12cb0 Merge pull request #8035 from shubham-pampattiwar/expose-pv-patch-max-timeout-rel-1.14
[release-1.14] Expose PVPatchMaximumDuration timeout for custom configuration
2024-07-23 10:13:59 +08:00
Shubham Pampattiwar
fea63167bc Expose PVPatchMaximumDuration timeout for custom configuration
Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

remove debug log

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

use resource timeout server arg

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

add changelog

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>

remove hardcoded PVPatchMaximumDuration const usage

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
(cherry picked from commit fd6c74715a)

update changelog filename

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2024-07-22 08:27:13 -07:00
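
For context, the squashed messages above replace the hardcoded PVPatchMaximumDuration constant with the server's resource timeout argument. A minimal sketch of that pattern, assuming a k8s.io/apimachinery polling loop; the function name, interval, and condition are illustrative, not the actual Velero code:

package sketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForPVPatch polls until check reports done, giving up after the
// resourceTimeout configured via the server's --resource-timeout argument
// rather than after a hardcoded PVPatchMaximumDuration constant.
func waitForPVPatch(ctx context.Context, resourceTimeout time.Duration, check wait.ConditionWithContextFunc) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, resourceTimeout, true, check)
}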
Shubham Pampattiwar
6ae34a13aa Merge pull request #8016 from sseago/plugin-leak-1.14
[release-1.14] Reuse existing plugin manager for get/put volume info
2024-07-16 09:26:03 -07:00
Scott Seago
89a536382b update changelog filename
Signed-off-by: Scott Seago <sseago@redhat.com>
2024-07-16 11:03:30 -04:00
lyndon-li
54df263094 fix linter check error (#8014)
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
Signed-off-by: Scott Seago <sseago@redhat.com>
2024-07-16 11:03:22 -04:00
Scott Seago
386fbb1ea6 Reuse existing plugin manager for get/put volume info
Signed-off-by: Scott Seago <sseago@redhat.com>
2024-07-15 14:22:43 -04:00
lyndon-li
0c38aa26bc Merge pull request #8006 from shubham-pampattiwar/skip-pv-patch-wffc-1.14
Skip PV patch step in Restore workflow for WaitForFirstConsumer VolumeBindingMode Pending state PVCs (#7953)
2024-07-15 14:39:17 +08:00
Shubham Pampattiwar
7cb5b96019 Skip PV patch step in Restore workflow for WaitForFirstConsumer VolumeBindingMode Pending state PVCs (#7953)
add changelog file

change log level and add more detailed comments

make update

add return for sc get call if error

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
(cherry picked from commit 3bd8a7da7d)

update changelog

Signed-off-by: Shubham Pampattiwar <spampatt@redhat.com>
2024-07-12 11:38:16 -07:00
Anshul Ahuja
1c7412db85 Reset VolumeSnapshotRef in Backup Sync Flow (#8004)
Signed-off-by: Anshul Ahuja <anshulahuja@microsoft.com>
Co-authored-by: Anshul Ahuja <anshulahuja@microsoft.com>
2024-07-12 13:20:25 +05:30
lyndon-li
80a5b51c2e Merge pull request #7998 from blackpiglet/7928_fix_1.14
Check whether the namespaces specified in namespace filter exist.
2024-07-12 10:57:35 +08:00
Xun Jiang
57377f9d69 Check whether the namespaces specified in namespace filter exist.
Check whether the namespaces specified in the
backup.Spec.IncludedNamespaces exist during backup resource collection.
If not, log an error to mark the backup as PartiallyFailed.

Signed-off-by: Xun Jiang <blackpigletbruce@gmail.com>
2024-07-11 14:04:19 +08:00
Wenkai Yin(尹文开)
163ee42067 Merge pull request #7976 from blackpiglet/7929_1.14_fix
[cherry-pick][1.14] Check whether the volume's source is PVC before fetching its PV.
2024-07-05 10:03:56 +08:00
Xun Jiang
dcf4e1f10b Check whether the volume's source is PVC before fetching its PV.
Signed-off-by: Xun Jiang <blackpigletbruce@gmail.com>
2024-07-03 18:31:28 +08:00
Xun Jiang/Bruce Jiang
77b3c8f10d Merge pull request #7944 from blackpiglet/7818_fix
Expose the VolumeHelper to third-party plugins.
2024-07-03 10:58:11 +08:00
Xun Jiang
1fd959d752 Expose the VolumeHelper to third-party plugins.
Signed-off-by: Xun Jiang <blackpigletbruce@gmail.com>
2024-07-02 17:15:47 +08:00
Xun Jiang/Bruce Jiang
5c413ec3a9 Merge pull request #7949 from Lyndon-Li/release-1.14
[1.14] Issue 7903: add limitation clarification for the change PVC selected-node feature
2024-07-01 09:54:36 +08:00
Lyndon-Li
4056a969a9 issue 7904: add limitation clarification for the change PVC selected-node feature
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-06-28 13:45:55 +08:00
lyndon-li
583cd8b2e9 Merge pull request #7926 from sseago/unschedulable-backport
[release-1.14] Don't consider unschedulable pods unrecoverable
2024-06-26 10:15:07 +08:00
Scott Seago
d21d413829 Don't consider unschedulable pods unrecoverable
Signed-off-by: Scott Seago <sseago@redhat.com>
2024-06-25 14:57:51 -04:00
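
For background, a pod is typically detected as unschedulable through its PodScheduled condition; an illustrative check (not the actual Velero change) is:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
)

// isUnschedulable reports whether the scheduler currently marks the pod as
// unschedulable. Such a pod may still be scheduled later (for example after
// the cluster scales up), which is why it should not be treated as
// unrecoverable.
func isUnschedulable(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodScheduled &&
			cond.Status == corev1.ConditionFalse &&
			cond.Reason == corev1.PodReasonUnschedulable {
			return true
		}
	}
	return false
}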
lyndon-li
2fc6300f22 Merge pull request #7860 from blackpiglet/update_e2e_for_1_14
Skip parallel files upload and download test for Restic case
2024-06-13 10:08:32 +08:00
Xun Jiang/Bruce Jiang
200f16e539 Merge branch 'release-1.14' into update_e2e_for_1_14
2024-06-12 21:25:19 +08:00
Xun Jiang/Bruce Jiang
0d3657240a Merge pull request #7876 from reasonerjt/update-release-note-1.14
Update release note of 1.14
2024-06-12 20:14:23 +08:00
Xun Jiang/Bruce Jiang
08fea6e994 Merge branch 'release-1.14' into update-release-note-1.14
2024-06-12 20:04:13 +08:00
Xun Jiang
d20bd165a9 Skip parallel files upload and download test for Restic case.
Signed-off-by: Xun Jiang <blackpigletbruce@gmail.com>
2024-06-12 19:52:22 +08:00
Xun Jiang/Bruce Jiang
bf778c7d21 Merge pull request #7875 from reasonerjt/fix-restore-crash-1.14
Add checks for csisnapshot for vol_info population
2024-06-12 19:41:14 +08:00
Daniel Jiang
a65005996a Update release note of 1.14
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-12 19:13:11 +08:00
Daniel Jiang
f61c8b9042 Add checks for csisnapshot for vol_info population
fixes #7874

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-12 18:57:29 +08:00
Daniel Jiang
21366795d1 Merge pull request #7852 from reasonerjt/fix-7849-1.14
Use PVC to track the CSI snapshot in restore
2024-06-04 13:33:27 +08:00
Daniel Jiang
f6367ca396 Use PVC to track the CSI snapshot in restore
This commit fixes #7849.
It uses PVC instead of PV to track CSI snapshots when generating restore
volume info metadata, so that the metadata can be populated correctly
even when the PVC is not bound to a PV.

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-04 10:10:36 +08:00
Daniel Jiang
ce16acb12e Merge pull request #7847 from Lyndon-Li/release-1.14
Avoid unnecessary repo connect for maintenance
2024-05-31 13:11:46 +08:00
Lyndon-Li
54d5dabdda avoid unnecessary repo connect for maintenance
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-05-31 11:20:12 +08:00
Daniel Jiang
952f713b3b Merge pull request #7830 from reasonerjt/fix-git-release-issue-1.14
Fix issue in "git status" in goreleaser.sh
2024-05-27 17:01:51 +08:00
Daniel Jiang
1f1ccab948 Fix issue in "git status" in goreleaser.sh
When dry-running tag-release.sh, there's an error:
"fatal: detected dubious ownership in repository at
'/github.com/vmware-tanzu/velero'"

This commit works around the issue to make sure "tag-release.sh"
can finish successfully

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-27 16:49:47 +08:00
Daniel Jiang
9164bc95a4 Merge pull request #7821 from reasonerjt/pin-image-1.14
Pin the version of Golang and base image for v1.14.0
2024-05-24 13:48:14 +08:00
Daniel Jiang
74966d0e2c Pin the version of Golang and base image
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-24 13:25:23 +08:00
Daniel Jiang
7d22548d7a Merge pull request #7824 from reasonerjt/fix-codespell-1.14
Fix the problems found by codespell
2024-05-24 13:24:34 +08:00
Daniel Jiang
892fa79051 Fix the problems found by codespell
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-24 11:32:26 +08:00
50 changed files with 425 additions and 286 deletions

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.22.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.22.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
@@ -82,7 +82,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.22.6'
id: go
- name: Check out the code
uses: actions/checkout@v4

View File

@@ -10,7 +10,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.22.6'
id: go
- name: Check out the code
uses: actions/checkout@v4

View File

@@ -15,7 +15,7 @@ jobs:
with:
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme
check_filenames: true
check_hidden: true

View File

@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.22.6'
id: go
- uses: actions/checkout@v4

View File

@@ -5,7 +5,7 @@
We as members, contributors, and leaders pledge to make participation in the Velero project and our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
identity and expression, level of experience, education, socioeconomic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as velero-builder
FROM --platform=$BUILDPLATFORM golang:1.22.6-bookworm as velero-builder
ARG GOPROXY
ARG BIN
@@ -47,7 +47,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as restic-builder
FROM --platform=$BUILDPLATFORM golang:1.22.6-bookworm as restic-builder
ARG BIN
ARG TARGETOS
@@ -70,7 +70,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:latest
FROM paketobuildpacks/run-jammy-tiny:0.2.46
LABEL maintainer="Xun Jiang <jxun@vmware.com>"

View File

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.22 as tilt-helper
FROM golang:1.22.6 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1,3 +1,29 @@
## v1.14.1
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.14.1
### Container Image
`velero/velero:v1.14.1`
### Documentation
https://velero.io/docs/v1.14/
### Upgrading
https://velero.io/docs/v1.14/upgrade-to-1.14/
### All Changes
* Avoid wrapping failed PVB status with empty message. (#8037, @mrnold)
* Make PVPatchMaximumDuration timeout configurable (#8035, @shubham-pampattiwar)
* Reuse existing plugin manager for get/put volume info (#8016, @sseago)
* Skip PV patch step in Restore workflow for WaitForFirstConsumer VolumeBindingMode Pending state PVCs (#8006, @shubham-pampattiwar)
* Check whether the namespaces specified in namespace filter exist. (#7998, @blackpiglet)
* Check whether the volume's source is PVC before fetching its PV. (#7976, @blackpiglet)
* Fix issue #7904, add the limitation clarification for change PVC selected-node feature (#7949, @Lyndon-Li)
* Expose the VolumeHelper to third-party plugins. (#7944, @blackpiglet)
* Don't consider unschedulable pods unrecoverable (#7926, @sseago)
## v1.14
### Download
@@ -14,7 +40,7 @@ https://velero.io/docs/v1.14/upgrade-to-1.14/
### Highlights
#### The maintenance work for kopia backup repositories is run in jobs
#### The maintenance work for kopia/restic backup repositories is run in jobs
Since velero started using kopia for filesystem-level backup/restore, we've noticed that when velero connects to kopia backup repositories and performs maintenance, it sometimes consumes excessive memory, which can cause the velero pod to get OOM killed. To mitigate this issue, the maintenance work is moved out of the velero pod into a separate Kubernetes job, and the user is able to specify the resource requests in "velero install".
#### Volume Policies are extended to support more actions to handle volumes
In an earlier release, a flexible volume policy was introduced to skip certain volumes from a backup. In v1.14 we've enhanced this policy to let the user set how volumes should be backed up: the user can set "fs-backup" or "snapshot" as the value of "action" in the policy, and velero will back up the volumes accordingly. This enhancement allows fine-grained control, such as opt-in/opt-out, without having to update the target workload. For more details please refer to https://velero.io/docs/v1.14/resource-filtering/#supported-volumepolicy-actions
@@ -38,6 +64,7 @@ Besides the service principal with secret(password)-based authentication, Velero
* The CSI plugin has been merged into the velero repo in the v1.14 release. It is installed by default as an internal plugin, and should not be installed via the "plugins" parameter of the "velero install" command.
* The default resource requests and limits for the node agent are removed in v1.14 so that the node-agent pods have the QoS class of "BestEffort"; for more details please refer to #7391
* There's a change in namespace filtering behavior during backup: in v1.14, when the includedNamespaces/excludedNamespaces fields are not set and the labelSelector/OrLabelSelectors are set in the backup spec, the backup will only include the namespaces which contain resources that match the label selectors, while in previous releases all namespaces would be included in the backup with such settings. For more details refer to #7105
* Patching the PV in the "Finalizing" state may cause the restore to be in "PartiallyFailed" state when the PV is blocked in "Pending" state, while in the previous release the restore may end up being in "Complete" state. For more details refer to #7866
### All Changes
* Fix backup log to show error string, not index (#7805, @piny940)
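
The VolumePolicy "action" feature referenced in the highlights above is configured through the resource policies ConfigMap that backup.Spec.ResourcePolicy points at. A minimal sketch of the v1 document shape, matching the schema exercised by the resourcepolicies tests later in this view; the storage-class name is illustrative:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// A v1 volume policy: volumes from the (illustrative) "ebs-sc" storage class
// are backed up with fs-backup, while CSI-backed volumes are snapshotted.
const policy = `
version: v1
volumePolicies:
  - conditions:
      storageClass:
        - ebs-sc
    action:
      type: fs-backup
  - conditions:
      csi: {}
    action:
      type: snapshot
`

func main() {
	// ConfigMap data is parsed as YAML; JSON works too, as in the CSI
	// plugin test further down in this view.
	var doc map[string]interface{}
	if err := yaml.Unmarshal([]byte(policy), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["version"]) // v1
}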

View File

@@ -76,7 +76,7 @@ volumePolicies:
- Update VolumePolicy action type validation to account for `fs-backup` and `snapshot` as valid VolumePolicy actions.
- Modifications needed for `fs-backup` action:
- Now based on the specification of volume policy on backup request we will decide whether to go via legacy pod annotations approach or the newer volume policy based fs-backup action approach.
- If there is a presence of volume policy(fs-backup/snapshot) on the backup request that matches as an action for a volume we use the newer volume policy approach to get the list of the volumes for `fs-backup` action
- If there is a presence of volume policy(fs-backup/snapshot) on the backup request that matches as an action for a volume we use the newer volume policy approach to get the list of the volumes for `fs-backup` action
- Else continue with the annotation based legacy approach workflow.
- Modifications needed for `snapshot` action:

View File

@@ -65,7 +65,7 @@ This page contains a pre-migration checklist for ensuring a repo migration goes
#### Updating Netlify
The settings for Netflify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
The settings for Netlify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
#### Communication strategy

View File

@@ -27,7 +27,7 @@ Moreover, we would like to create a general workflow to variations during the da
- Support different data accesses, i.e., file system level and block level
- Support different snapshot types, i.e., CSI snapshot, volume snapshot API from storage vendors
- Support different snapshot accesses, i.e., through PV generated from snapshots, and through direct access API from storage vendors
- Reuse the existing Velero generic data path as creatd in [Unified Repository design][1]
- Reuse the existing Velero generic data path as created in [Unified Repository design][1]
## Non-Goals

16
go.mod
View File

@@ -1,11 +1,11 @@
module github.com/vmware-tanzu/velero
go 1.22.0
go 1.22.6
require (
cloud.google.com/go/storage v1.40.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
@@ -42,9 +42,9 @@ require (
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/mod v0.17.0
golang.org/x/net v0.24.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.19.0
golang.org/x/text v0.14.0
golang.org/x/text v0.16.0
google.golang.org/api v0.172.0
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
@@ -68,7 +68,7 @@ require (
cloud.google.com/go/compute v1.24.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.7 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
@@ -157,10 +157,10 @@ require (
go.opentelemetry.io/otel/trace v1.25.0 // indirect
go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect

38
go.sum
View File

@@ -50,10 +50,10 @@ cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2u
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -199,8 +199,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -649,8 +647,8 @@ github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -795,8 +793,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -883,8 +881,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -986,14 +984,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1003,8 +1001,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1071,8 +1069,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
FROM --platform=$TARGETPLATFORM golang:1.22.6-bookworm
ARG GOPROXY
@@ -99,3 +99,6 @@ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/i
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
RUN chmod +x ./kubectl
RUN mv ./kubectl /usr/local/bin
# Fix the "dubious ownership" issue from git when running goreleaser.sh
RUN echo "[safe] \n\t directory = *" > /.gitconfig
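
For reference, the "[safe] directory = *" written to /.gitconfig here is the file form of "git config --global --add safe.directory '*'"; it tells git to trust repositories owned by a different user, avoiding the "dubious ownership" error quoted in the commit message above.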

View File

@@ -16,11 +16,16 @@ limitations under the License.
package resourcepolicies
import (
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
type VolumeActionType string
@@ -148,7 +153,43 @@ func (p *Policies) Validate() error {
return nil
}
func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
func GetResourcePoliciesFromBackup(
backup velerov1api.Backup,
client crclient.Client,
logger logrus.FieldLogger,
) (resourcePolicies *Policies, err error) {
if backup.Spec.ResourcePolicy != nil &&
strings.EqualFold(backup.Spec.ResourcePolicy.Kind, ConfigmapRefType) {
policiesConfigMap := &v1.ConfigMap{}
err = client.Get(
context.Background(),
crclient.ObjectKey{Namespace: backup.Namespace, Name: backup.Spec.ResourcePolicy.Name},
policiesConfigMap,
)
if err != nil {
logger.Errorf("Fail to get ResourcePolicies %s ConfigMap with error %s.",
backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
return nil, fmt.Errorf("fail to get ResourcePolicies %s ConfigMap with error %s",
backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
}
resourcePolicies, err = getResourcePoliciesFromConfig(policiesConfigMap)
if err != nil {
logger.Errorf("Fail to read ResourcePolicies from ConfigMap %s with error %s.",
backup.Namespace+"/"+backup.Name, err.Error())
return nil, fmt.Errorf("fail to read the ResourcePolicies from ConfigMap %s with error %s",
backup.Namespace+"/"+backup.Name, err.Error())
} else if err = resourcePolicies.Validate(); err != nil {
logger.Errorf("Fail to validate ResourcePolicies in ConfigMap %s with error %s.",
backup.Namespace+"/"+backup.Name, err.Error())
return nil, fmt.Errorf("fail to validate ResourcePolicies in ConfigMap %s with error %s",
backup.Namespace+"/"+backup.Name, err.Error())
}
}
return resourcePolicies, nil
}
func getResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
if cm == nil {
return nil, fmt.Errorf("could not parse config from nil configmap")
}

View File

@@ -182,7 +182,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
expectedAction: &Action{Type: "snapshot"},
},
{
name: "dismatch all policies",
name: "mismatch all policies",
volume: &structuredVolume{
capacity: *resource.NewQuantity(50<<30, resource.BinarySI),
storageClass: "ebs-sc",
@@ -231,7 +231,7 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
}
// Call the function and check for errors
resPolicies, err := GetResourcePoliciesFromConfig(cm)
resPolicies, err := getResourcePoliciesFromConfig(cm)
assert.Nil(t, err)
// Check that the returned resourcePolicies object contains the expected data
@@ -394,7 +394,7 @@ volumePolicies:
skip: true,
},
{
name: "dismatch volume by types",
name: "mismatch volume by types",
yamlData: `version: v1
volumePolicies:
- conditions:

View File

@@ -165,7 +165,7 @@ func TestNFSConditionMatch(t *testing.T) {
expectedMatch: true,
},
{
name: "server dismatch",
name: "server mismatch",
condition: &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20", Path: ""}},
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: ""}, nil),
expectedMatch: false,

View File

@@ -662,17 +662,17 @@ type RestoreVolumeInfoTracker struct {
// map of PV name to the NativeSnapshotInfo from which the PV is restored
pvNativeSnapshotMap map[string]*NativeSnapshotInfo
// map of PV name to the CSISnapshot object from which the PV is restored
pvCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
datadownloadList *velerov2alpha1.DataDownloadList
pvrs []*velerov1api.PodVolumeRestore
// map of PVC object to the CSISnapshot object from which the PV is restored
// the key is in the form of $pvc-ns/$pvc-name
pvcCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
datadownloadList *velerov2alpha1.DataDownloadList
pvrs []*velerov1api.PodVolumeRestore
}
// Populate data objects in the tracker, which will be used to generate the RestoreVolumeInfo array in Result()
// The input param restoredResourceList should be the final result of the restore.
func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourceList map[string][]string) {
pvcs := RestoredPVCFromRestoredResourceList(restoredResourceList)
t.Lock()
defer t.Unlock()
for item := range pvcs {
@@ -684,25 +684,26 @@ func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourc
log.WithError(err).Error("Failed to get PVC")
continue
}
if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" {
log.Info("PVC is not bound or has no volume name")
continue
}
pv := &corev1api.PersistentVolume{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
log.WithError(err).Error("Failed to get PV")
} else {
t.pvPvc.insert(*pv, pvcName, pvcNS)
}
// Collect the CSI VolumeSnapshot objects referenced by the restored PVCs,
if pvc.Spec.DataSource != nil && pvc.Spec.DataSource.Kind == "VolumeSnapshot" {
vs := &snapshotv1api.VolumeSnapshot{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Namespace: pvcNS, Name: pvc.Spec.DataSource.Name}, vs); err != nil {
log.WithError(err).Error("Failed to get VolumeSnapshot")
} else {
t.pvCSISnapshotMap[pv.Name] = *vs
t.pvcCSISnapshotMap[pvc.Namespace+"/"+pvcName] = *vs
}
}
if pvc.Status.Phase == corev1api.ClaimBound && pvc.Spec.VolumeName != "" {
pv := &corev1api.PersistentVolume{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
log.WithError(err).Error("Failed to get PV")
} else {
t.pvPvc.insert(*pv, pvcName, pvcNS)
}
} else {
log.Warn("PVC is not bound or has no volume name")
continue
}
}
if err := t.client.List(ctx, t.datadownloadList, &kbclient.ListOptions{
Namespace: t.restore.Namespace,
@@ -761,21 +762,35 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
}
// Generate RestoreVolumeInfo for PVs restored from CSISnapshots
for pvName, csiSnapshot := range t.pvCSISnapshotMap {
for pvc, csiSnapshot := range t.pvcCSISnapshotMap {
n := strings.Split(pvc, "/")
if len(n) != 2 {
t.log.Warnf("Invalid PVC key '%s' in the pvc-CSISnapshot map, skip populating it to volume info", pvc)
continue
}
pvcNS, pvcName := n[0], n[1]
var restoreSize int64 = 0
if csiSnapshot.Status != nil && csiSnapshot.Status.RestoreSize != nil {
restoreSize = csiSnapshot.Status.RestoreSize.Value()
}
vscName := ""
if csiSnapshot.Spec.Source.VolumeSnapshotContentName != nil {
vscName = *csiSnapshot.Spec.Source.VolumeSnapshotContentName
}
volumeInfo := &RestoreVolumeInfo{
PVName: pvName,
PVCNamespace: pvcNS,
PVCName: pvcName,
SnapshotDataMoved: false,
RestoreMethod: CSISnapshot,
CSISnapshotInfo: &CSISnapshotInfo{
SnapshotHandle: csiSnapshot.Annotations[VolumeSnapshotHandleAnnotation],
Size: csiSnapshot.Status.RestoreSize.Value(),
Size: restoreSize,
Driver: csiSnapshot.Annotations[CSIDriverNameAnnotation],
VSCName: *csiSnapshot.Spec.Source.VolumeSnapshotContentName,
VSCName: vscName,
},
}
if pvcPVInfo := t.pvPvc.retrieve(pvName, "", ""); pvcPVInfo != nil {
volumeInfo.PVCName = pvcPVInfo.PVCName
volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
if pvcPVInfo := t.pvPvc.retrieve("", pvcName, pvcNS); pvcPVInfo != nil {
volumeInfo.PVName = pvcPVInfo.PV.Name
}
volumeInfos = append(volumeInfos, volumeInfo)
}
@@ -829,7 +844,7 @@ func NewRestoreVolInfoTracker(restore *velerov1api.Restore, logger logrus.FieldL
data: make(map[string]pvcPvInfo),
},
pvNativeSnapshotMap: make(map[string]*NativeSnapshotInfo),
pvCSISnapshotMap: make(map[string]snapshotv1api.VolumeSnapshot),
pvcCSISnapshotMap: make(map[string]snapshotv1api.VolumeSnapshot),
datadownloadList: &velerov2alpha1.DataDownloadList{},
}
}

View File

@@ -933,7 +933,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
data: make(map[string]pvcPvInfo),
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{},
pvrs: []*velerov1api.PodVolumeRestore{},
},
@@ -968,8 +968,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
IOPS: "10000",
},
},
pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{},
pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{},
pvrs: []*velerov1api.PodVolumeRestore{
builder.ForPodVolumeRestore("velero", "testRestore-1234").
PodNamespace("testNS").
@@ -1031,8 +1031,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
},
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
"testPV": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
"testNS/testPVC": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
ObjectMeta(
builder.WithAnnotations(VolumeSnapshotHandleAnnotation, "csi-snap-001",
CSIDriverNameAnnotation, "test-csi-driver"),
@@ -1101,7 +1101,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
},
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{
Items: []velerov2alpha1.DataDownload{
*builder.ForDataDownload("velero", "testDataDownload-1").

View File

@@ -143,20 +143,24 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
}
if v.volumePolicy != nil {
pvc, err := kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
if err != nil {
v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
return false, err
}
pv, err := kubeutil.GetPVForPVC(pvc, v.client)
if err != nil {
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
return false, err
var resource interface{}
resource = &volume
if volume.VolumeSource.PersistentVolumeClaim != nil {
pvc, err := kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
if err != nil {
v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
return false, err
}
resource, err = kubeutil.GetPVForPVC(pvc, v.client)
if err != nil {
v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
return false, err
}
}
action, err := v.volumePolicy.GetMatchAction(pv)
action, err := v.volumePolicy.GetMatchAction(resource)
if err != nil {
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
v.logger.WithError(err).Error("fail to get VolumePolicy match action for volume")
return false, err
}

View File

@@ -552,6 +552,32 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
shouldFSBackup: true,
expectedErr: false,
},
{
name: "Volume source is emptyDir, VolumePolicy match, return true and no error",
pod: builder.ForPod("ns", "pod-1").
Volumes(
&corev1.Volume{
Name: "",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}).Result(),
resourcePolicies: &resourcepolicies.ResourcePolicies{
Version: "v1",
VolumePolicies: []resourcepolicies.VolumePolicy{
{
Conditions: map[string]interface{}{
"volumeTypes": []string{"emptyDir"},
},
Action: resourcepolicies.Action{
Type: resourcepolicies.FSBackup,
},
},
},
},
shouldFSBackup: true,
expectedErr: false,
},
{
name: "VolumePolicy match, action type is not fs-backup, return false and no error",
pod: builder.ForPod("ns", "pod-1").

View File

@@ -40,6 +40,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/label"
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
"github.com/vmware-tanzu/velero/pkg/plugin/utils/volumehelper"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2"
uploaderUtil "github.com/vmware-tanzu/velero/pkg/uploader/util"
@@ -229,6 +230,22 @@ func (p *pvcBackupItemAction) Execute(
return item, nil, "", nil, nil
}
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
item,
kuberesource.PersistentVolumeClaims,
*backup,
p.crClient,
p.log,
)
if err != nil {
return nil, nil, "", nil, err
}
if !shouldSnapshot {
p.log.Debugf("CSI plugin skip snapshot for PVC %s according to the VolumeHelper setting.",
pvc.Namespace+"/"+pvc.Name)
return nil, nil, "", nil, err
}
vs, err := p.createVolumeSnapshot(pvc, backup)
if err != nil {
return nil, nil, "", nil, err

View File

@@ -61,6 +61,7 @@ func TestExecute(t *testing.T) {
expectedBackup *velerov1api.Backup
expectedDataUpload *velerov2alpha1.DataUpload
expectedPVC *corev1.PersistentVolumeClaim
resourcePolicy *corev1.ConfigMap
}{
{
name: "Skip PVC BIA when backup is in finalizing phase",
@@ -127,6 +128,16 @@ func TestExecute(t *testing.T) {
builder.WithLabels(velerov1api.BackupNameLabel, "test", velerov1api.VolumeSnapshotLabel, "")).
VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
},
{
name: "Test ResourcePolicy",
backup: builder.ForBackup("velero", "test").ResourcePolicies("resourcePolicy").SnapshotVolumes(false).Result(),
resourcePolicy: builder.ForConfigMap("velero", "resourcePolicy").Data("policy", "{\"version\":\"v1\", \"volumePolicies\":[{\"conditions\":{\"csi\": {}},\"action\":{\"type\":\"snapshot\"}}]}").Result(),
pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
pv: builder.ForPersistentVolume("testPV").CSI("hostpath", "testVolume").Result(),
sc: builder.ForStorageClass("testSC").Provisioner("hostpath").Result(),
vsClass: builder.ForVolumeSnapshotClass("tescVSClass").Driver("hostpath").ObjectMeta(builder.WithLabels(velerov1api.VolumeSnapshotClassSelectorLabel, "")).Result(),
expectedErr: nil,
},
}
for _, tc := range tests {
@@ -147,6 +158,9 @@ func TestExecute(t *testing.T) {
if tc.vsClass != nil {
require.NoError(t, crClient.Create(context.Background(), tc.vsClass))
}
if tc.resourcePolicy != nil {
require.NoError(t, crClient.Create(context.Background(), tc.resourcePolicy))
}
pvcBIA := pvcBackupItemAction{
log: logger,
@@ -190,6 +204,8 @@ func TestExecute(t *testing.T) {
resultUnstructed, _, _, _, err := pvcBIA.Execute(&unstructured.Unstructured{Object: pvcMap}, tc.backup)
if tc.expectedErr != nil {
require.Equal(t, err, tc.expectedErr)
} else {
require.NoError(t, err)
}
if tc.expectedDataUpload != nil {

View File

@@ -95,6 +95,7 @@ type Backupper interface {
outBackupFile io.Writer,
backupItemActionResolver framework.BackupItemActionResolverV2,
asyncBIAOperations []*itemoperation.BackupOperation,
backupStore persistence.BackupStore,
) error
}
@@ -610,6 +611,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(
outBackupFile io.Writer,
backupItemActionResolver framework.BackupItemActionResolverV2,
asyncBIAOperations []*itemoperation.BackupOperation,
backupStore persistence.BackupStore,
) error {
gzw := gzip.NewWriter(outBackupFile)
defer gzw.Close()
@@ -726,7 +728,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(
}).Infof("Updated %d items out of an estimated total of %d (estimate will change throughout the backup finalizer)", len(backupRequest.BackedUpItems), totalItems)
}
backupStore, volumeInfos, err := kb.getVolumeInfos(*backupRequest.Backup, log)
volumeInfos, err := backupStore.GetBackupVolumeInfos(backupRequest.Backup.Name)
if err != nil {
log.WithError(err).Errorf("fail to get the backup VolumeInfos for backup %s", backupRequest.Name)
return err
@@ -810,34 +812,6 @@ type tarWriter interface {
WriteHeader(*tar.Header) error
}
func (kb *kubernetesBackupper) getVolumeInfos(
backup velerov1api.Backup,
log logrus.FieldLogger,
) (persistence.BackupStore, []*volume.BackupVolumeInfo, error) {
location := &velerov1api.BackupStorageLocation{}
if err := kb.kbClient.Get(context.Background(), kbclient.ObjectKey{
Namespace: backup.Namespace,
Name: backup.Spec.StorageLocation,
}, location); err != nil {
return nil, nil, errors.WithStack(err)
}
pluginManager := kb.pluginManager(log)
defer pluginManager.CleanupClients()
backupStore, storeErr := kb.backupStoreGetter.Get(location, pluginManager, log)
if storeErr != nil {
return nil, nil, storeErr
}
volumeInfos, err := backupStore.GetBackupVolumeInfos(backup.Name)
if err != nil {
return nil, nil, err
}
return backupStore, volumeInfos, nil
}
// updateVolumeInfos update the VolumeInfos according to the AsyncOperations
func updateVolumeInfos(
volumeInfos []*volume.BackupVolumeInfo,

View File

@@ -54,8 +54,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/persistence"
persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks"
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2"
vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
@@ -4519,23 +4517,6 @@ func TestBackupNamespaces(t *testing.T) {
}
}
func TestGetVolumeInfos(t *testing.T) {
h := newHarness(t)
pluginManager := new(pluginmocks.Manager)
backupStore := new(persistencemocks.BackupStore)
h.backupper.pluginManager = func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }
h.backupper.backupStoreGetter = NewFakeSingleObjectBackupStoreGetter(backupStore)
backupStore.On("GetBackupVolumeInfos", "backup-01").Return([]*volume.BackupVolumeInfo{}, nil)
pluginManager.On("CleanupClients").Return()
backup := builder.ForBackup("velero", "backup-01").StorageLocation("default").Result()
bsl := builder.ForBackupStorageLocation("velero", "default").Result()
require.NoError(t, h.backupper.kbClient.Create(context.Background(), bsl))
_, _, err := h.backupper.getVolumeInfos(*backup, h.log)
require.NoError(t, err)
}
func TestUpdateVolumeInfos(t *testing.T) {
timeExample := time.Date(2014, 6, 5, 11, 56, 45, 0, time.Local)
now := metav1.NewTime(timeExample)

View File

@@ -24,8 +24,6 @@ import (
"strings"
"time"
"github.com/vmware-tanzu/velero/internal/volumehelper"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
@@ -42,6 +40,7 @@ import (
"github.com/vmware-tanzu/velero/internal/hook"
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
"github.com/vmware-tanzu/velero/internal/volume"
"github.com/vmware-tanzu/velero/internal/volumehelper"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/archive"
"github.com/vmware-tanzu/velero/pkg/client"

View File

@@ -741,6 +741,23 @@ func (r *itemCollector) collectNamespaces(
return nil, errors.WithStack(err)
}
for _, includedNSName := range r.backupRequest.Backup.Spec.IncludedNamespaces {
nsExists := false
// Skip checking namespace existence when it's "*".
if includedNSName == "*" {
continue
}
for _, unstructuredNS := range unstructuredList.Items {
if unstructuredNS.GetName() == includedNSName {
nsExists = true
}
}
if !nsExists {
log.Errorf("fail to get the namespace %s specified in backup.Spec.IncludedNamespaces", includedNSName)
}
}
var singleSelector labels.Selector
var orSelectors []labels.Selector

View File

@@ -224,6 +224,17 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
},
expectedTrackedNS: []string{"ns1", "ns2"},
},
{
name: "ns specified by the IncludeNamespaces cannot be found",
backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
ie: collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
builder.ForNamespace("ns3").Result(),
},
expectedTrackedNS: []string{"ns1"},
},
}
for _, tc := range tests {

View File

@@ -412,7 +412,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
}
if err := controller.UpdatePVBStatusToFailed(s.ctx, client, &pvbs.Items[i],
fmt.Errorf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, velerov1api.PodVolumeBackupPhaseFailed),
fmt.Errorf("found a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, velerov1api.PodVolumeBackupPhaseFailed),
"", time.Now(), s.logger); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
continue

View File

@@ -1022,6 +1022,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.metrics,
s.crClient,
multiHookTracker,
s.config.resourceTimeout,
).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.RestoreFinalizer)
}

View File

@@ -21,7 +21,6 @@ import (
"context"
"fmt"
"os"
"strings"
"time"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
@@ -464,7 +463,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
}
// validate the included/excluded namespaces
for _, err := range b.validateNamespaceIncludesExcludes(request.Spec.IncludedNamespaces, request.Spec.ExcludedNamespaces) {
for _, err := range collections.ValidateNamespaceIncludesExcludes(request.Spec.IncludedNamespaces, request.Spec.ExcludedNamespaces) {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}
@@ -473,20 +472,11 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
request.Status.ValidationErrors = append(request.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified")
}
if request.Spec.ResourcePolicy != nil && strings.EqualFold(request.Spec.ResourcePolicy.Kind, resourcepolicies.ConfigmapRefType) {
policiesConfigmap := &corev1api.ConfigMap{}
err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: request.Namespace, Name: request.Spec.ResourcePolicy.Name}, policiesConfigmap)
if err != nil {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("failed to get resource policies %s/%s configmap with err %v", request.Namespace, request.Spec.ResourcePolicy.Name, err))
}
res, err := resourcepolicies.GetResourcePoliciesFromConfig(policiesConfigmap)
if err != nil {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, errors.Wrapf(err, fmt.Sprintf("resource policies %s/%s", request.Namespace, request.Spec.ResourcePolicy.Name)).Error())
} else if err = res.Validate(); err != nil {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, errors.Wrapf(err, fmt.Sprintf("resource policies %s/%s", request.Namespace, request.Spec.ResourcePolicy.Name)).Error())
}
request.ResPolicies = res
resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(*request.Backup, b.kbClient, logger)
if err != nil {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, err.Error())
}
request.ResPolicies = resourcePolicies
return request
}
@@ -596,24 +586,6 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
return providerLocations, nil
}
func (b *backupReconciler) validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces []string) []error {
var errs []error
if errs = collections.ValidateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces); len(errs) > 0 {
return errs
}
namespace := &corev1api.Namespace{}
for _, name := range collections.NewIncludesExcludes().Includes(includedNamespaces...).GetIncludes() {
if name == "" || name == "*" {
continue
}
if err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Name: name}, namespace); err != nil {
errs = append(errs, err)
}
}
return errs
}
// runBackup runs and uploads a validated backup. Any error returned from this function
// causes the backup to be Failed; if no error is returned, the backup's status's Errors
// field is checked to see if the backup was a partial failure.

View File

@@ -85,6 +85,7 @@ func (b *fakeBackupper) FinalizeBackup(
outBackupFile io.Writer,
backupItemActionResolver framework.BackupItemActionResolverV2,
asyncBIAOperations []*itemoperation.BackupOperation,
backupStore persistence.BackupStore,
) error {
args := b.Called(logger, backup, inBackupFile, outBackupFile, backupItemActionResolver, asyncBIAOperations)
return args.Error(0)
@@ -190,16 +191,10 @@ func TestProcessBackupValidationFailures(t *testing.T) {
},
{
name: "use old filter parameters and new filter parameters together",
backup: defaultBackup().IncludeClusterResources(true).IncludedNamespaceScopedResources("Deployment").IncludedNamespaces("foo").Result(),
backup: defaultBackup().IncludeClusterResources(true).IncludedNamespaceScopedResources("Deployment").IncludedNamespaces("default").Result(),
backupLocation: defaultBackupLocation,
expectedErrs: []string{"include-resources, exclude-resources and include-cluster-resources are old filter parameters.\ninclude-cluster-scoped-resources, exclude-cluster-scoped-resources, include-namespace-scoped-resources and exclude-namespace-scoped-resources are new filter parameters.\nThey cannot be used together"},
},
{
name: "nonexisting namespace",
backup: defaultBackup().IncludedNamespaces("non-existing").Result(),
backupLocation: defaultBackupLocation,
expectedErrs: []string{"Invalid included/excluded namespace lists: namespaces \"non-existing\" not found"},
},
}
for _, test := range tests {
@@ -214,11 +209,10 @@ func TestProcessBackupValidationFailures(t *testing.T) {
require.NoError(t, err)
var fakeClient kbclient.Client
namespace := builder.ForNamespace("foo").Result()
if test.backupLocation != nil {
fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation, namespace)
fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation)
} else {
fakeClient = velerotest.NewFakeControllerRuntimeClient(t, namespace)
fakeClient = velerotest.NewFakeControllerRuntimeClient(t)
}
c := &backupReconciler{
@@ -1574,43 +1568,6 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
 	}
 }

-func TestValidateNamespaceIncludesExcludes(t *testing.T) {
-	namespace := builder.ForNamespace("default").Result()
-	reconciler := &backupReconciler{
-		kbClient: velerotest.NewFakeControllerRuntimeClient(t, namespace),
-	}
-
-	// empty string as includedNamespaces
-	includedNamespaces := []string{""}
-	excludedNamespaces := []string{"test"}
-	errs := reconciler.validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces)
-	assert.Empty(t, errs)
-
-	// "*" as includedNamespaces
-	includedNamespaces = []string{"*"}
-	excludedNamespaces = []string{"test"}
-	errs = reconciler.validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces)
-	assert.Empty(t, errs)
-
-	// invalid namespaces
-	includedNamespaces = []string{"1@#"}
-	excludedNamespaces = []string{"2@#"}
-	errs = reconciler.validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces)
-	assert.Len(t, errs, 2)
-
-	// not exist namespaces
-	includedNamespaces = []string{"non-existing-namespace"}
-	excludedNamespaces = []string{}
-	errs = reconciler.validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces)
-	assert.Len(t, errs, 1)
-
-	// valid namespaces
-	includedNamespaces = []string{"default"}
-	excludedNamespaces = []string{}
-	errs = reconciler.validateNamespaceIncludesExcludes(includedNamespaces, excludedNamespaces)
-	assert.Empty(t, errs)
-}

 // Test_getLastSuccessBySchedule verifies that the getLastSuccessBySchedule helper function correctly returns
 // the completion timestamp of the most recent completed backup for each schedule, including an entry for ad-hoc
 // or non-scheduled backups.

View File

@@ -184,6 +184,7 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		outBackupFile,
 		backupItemActionsResolver,
 		operations,
+		backupStore,
 	)
 	if err != nil {
 		log.WithError(err).Error("error finalizing Backup")
View File

@@ -225,7 +225,7 @@ func TestBackupFinalizerReconcile(t *testing.T) {
 	backupStore.On("GetBackupVolumeInfos", mock.Anything).Return(nil, nil)
 	backupStore.On("PutBackupVolumeInfos", mock.Anything, mock.Anything).Return(nil)
 	pluginManager.On("GetBackupItemActionsV2").Return(nil, nil)
-	backupper.On("FinalizeBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, framework.BackupItemActionResolverV2{}, mock.Anything).Return(nil)
+	backupper.On("FinalizeBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, framework.BackupItemActionResolverV2{}, mock.Anything, mock.Anything).Return(nil)
 	_, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}})
 	gotErr := err != nil
 	assert.Equal(t, test.expectError, gotErr)
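
The hunks above all track one signature change: `FinalizeBackup` gained a `backupStore` parameter, so the testify expectation gains one more `mock.Anything`. A minimal, self-contained sketch of why the matcher list grows with the method (the `fakeStore` type here is hypothetical, not Velero code):

```go
package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type fakeStore struct{ mock.Mock }

func (f *fakeStore) Put(key string, value []byte) error {
	// Called forwards the actual arguments; testify matches them against an
	// On(...) expectation with the same arity and compatible matchers.
	args := f.Called(key, value)
	return args.Error(0)
}

func main() {
	f := new(fakeStore)
	f.On("Put", mock.Anything, mock.Anything).Return(nil)
	fmt.Println(f.Put("k", []byte("v"))) // <nil>
}
```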

View File

@@ -40,6 +40,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"

+	corev1api "k8s.io/api/core/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -272,6 +273,20 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	for _, snapCont := range snapConts {
 		// TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents
 		snapCont.ResourceVersion = ""
+		// Make the VolumeSnapshotContent static
+		snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
+			SnapshotHandle: snapCont.Status.SnapshotHandle,
+		}
+		// Set VolumeSnapshotRef to a non-existent VolumeSnapshot, because the
+		// VolumeSnapshotContent validation webhook checks that name and namespace are
+		// not nil, while external-snapshotter needs Source pointing to the snapshot
+		// and a nil VolumeSnapshot reference UID to treat the content as deletable.
+		snapCont.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
+			APIVersion: snapshotv1api.SchemeGroupVersion.String(),
+			Kind:       "VolumeSnapshot",
+			Namespace:  "ns-" + string(snapCont.UID),
+			Name:       "name-" + string(snapCont.UID),
+		}
 		err := b.client.Create(ctx, snapCont, &client.CreateOptions{})
 		switch {
 		case err != nil && apierrors.IsAlreadyExists(err):
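
The intent of the added block is to persist the synced VolumeSnapshotContent as a pre-provisioned (static) object. A hedged sketch of the resulting shape (the external-snapshotter client import path is an assumption; Velero vendors a specific version, and required fields like `Driver` are omitted for brevity):

```go
import (
	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// staticSnapCont sketches what the sync flow recreates: Source carries only the
// snapshot handle, and VolumeSnapshotRef names a placeholder VolumeSnapshot so
// the validating webhook passes while the nil UID keeps the content deletable.
func staticSnapCont(handle, uid string) *snapshotv1api.VolumeSnapshotContent {
	return &snapshotv1api.VolumeSnapshotContent{
		ObjectMeta: metav1.ObjectMeta{Name: "snapcontent-" + uid},
		Spec: snapshotv1api.VolumeSnapshotContentSpec{
			Source: snapshotv1api.VolumeSnapshotContentSource{SnapshotHandle: &handle},
			VolumeSnapshotRef: corev1api.ObjectReference{
				APIVersion: snapshotv1api.SchemeGroupVersion.String(),
				Kind:       "VolumeSnapshot",
				Namespace:  "ns-" + uid,
				Name:       "name-" + uid,
			},
		},
	}
}
```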

View File

@@ -427,7 +427,7 @@ func TestReconcile(t *testing.T) {
 			notCreateFSBR: true,
 		},
 		{
-			name: "Dataupload should not be cancel with dismatch node",
+			name: "Dataupload should not be cancel with mismatch node",
 			pod:  builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
 			du: func() *velerov2alpha1api.DataUpload {
 				du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result()

View File

@@ -19,6 +19,7 @@ package controller
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"

 	"github.com/pkg/errors"
@@ -361,7 +362,11 @@ func UpdatePVBStatusToFailed(ctx context.Context, c client.Client, pvb *velerov1
 	if dataPathError, ok := errOut.(datapath.DataPathError); ok {
 		pvb.Status.SnapshotID = dataPathError.GetSnapshotID()
 	}
-	pvb.Status.Message = errors.WithMessage(errOut, msg).Error()
+	if len(strings.TrimSpace(msg)) == 0 {
+		pvb.Status.Message = errOut.Error()
+	} else {
+		pvb.Status.Message = errors.WithMessage(errOut, msg).Error()
+	}

 	err := c.Patch(ctx, pvb, client.MergeFrom(original))
 	if err != nil {
 		log.WithError(err).Error("error updating PodVolumeBackup status")
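
The reason for the new branch, illustrated: with `github.com/pkg/errors`, wrapping with an empty message still renders the `": "` separator, which is the empty-message wrapping the issue describes. A quick demonstration:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	base := errors.New("data path failed")
	// WithMessage unconditionally renders "msg: cause", even when msg is empty.
	fmt.Println(errors.WithMessage(base, "").Error())         // ": data path failed"
	fmt.Println(errors.WithMessage(base, "snapshot").Error()) // snapshot: data path failed
}
```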

View File

@@ -22,6 +22,8 @@ import (
 	"sync"
 	"time"

+	storagev1api "k8s.io/api/storage/v1"
+
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -43,10 +45,6 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/util/results"
 )

-const (
-	PVPatchMaximumDuration = 10 * time.Minute
-)
-
 type restoreFinalizerReconciler struct {
 	client.Client
 	namespace string
@@ -57,6 +55,7 @@ type restoreFinalizerReconciler struct {
 	clock            clock.WithTickerAndDelayedExecution
 	crClient         client.Client
 	multiHookTracker *hook.MultiHookTracker
+	resourceTimeout  time.Duration
 }

 func NewRestoreFinalizerReconciler(
@@ -68,6 +67,7 @@ func NewRestoreFinalizerReconciler(
 	metrics *metrics.ServerMetrics,
 	crClient client.Client,
 	multiHookTracker *hook.MultiHookTracker,
+	resourceTimeout time.Duration,
 ) *restoreFinalizerReconciler {
 	return &restoreFinalizerReconciler{
 		Client:    client,
@@ -79,6 +79,7 @@ func NewRestoreFinalizerReconciler(
 		clock:            &clock.RealClock{},
 		crClient:         crClient,
 		multiHookTracker: multiHookTracker,
+		resourceTimeout:  resourceTimeout,
 	}
 }
@@ -161,6 +162,7 @@ func (r *restoreFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		volumeInfo:       volumeInfo,
 		restoredPVCList:  restoredPVCList,
 		multiHookTracker: r.multiHookTracker,
+		resourceTimeout:  r.resourceTimeout,
 	}

 	warnings, errs := finalizerCtx.execute()
@@ -244,6 +246,7 @@ type finalizerContext struct {
 	volumeInfo       []*volume.BackupVolumeInfo
 	restoredPVCList  map[string]struct{}
 	multiHookTracker *hook.MultiHookTracker
+	resourceTimeout  time.Duration
 }

 func (ctx *finalizerContext) execute() (results.Result, results.Result) { //nolint:unparam //temporarily ignore the lint report: result 0 is always nil (unparam)
@@ -266,6 +269,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 	var pvWaitGroup sync.WaitGroup
 	var resultLock sync.Mutex

 	maxConcurrency := 3
 	semaphore := make(chan struct{}, maxConcurrency)
@@ -292,7 +296,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 			log := ctx.logger.WithField("PVC", volInfo.PVCName).WithField("PVCNamespace", restoredNamespace)
 			log.Debug("patching dynamic PV is in progress")

-			err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, PVPatchMaximumDuration, true, func(context.Context) (bool, error) {
+			err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, ctx.resourceTimeout, true, func(context.Context) (bool, error) {
 				// wait for PVC to be bound
 				pvc := &v1.PersistentVolumeClaim{}
 				err := ctx.crClient.Get(context.Background(), client.ObjectKey{Name: volInfo.PVCName, Namespace: restoredNamespace}, pvc)
@@ -304,6 +308,27 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 					return false, err
 				}

+				// We are handling a common but specific scenario where a PVC is in a pending state and uses a storage class with
+				// VolumeBindingMode set to WaitForFirstConsumer. In this case, the PV patch step is skipped to avoid
+				// failures due to the PVC not being bound, which could cause a timeout and result in a failed restore.
+				if pvc != nil && pvc.Status.Phase == v1.ClaimPending {
+					// check if storage class used has VolumeBindingMode as WaitForFirstConsumer
+					scName := *pvc.Spec.StorageClassName
+					sc := &storagev1api.StorageClass{}
+					err = ctx.crClient.Get(context.Background(), client.ObjectKey{Name: scName}, sc)
+					if err != nil {
+						errs.Add(restoredNamespace, err)
+						return false, err
+					}
+					// skip PV patch step for this scenario
+					// because pvc would not be bound and the PV patch step would fail due to timeout thus failing the restore
+					if *sc.VolumeBindingMode == storagev1api.VolumeBindingWaitForFirstConsumer {
+						log.Warnf("skipping PV patch to restore custom reclaim policy, if any: StorageClass %s used by PVC %s has VolumeBindingMode set to WaitForFirstConsumer, and the PVC is also in a pending state", scName, pvc.Name)
+						return true, nil
+					}
+				}
+
 				if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
 					log.Debugf("PVC: %s not ready", pvc.Name)
 					return false, nil
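
Condensing the added branch into a standalone helper for readability. This is a sketch under the same types and client; `shouldSkipPVPatch` is a hypothetical name, not Velero's API:

```go
package restorehelpers

import (
	"context"

	v1 "k8s.io/api/core/v1"
	storagev1api "k8s.io/api/storage/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// shouldSkipPVPatch reports whether the PV patch step can be skipped: a pending
// PVC whose StorageClass binds on first consumer will not reach Bound until a
// pod is scheduled, so polling for Bound would only run out the timeout.
func shouldSkipPVPatch(ctx context.Context, c client.Client, pvc *v1.PersistentVolumeClaim) (bool, error) {
	if pvc.Status.Phase != v1.ClaimPending || pvc.Spec.StorageClassName == nil {
		return false, nil
	}
	sc := &storagev1api.StorageClass{}
	if err := c.Get(ctx, client.ObjectKey{Name: *pvc.Spec.StorageClassName}, sc); err != nil {
		return false, err
	}
	return sc.VolumeBindingMode != nil &&
		*sc.VolumeBindingMode == storagev1api.VolumeBindingWaitForFirstConsumer, nil
}
```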

View File

@@ -138,6 +138,7 @@ func TestRestoreFinalizerReconcile(t *testing.T) {
 			metrics.NewServerMetrics(),
 			fakeClient,
 			hook.NewMultiHookTracker(),
+			10*time.Minute,
 		)
 		r.clock = testclocks.NewFakeClock(now)
@@ -200,6 +201,7 @@ func TestUpdateResult(t *testing.T) {
 		metrics.NewServerMetrics(),
 		fakeClient,
 		hook.NewMultiHookTracker(),
+		10*time.Minute,
 	)
 	restore := builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Result()
 	res := map[string]results.Result{"warnings": {}, "errors": {}}
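
For reference, the polling contract behind the timeout being plumbed through these constructors (a sketch; `isBound` stands in for the PVC-bound check, and the commit messages indicate the value now comes from the server's resource timeout argument rather than the removed `PVPatchMaximumDuration` constant):

```go
package restorehelpers

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilBound polls isBound every 10s until it returns true, an error
// occurs, or resourceTimeout elapses; the fourth argument makes the first
// poll run immediately rather than after the first interval.
func waitUntilBound(ctx context.Context, resourceTimeout time.Duration, isBound func(context.Context) (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, 10*time.Second, resourceTimeout, true, isBound)
}
```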

View File

@@ -642,14 +642,7 @@ func TestPeekExpose(t *testing.T) {
 			Name:      backup.Name,
 		},
 		Status: corev1.PodStatus{
-			Phase: corev1.PodPending,
-			Conditions: []corev1.PodCondition{
-				{
-					Type:    corev1.PodScheduled,
-					Reason:  "Unschedulable",
-					Message: "unrecoverable",
-				},
-			},
+			Phase: corev1.PodFailed,
 		},
 	}
@@ -679,7 +672,7 @@ func TestPeekExpose(t *testing.T) {
 			kubeClientObj: []runtime.Object{
 				backupPodUrecoverable,
 			},
-			err: "Pod is unschedulable: unrecoverable",
+			err: "Pod is in abnormal state Failed",
 		},
 		{
 			name: "succeed",

View File

@@ -429,14 +429,7 @@ func TestRestorePeekExpose(t *testing.T) {
 			Name:      restore.Name,
 		},
 		Status: corev1api.PodStatus{
-			Phase: corev1api.PodPending,
-			Conditions: []corev1api.PodCondition{
-				{
-					Type:    corev1api.PodScheduled,
-					Reason:  "Unschedulable",
-					Message: "unrecoverable",
-				},
-			},
+			Phase: corev1api.PodFailed,
 		},
 	}
@@ -463,7 +456,7 @@ func TestRestorePeekExpose(t *testing.T) {
 			kubeClientObj: []runtime.Object{
 				restorePodUrecoverable,
 			},
-			err: "Pod is unschedulable: unrecoverable",
+			err: "Pod is in abnormal state Failed",
 		},
 		{
 			name: "succeed",

View File

@@ -0,0 +1,62 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumehelper

import (
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
	"github.com/vmware-tanzu/velero/internal/volumehelper"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)

// ShouldPerformSnapshotWithBackup is used by third-party plugins.
// It checks on demand whether a PVC or PodVolume should be backed
// up, whereas volumeHelperImpl assumes a VolumeHelper instance has
// been initialized before its ShouldPerformXXX functions are called.
func ShouldPerformSnapshotWithBackup(
	unstructured runtime.Unstructured,
	groupResource schema.GroupResource,
	backup velerov1api.Backup,
	crClient crclient.Client,
	logger logrus.FieldLogger,
) (bool, error) {
	resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(
		backup,
		crClient,
		logger,
	)
	if err != nil {
		return false, err
	}

	volumeHelperImpl := volumehelper.NewVolumeHelperImpl(
		resourcePolicies,
		backup.Spec.SnapshotVolumes,
		logger,
		crClient,
		boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
		true,
	)

	return volumeHelperImpl.ShouldPerformSnapshot(unstructured, groupResource)
}
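
A hypothetical caller for the new entry point above, sketching how a third-party backup plugin might use it. The public package import path and all variable names are illustrative, not confirmed by the diff:

```go
package myplugin

import (
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/volumehelper" // assumed location of the wrapper
)

// shouldSnapshotPVC asks Velero's volume helper whether the PVC being processed
// should go down the snapshot path, honoring resource policies and backup spec.
func shouldSnapshotPVC(
	pvc runtime.Unstructured,
	backup velerov1api.Backup,
	c crclient.Client,
	log logrus.FieldLogger,
) (bool, error) {
	return volumehelper.ShouldPerformSnapshotWithBackup(
		pvc,
		schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"},
		backup,
		c,
		log,
	)
}
```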

View File

@@ -182,10 +182,6 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
 	m.repoLocker.LockExclusive(repo.Name)
 	defer m.repoLocker.UnlockExclusive(repo.Name)

-	prd, err := m.getRepositoryProvider(repo)
-	if err != nil {
-		return errors.WithStack(err)
-	}
 	param, err := m.assembleRepoParam(repo)
 	if err != nil {
 		return errors.WithStack(err)
@@ -208,11 +204,7 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
 		return nil
 	}

-	if err := prd.BoostRepoConnect(context.Background(), param); err != nil {
-		return errors.WithStack(err)
-	}
-
-	log.Info("Start to maintence repo")
+	log.Info("Start to maintenance repo")

 	maintenanceJob, err := buildMaintenanceJob(m.maintenanceCfg, param, m.client, m.namespace)
 	if err != nil {
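
Context for the deletions above: since maintenance is dispatched to a dedicated Kubernetes Job via buildMaintenanceJob, PruneRepo no longer needs an in-process repository provider or a boosted repo connection. A speculative sketch of the kind of object such a builder produces; every name, image, and argument here is a placeholder, not Velero's actual output:

```go
package repomanager

import (
	batchv1api "k8s.io/api/batch/v1"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// maintenanceJob sketches a one-shot Job that runs repo maintenance out of
// process: no retries, never restarted, named after the repository.
func maintenanceJob(repoName, namespace, image string) *batchv1api.Job {
	backoff := int32(0)
	return &batchv1api.Job{
		ObjectMeta: metav1.ObjectMeta{GenerateName: repoName + "-maintain-", Namespace: namespace},
		Spec: batchv1api.JobSpec{
			BackoffLimit: &backoff,
			Template: corev1api.PodTemplateSpec{
				Spec: corev1api.PodSpec{
					RestartPolicy: corev1api.RestartPolicyNever,
					Containers: []corev1api.Container{{
						Name:  "repo-maintenance",
						Image: image,
						Args:  []string{"repo-maintenance", "--repo-name=" + repoName},
					}},
				},
			},
		},
	}
}
```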

View File

@@ -121,14 +121,7 @@ func IsPodUnrecoverable(pod *corev1api.Pod, log logrus.FieldLogger) (bool, strin
 		return true, fmt.Sprintf("Pod is in abnormal state %s", pod.Status.Phase)
 	}

-	if pod.Status.Phase == corev1api.PodPending && len(pod.Status.Conditions) > 0 {
-		for _, condition := range pod.Status.Conditions {
-			if condition.Type == corev1api.PodScheduled && condition.Reason == "Unschedulable" {
-				log.Warnf("Pod is unschedulable %s", condition.Message)
-				return true, fmt.Sprintf("Pod is unschedulable: %s", condition.Message)
-			}
-		}
-	}
+	// removed "Unschedulable" check since unschedulable condition isn't always permanent

 	// Check the Status field
 	for _, containerStatus := range pod.Status.ContainerStatuses {
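
The surviving check, extracted for clarity. This is a sketch; the exact phase condition preceding the kept return is an assumption inferred from the message format in the updated tests:

```go
package kubeutil

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// isPodTerminal keeps only the phase-based test: terminal phases are
// unrecoverable, while Pending with an Unschedulable condition is treated as
// transient (capacity may free up or an autoscaler may add a node) and retried.
func isPodTerminal(pod *corev1api.Pod) (bool, string) {
	if pod.Status.Phase == corev1api.PodFailed || pod.Status.Phase == corev1api.PodUnknown {
		return true, fmt.Sprintf("Pod is in abnormal state %s", pod.Status.Phase)
	}
	return false, ""
}
```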

View File

@@ -401,21 +401,6 @@ func TestIsPodUnrecoverable(t *testing.T) {
 			},
 			want: false,
 		},
-		{
-			name: "pod is unschedulable",
-			pod: &corev1api.Pod{
-				Status: corev1api.PodStatus{
-					Phase: corev1api.PodPending,
-					Conditions: []corev1api.PodCondition{
-						{
-							Type:   corev1api.PodScheduled,
-							Reason: "Unschedulable",
-						},
-					},
-				},
-			},
-			want: true,
-		},
 		{
 			name: "pod is normal",
 			pod: &corev1api.Pod{

View File

@@ -581,7 +581,7 @@ func TestGetPodVolumeNameForPVC(t *testing.T) {
 		expectedVolumeName string
 	}{
 		{
-			name: "should get volume name for pod with multuple PVCs",
+			name: "should get volume name for pod with multiple PVCs",
 			pod: v1.Pod{
 				Spec: v1.PodSpec{
 					Volumes: []v1.Volume{

View File

@@ -241,6 +241,8 @@ data:
   <old-node-name>: <new-node-name>
 ```
+
+Note: This feature doesn't work for PVCs with ```WaitForFirstConsumer``` as the ```volumeBindingMode```. These kinds of PVCs won't be bound until the pod is scheduled, and the scheduler will overwrite the selected-node annotation with the node where the pod is scheduled.

 ## Restoring into a different namespace

 Velero can restore resources into a different namespace than the one they were backed up from. To do this, use the `--namespace-mappings` flag:
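
A representative invocation of that flag (backup and namespace names are illustrative; mappings are comma-separated `old:new` pairs):

```
velero restore create --from-backup backup-1 --namespace-mappings old-ns-1:new-ns-1,old-ns-2:new-ns-2
```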

View File

@@ -19,7 +19,7 @@ package test
 import (
 	"context"
 	"fmt"
-	"math/rand"
+	"math/rand/v2"
 	"strings"
 	"time"
@@ -105,8 +105,7 @@ func (t *TestCase) Init() error {
 }

 func (t *TestCase) GenerateUUID() string {
-	rand.Seed(time.Now().UnixNano())
-	return fmt.Sprintf("%08d", rand.Intn(100000000))
+	return fmt.Sprintf("%08d", rand.IntN(100000000))
 }

 func (t *TestCase) CreateResources() error {
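
An aside on the hunk above: `math/rand/v2` seeds its global generator from OS entropy automatically, so the explicit `Seed` call goes away and `Intn` becomes `IntN`. A minimal check:

```go
package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	// No Seed call needed; the v2 global generator is randomly seeded.
	fmt.Printf("%08d\n", rand.IntN(100000000))
}
```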
@@ -168,9 +167,16 @@ func (t *TestCase) Verify() error {
 func (t *TestCase) Start() error {
 	t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour)
 	veleroCfg := t.GetTestCase().VeleroCfg
-	if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) && strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
+	if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) &&
+		strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
 		Skip("Skip due to issue https://github.com/kubernetes/kubernetes/issues/114384 on AKS")
 	}
+
+	if veleroCfg.UploaderType == UploaderTypeRestic &&
+		strings.Contains(t.GetTestCase().CaseBaseName, "ParallelFiles") {
+		Skip("Skip Parallel Files upload and download test cases for environments using Restic as uploader.")
+	}
 	return nil
 }
@@ -178,11 +184,15 @@ func (t *TestCase) Clean() error {
 	veleroCfg := t.GetTestCase().VeleroCfg
 	if !veleroCfg.Debug {
 		By(fmt.Sprintf("Clean namespace with prefix %s after test", t.CaseBaseName), func() {
-			CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName)
+			if err := CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName); err != nil {
+				fmt.Println("Fail to cleanup namespaces: ", err)
+			}
 		})
 		By("Clean backups after test", func() {
 			veleroCfg.ClientToInstallVelero = &t.Client
-			DeleteAllBackups(t.Ctx, &veleroCfg)
+			if err := DeleteAllBackups(t.Ctx, &veleroCfg); err != nil {
+				fmt.Println("Fail to clean backups after test: ", err)
+			}
 		})
 	}

 	return nil
View File

@@ -38,6 +38,8 @@ const AWS = "aws"
 const Gcp = "gcp"
 const Vsphere = "vsphere"

+const UploaderTypeRestic = "restic"
+
 var PublicCloudProviders = []string{AWS, Azure, Gcp, Vsphere}
 var LocalCloudProviders = []string{Kind, VanillaZFS}
 var CloudProviders = append(PublicCloudProviders, LocalCloudProviders...)