Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-11 23:32:53 +00:00)

Compare commits: 149 commits
Commits in this comparison (SHA1):

06d12dec40, b68486221d, 5abd318b2c, 7c051514fd, c8fd9d4d62, ccfbcc5455, ea25b8a793, 2d6578635d,
fc44f3b8f0, df72745909, 453bd93c90, 65939c920e, c042d477ab, 5c44ed49a5, 3325a0cd1b, b2d3fa0bec,
25fc2f4d6e, a036e8d463, f92cdb1f76, 0531dbb1a2, de55794381, d7b4b0a770, f1c93bd6c4, 06e3773b22,
32a8bbb9ac, 84d8bbda24, 86e34eec28, 5923046471, d1399225da, 6438fc9a69, a674a1eaff, bb4f9094fd,
1264c438c1, 7e35fd3261, 482ec13d38, dd825ef8bb, dc525aa045, 36ad5dafa9, 7b76047596, f1fcec3514,
17ad487803, bb6c1f60ea, 0be6ad3a06, 1c462d5f6d, 32deef7ae3, 72b5e7aad6, 5c4fdfe147, 226237bab4,
bbc9790316, 10744ec516, 905cd43140, 3034cdb448, 353ff55e42, 468017d7db, 6bf705fd25, 7a909d8ff5,
ef1b9816b2, 457fcc6893, b498847b5b, af9697814e, d92a051795, a3cb39d62e, c1ace31466, 8bf98e8895,
e53cfdf85e, d93cc9094a, 15dd67e203, 877592194b, 17b495fcfd, b99a59480d, a789976a03, 52878de077,
432a5fe566, 175047baa9, 0eaf14ed19, c415fd4bcc, 554403df5c, aba64ba151, 3a410c9f04, 2f92f78be5,
9d5dd8e09d, 6103073551, 83f892d81f, 2cd15f1e4b, 27a89df34d, e4c2b2b157, edefe7a63b, a097094bcf,
bc4dc6c0c8, 343e54f1b8, 08d44b02a8, a8c76a4a00, 0623ac363a, 1aea12a80c, 7112c62e49, dcb891a307,
21353f00a8, 5e7114899b, b035680ce6, 9eb133e635, 6f1262d4c6, 48e3278c6c, acfc6e474f, 993d2c775f,
b70b01cde9, 8b8a5a2bcc, 5b36cd7e83, 3240fb196c, d9859d99ba, 18d4fe45e8, 60d5bb22f7, 9468b8cfa9,
420562111b, cf0b2e9139, 506415e60c, 3733a40637, fe1ade0226, 86e1a74937, 6260a44e62, 06d9bfae8d,
4d1617470f, 1b2c82c9eb, 040060082a, fc653bdfbe, 6790a18814, 93995bfd00, 80572934dc, 41d9b67945,
a06107ac70, 40a94e39ad, 7ea0d434d6, 6b884ecc39, 183f7ac154, 75bda412a1, a2eb10df8f, 90bc1abd21,
45165503ba, 53530130a5, ed256d74dd, ab28a09a07, 90f4cc5497, f505ed709b, 28074e3f37, 240f33c09d,
fd08848471, 5f585be24b, 5480acf0a0, e2d3e84bab, 0c0ccf949b
.github/workflows/crds-verify-kind.yaml (vendored), 2 changes

@@ -14,7 +14,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v4
      with:
-       go-version: '1.20'
+       go-version: '1.20.10'
      id: go
    # Look for a CLI that's made for this PR
    - name: Fetch built CLI
.github/workflows/e2e-test-kind.yaml (vendored), 4 changes

@@ -14,7 +14,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v4
      with:
-       go-version: '1.20'
+       go-version: '1.20.10'
      id: go
    # Look for a CLI that's made for this PR
    - name: Fetch built CLI

@@ -72,7 +72,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v4
      with:
-       go-version: '1.20'
+       go-version: '1.20.10'
      id: go
    - name: Check out the code
      uses: actions/checkout@v2
.github/workflows/pr-ci-check.yml (vendored), 2 changes

@@ -10,7 +10,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v4
      with:
-       go-version: '1.20'
+       go-version: '1.20.10'
      id: go
    - name: Check out the code
      uses: actions/checkout@v2
.github/workflows/push.yml (vendored), 7 changes

@@ -18,7 +18,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v4
      with:
-       go-version: '1.20'
+       go-version: '1.20.10'
      id: go

    - uses: actions/checkout@v3

@@ -48,7 +48,10 @@ jobs:
        version: latest

    - name: Build
-     run: make local
+     run: |
+       make local
+       # Clean go cache to ease the build environment storage pressure.
+       go clean -modcache -cache

    - name: Test
      run: make test
Dockerfile, 14 changes

@@ -13,7 +13,7 @@
# limitations under the License.

# Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.20-bullseye as velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.20.10-bullseye as velero-builder

ARG GOPROXY
ARG BIN

@@ -43,10 +43,11 @@ RUN mkdir -p /output/usr/bin && \
    go build -o /output/${BIN} \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
    go build -o /output/velero-helper \
-   -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper
+   -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
+   go clean -modcache -cache

# Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.20-bullseye as restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.20.10-bullseye as restic-builder

ARG BIN
ARG TARGETOS

@@ -65,10 +66,11 @@ COPY . /go/src/github.com/vmware-tanzu/velero

RUN mkdir -p /output/usr/bin && \
    export GOARM=$(echo "${GOARM}" | cut -c2-) && \
-   /go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh
+   /go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh && \
+   go clean -modcache -cache

# Velero image packing section
-FROM gcr.io/distroless/base-nossl-debian11:nonroot
+FROM paketobuildpacks/run-jammy-tiny:0.2.11

LABEL maintainer="Xun Jiang <jxun@vmware.com>"

@@ -76,5 +78,5 @@ COPY --from=velero-builder /output /

COPY --from=restic-builder /output /

-USER nonroot:nonroot
+USER cnb:cnb
@@ -42,7 +42,7 @@ The following is a list of the supported Kubernetes versions for each Velero version

| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version           |
|----------------|-------------------------------------------|----------------------------------------|
-| 1.12           | 1.18-latest                               | 1.25.7, 1.26.5, 1.26.7, and 1.27.3     |
+| 1.12           | 1.18-latest                               | 1.25.7, 1.26.5, 1.27.6 and 1.28.0      |
| 1.11           | 1.18-latest                               | 1.23.10, 1.24.9, 1.25.5, and 1.26.1    |
| 1.10           | 1.18-latest                               | 1.22.5, 1.23.8, 1.24.6 and 1.25.1      |
| 1.9            | 1.18-latest                               | 1.20.5, 1.21.2, 1.22.5, 1.23, and 1.24 |
Tiltfile, 2 changes

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip()

tilt_helper_dockerfile_header = """
# Tilt image
-FROM golang:1.20 as tilt-helper
+FROM golang:1.20.10 as tilt-helper

# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
@@ -100,7 +100,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
* Enable staticcheck linter. (#5788, @blackpiglet)
* Set Kopia IgnoreUnknownTypes in ErrorHandlingPolicy to True for ignoring backup unknown file type (#5786, @qiuming-best)
* Bump up Restic version to 0.15.0 (#5784, @qiuming-best)
-* Add File system backup related matrics to Grafana dashboard
+* Add File system backup related metrics to Grafana dashboard
  - Add metrics backup_warning_total for record of total warnings
  - Add metrics backup_last_status for record of last status of the backup (#5779, @allenxu404)
* Design for Handling backup of volumes by resources filters (#5773, @qiuming-best)
@@ -1,3 +1,78 @@
## v1.12.2
### 2023-11-20

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.2

### Container Image
`velero/velero:v1.12.2`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### All changes
* Fix issue #7068, due to a behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order (#7114, @Lyndon-Li)
* Update Backup.Status.CSIVolumeSnapshotsCompleted during finalize (#7111, @kaovilai)
* Cherry-pick #6917 - Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers (#7049, @27149chen)
* Bump up Velero base image to latest patch release (#7110, @allenxu404)
* Fix the node-agent missing metrics-address defines. (#7098, @yanggangtony)
* Fix issue #7094, fallback to full backup if previous snapshot is not found (#7097, @Lyndon-Li)
* Add DataUpload Result and CSI VolumeSnapshot check for restore PV. (#7087, @blackpiglet)
* Fix issue #7027, data mover backup exposer should not assume the first volume as the backup volume in backup pod (#7060, @Lyndon-Li)
* Truncate the credential file to avoid the change of secret content messing it up (#7058, @ywk253100)
* restore: Use warning when Create IsAlreadyExist and Get error (#7054, @kaovilai)
* Read information from the credential specified by BSL (#7033, @ywk253100)
* Fix issue 6913: Velero Built-in Datamover: Backup stucks in phase WaitingForPluginOperations when Node Agent pod gets restarted (#7025, @shubham-pampattiwar)
* Fix unified repository (kopia) s3 credentials profile selection (#6997, @kaovilai)

## v1.12.1
### 2023-10-20

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.1

### Container Image
`velero/velero:v1.12.1`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### Highlights

#### Data Mover Adds Support for Block Mode Volumes
For PersistentVolumes with volumeMode set to Block, the volumes are mounted as raw block devices in pods. In 1.12.1, Velero CSI snapshot data movement supports backing up and restoring this kind of volume on Linux-based Kubernetes clusters.

#### New Parameter in Installation to Enable Data Mover
The `velero install` sub-command now includes a new parameter, `--default-snapshot-move-data`, which configures the Velero server to move data by default for all snapshots supporting data movement. This feature is useful for users who will always want to use VBDM for backups instead of plain CSI, as they no longer need to specify the `--snapshot-move-data` flag for each individual backup.

#### Velero Base Image change
The base image previously used by Velero was `distroless`, which contains several CVEs that cannot be addressed quickly. As a result, Velero will now use the `paketobuildpacks` image starting from this new version.

### Limitations/Known issues
* The data mover's support for block mode volumes is currently only applicable to Linux environments.

### All changes
* Import auth provider plugins (#6970, @0x113)
* Perf improvements for existing resource restore (#6948, @sseago)
* Retry failed create when using generateName (#6943, @sseago)
* Fix issue #6647, add the --default-snapshot-move-data parameter to Velero install, so that users don't need to specify --snapshot-move-data per backup when they want to move snapshot data for all backups (#6940, @Lyndon-Li)
* Partially fix #6734, guide Kubernetes' scheduler to spread backup pods evenly across nodes as much as possible, so that data mover backup could achieve better parallelism (#6935, @Lyndon-Li)
* Replace the base image with paketobuildpacks image (#6934, @ywk253100)
* Add support for block volumes with Kopia (#6897, @dzaninovic)
* Set ParallelUploadAboveSize as MaxInt64 and flush repo after setting up policy so that policy is retrieved correctly by TreeForSource (#6886, @Lyndon-Li)
* Kubernetes 1.27 new job label batch.kubernetes.io/controller-uid are deleted during restore per https://github.com/kubernetes/kubernetes/pull/114930 (#6713, @kaovilai)
* Add `orLabelSelectors` for backup, restore commands (#6881, @nilesh-akhade)
* Fix issue #6859, move plugin depending podvolume functions to util pkg, so as to remove the dependencies to unnecessary repository packages like kopia, azure, etc. (#6877, @Lyndon-Li)
* Fix issue #6786, always delete VSC regardless of the deletion policy (#6873, @Lyndon-Li)
* Fix #6988, always get region from BSL if it is not empty (#6991, @Lyndon-Li)
* Add both non-Windows version and Windows version code for PVC block mode logic. (#6986, @blackpiglet)

## v1.12
### 2023-08-18

@@ -23,17 +98,17 @@ CSI Snapshot Data Movement is useful in below scenarios:
* For on-premises users, the storage usually doesn't support durable snapshots, so it is impossible/less efficient/cost ineffective to keep volume snapshots by the storage. This feature helps to move the snapshot data to a storage with lower cost and larger scale for long time preservation.
* For public cloud users, this feature helps users to fulfill the multiple cloud strategy. It allows users to back up volume snapshots from one cloud provider and preserve or restore the data to another cloud provider. Then users will be free to flow their business data across cloud providers based on Velero backup and restore.

-CSI Snapshot Data Movement is built according to the Volume Snapshot Data Movement design ([Volume Snapshot Data Movement](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md)). More details can be found in the design.
+CSI Snapshot Data Movement is built according to the Volume Snapshot Data Movement design ([Volume Snapshot Data Movement design](https://github.com/vmware-tanzu/velero/blob/main/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md)). Additionally, guidance on how to use the feature can be found in the Volume Snapshot Data Movement doc ([Volume Snapshot Data Movement doc](https://velero.io/docs/v1.12/csi-snapshot-data-movement)).

#### Resource Modifiers
In many use cases, customers often need to substitute specific values in Kubernetes resources during the restoration process, like changing the namespace, changing the storage class, etc.

-To address this need, Resource Modifiers (also known as JSON Substitutions) offer a generic solution in the restore workflow. It allows the user to define filters for specific resources and then specify a JSON patch (operator, path, value) to apply to the resource. This feature simplifies the process of making substitutions without requiring the implementation of a new RestoreItemAction plugin. More details can be found in Volume Snapshot Resource Modifiers design ([Resource Modifiers](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/json-substitution-action-design.md)).
+To address this need, Resource Modifiers (also known as JSON Substitutions) offer a generic solution in the restore workflow. It allows the user to define filters for specific resources and then specify a JSON patch (operator, path, value) to apply to the resource. This feature simplifies the process of making substitutions without requiring the implementation of a new RestoreItemAction plugin. More design details can be found in Resource Modifiers design ([Resource Modifiers design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/json-substitution-action-design.md)). For instructions on how to use the feature, please refer to Resource Modifiers doc ([Resource Modifiers doc](https://velero.io/docs/v1.12/restore-resource-modifiers)).

#### Multiple VolumeSnapshotClasses
Prior to version 1.12, the Velero CSI plugin would choose the VolumeSnapshotClass in the cluster based on matching driver names and the presence of the "velero.io/csi-volumesnapshot-class" label. However, this approach proved inadequate for many user scenarios.

-With the introduction of version 1.12, Velero now offers support for multiple VolumeSnapshotClasses in the CSI Plugin, enabling users to select a specific class for a particular backup. More details can be found in Multiple VolumeSnapshotClasses design ([Multiple VolumeSnapshotClasses](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/multiple-csi-volumesnapshotclass-support.md)).
+With the introduction of version 1.12, Velero now offers support for multiple VolumeSnapshotClasses in the CSI Plugin, enabling users to select a specific class for a particular backup. More design details can be found in Multiple VolumeSnapshotClasses design ([Multiple VolumeSnapshotClasses design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/multiple-csi-volumesnapshotclass-support.md)). For instructions on how to use the feature, please refer to Multiple VolumeSnapshotClasses doc ([Multiple VolumeSnapshotClasses doc](https://velero.io/docs/v1.12/csi/#implementation-choices)).

#### Restore Finalizer
Before v1.12, the restore controller would only delete restore resources but wouldn't delete restore data from the backup storage location when the command `velero restore delete` was executed. The only chance Velero deletes restore data from the backup storage location is when the associated backup is deleted.

@@ -51,10 +126,12 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
* Prior to v1.12, the parameter `uploader-type` for Velero installation had a default value of "restic". However, starting from this version, the default value has been changed to "kopia". This means that Velero will now use Kopia as the default path for file system backup.
* The ways of setting CSI snapshot time have changed in v1.12. First, the sync waiting time for creating a snapshot handle in the CSI plugin is changed from the fixed 10 minutes into backup.Spec.CSISnapshotTimeout. Second, the async waiting time for VolumeSnapshot and VolumeSnapshotContent's status turning into `ReadyToUse` in operation uses the operation's timeout. The default value is 4 hours.
* As from [Velero helm chart v4.0.0](https://github.com/vmware-tanzu/helm-charts/releases/tag/velero-4.0.0), it supports multiple BSL and VSL, and the BSL and VSL have changed from the map into a slice, and [this breaking change](https://github.com/vmware-tanzu/helm-charts/pull/413) is not backward compatible. So it would be best to change the BSL and VSL configuration into slices before the upgrade.
* Prior to v1.12, deleting the Velero namespace would easily remove all the resources within it. However, with the introduction of finalizers attached to the Velero CRs including `restore`, `dataupload`, and `datadownload` in this version, directly deleting the Velero namespace may get stuck indefinitely because the pods responsible for handling the finalizers might be deleted before the resources attached to the finalizers. To avoid this issue, please use the command `velero uninstall` to delete all the Velero resources or ensure that you handle the finalizer appropriately before deleting the Velero namespace.

### Limitations/Known issues
* The Azure plugin supports the Azure AD Workload identity way, but it only works for Velero native snapshots. It cannot support filesystem backup and snapshot data mover scenarios.
* File System backup under the Kopia path and CSI Snapshot Data Movement backup fail to back up files that are larger than 2GiB due to issue https://github.com/vmware-tanzu/velero/issues/6668.

### All Changes

@@ -132,3 +209,10 @@ prior PVC restores with CSI (#6111, @eemcmullan)
* Make GetPluginConfig accessible from other packages. (#6151, @tkaovila)
* Ignore not found error during patching managedFields (#6136, @ywk253100)
* Fix the goreleaser issues and add a new goreleaser action (#6109, @blackpiglet)
* Add CSI snapshot data movement doc (#6793, @Lyndon-Li)
* Use old (origin) namespace in resource modifier conditions in case namespace may change during restore (#6724, @27149chen)
* Fix #6752: add namespace exclude check. (#6762, @blackpiglet)
* Update restore controller logic for restore deletion (#6761, @ywk253100)
* Fix issue #6753, remove the check for read-only BSL in restore async operation controller since Velero cannot fully support read-only mode BSL in restore at present (#6758, @Lyndon-Li)
* Fixes #6636, skip subresource in resource discovery (#6688, @27149chen)
* This PR made some improvements in Resource Modifiers: 1. add label selector 2. change the field name from groupKind to groupResource (#6704, @27149chen)
@@ -61,7 +61,7 @@ in progress for 1.9.
* Add rbac and annotation test cases (#4455, @mqiu)
* remove --crds-version in velero install command. (#4446, @jxun)
* Upgrade e2e test vsphere plugin (#4440, @mqiu)
-* Fix e2e test failures for the inappropriate optimaze of velero install (#4438, @mqiu)
+* Fix e2e test failures for the inappropriate optimize of velero install (#4438, @mqiu)
* Limit backup namespaces on test resource filtering cases (#4437, @mqiu)
* Bump up Go to 1.17 (#4431, @reasonerjt)
* Added `<backup name>`-itemsnapshots.json.gz to the backup format. This file exists
@@ -49,6 +49,9 @@ spec:
        - mountPath: /host_pods
          mountPropagation: HostToContainer
          name: host-pods
+       - mountPath: /var/lib/kubelet/plugins
+         mountPropagation: HostToContainer
+         name: host-plugins
        - mountPath: /scratch
          name: scratch
        - mountPath: /credentials

@@ -60,6 +63,9 @@ spec:
      - hostPath:
          path: /var/lib/kubelet/pods
        name: host-pods
+     - hostPath:
+         path: /var/lib/kubelet/plugins
+       name: host-plugins
      - emptyDir: {}
        name: scratch
      - name: cloud-credentials
@@ -175,7 +175,7 @@ If there are one or more, download the backup tarball from backup storage, untar

## Alternatives Considered

-Another proposal for higher level `DeleteItemActions` was initially included, which would require implementors to individually download the backup tarball themselves.
+Another proposal for higher level `DeleteItemActions` was initially included, which would require implementers to individually download the backup tarball themselves.
While this may be useful long term, it is not a good fit for the current goals as each plugin would be re-implementing a lot of boilerplate.
See the deletion-plugins.md file for this alternative proposal in more detail.
@@ -26,7 +26,7 @@ Currently velero supports substituting certain values in the K8s resources durin
<!-- ## Background -->

## Goals
- Allow the user to specify a GroupKind, Name(optional), JSON patch for modification. (changed to:)
- Allow the user to specify a GroupResource, Name(optional), JSON patch for modification.
- Allow the user to specify multiple JSON patch.

## Non Goals

@@ -74,7 +74,7 @@ velero restore create --from-backup backup-1 --resource-modifier-configmap resou

### Resource Modifier ConfigMap Structure
- User first needs to provide details on which resources the JSON Substitutions need to be applied.
-  - For this the user will provide 4 inputs - Namespaces(for NS Scoped resources), GroupKind (kind.group format similar to includeResources field in velero) and Name Regex(optional).
+  - For this the user will provide 4 inputs - Namespaces(for NS Scoped resources), GroupResource (resource.group format similar to includeResources field in velero) and Name Regex(optional).
  - If the user does not provide the Name, the JSON Substitutions will be applied to all the resources of the given Group and Kind under the given namespaces.

- Further the user will specify the JSON Patch using the structure of kubectl's "JSON Patch" based inputs.

@@ -83,7 +83,7 @@ velero restore create --from-backup backup-1 --resource-modifier-configmap resou
version: v1
resourceModifierRules:
- conditions:
-    groupKind: persistentvolumeclaims
+    groupResource: persistentvolumeclaims
     resourceNameRegex: "mysql.*"
     namespaces:
     - bar

@@ -96,6 +96,7 @@ resourceModifierRules:
      path: "/metadata/labels/test"
```
- The above configmap will apply the JSON Patch to all the PVCs in the namespaces bar and foo with name starting with mysql. The JSON Patch will replace the storageClassName with "premium" and remove the label "test" from the PVCs.
- Note that the Namespace here is the original namespace of the backed up resource, not the new namespace where the resource is going to be restored. (new line)
- The user can specify multiple JSON Patches for a particular resource. The patches will be applied in the order specified in the configmap. A subsequent patch is applied in order and if multiple patches are specified for the same path, the last patch will override the previous patches.
- The user can specify multiple resourceModifierRules in the configmap. The rules will be applied in the order specified in the configmap.

@@ -119,7 +120,7 @@ kubectl create cm <configmap-name> --from-file <yaml-file> -n velero
version: v1
resourceModifierRules:
- conditions:
-    groupKind: persistentvolumeclaims.storage.k8s.io
+    groupResource: persistentvolumeclaims.storage.k8s.io
     resourceNameRegex: ".*"
     namespaces:
     - bar
@@ -67,12 +67,12 @@ The Velero CSI plugin chooses the VolumeSnapshotClass in the cluster that has the
  metadata:
    name: backup-1
    annotations:
-     velero.io/csi-volumesnapshot-class/csi.cloud.disk.driver: csi-diskdriver-snapclass
-     velero.io/csi-volumesnapshot-class/csi.cloud.file.driver: csi-filedriver-snapclass
-     velero.io/csi-volumesnapshot-class/<driver name>: csi-snapclass
+     velero.io/csi-volumesnapshot-class_csi.cloud.disk.driver: csi-diskdriver-snapclass
+     velero.io/csi-volumesnapshot-class_csi.cloud.file.driver: csi-filedriver-snapclass
+     velero.io/csi-volumesnapshot-class_<driver name>: csi-snapclass
  ```

-To query the annotations on a backup: "velero.io/csi-volumesnapshot-class/'driver name'" - where driver names comes from the PVC's driver.
+To query the annotations on a backup: "velero.io/csi-volumesnapshot-class_'driver name'" - where driver names comes from the PVC's driver.

2. **Support VolumeSnapshotClass selection at PVC level**
The user can annotate the PVCs with driver and VolumeSnapshotClass name. The CSI plugin will use the VolumeSnapshotClass specified in the annotation. If the annotation is not present, the CSI plugin will use the default VolumeSnapshotClass for the driver. If the VolumeSnapshotClass provided is of a different driver, the CSI plugin will use the default VolumeSnapshotClass for the driver.
design/merge-patch-and-strategic-in-resource-modifier.md (new file), 193 lines

@@ -0,0 +1,193 @@
# Proposal to Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers

- [Proposal to Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers](#proposal-to-support-json-merge-patch-and-strategic-merge-patch-in-resource-modifiers)
  - [Abstract](#abstract)
  - [Goals](#goals)
  - [Non Goals](#non-goals)
  - [User Stories](#user-stories)
    - [Scenario 1](#scenario-1)
    - [Scenario 2](#scenario-2)
  - [Detailed Design](#detailed-design)
    - [How to choose the right patch type](#how-to-choose-the-right-patch-type)
    - [New Field MergePatches](#new-field-mergepatches)
    - [New Field StrategicPatches](#new-field-strategicpatches)
    - [Conditional Patches in ALL Patch Types](#conditional-patches-in-all-patch-types)
    - [Wildcard Support for GroupResource](#wildcard-support-for-groupresource)
    - [Helper Command to Generate Merge Patch and Strategic Merge Patch](#helper-command-to-generate-merge-patch-and-strategic-merge-patch)
  - [Security Considerations](#security-considerations)
  - [Compatibility](#compatibility)
  - [Implementation](#implementation)
  - [Future Enhancements](#future-enhancements)
  - [Open Issues](#open-issues)

## Abstract
Velero introduced the concept of Resource Modifiers in v1.12.0. This feature allows the user to specify a configmap with a set of rules to modify the resources during restore. The user can specify the filters to select the resources and then specify the JSON Patch to apply on the resource. This feature is currently limited to the operations supported by the JSON Patch RFC.
This proposal is to add support for JSON Merge Patch and Strategic Merge Patch in the Resource Modifiers. This will allow the user to use the same configmap to apply JSON Merge Patch and Strategic Merge Patch on the resources during restore.

## Goals
- Allow the user to specify a JSON Patch, JSON Merge Patch or Strategic Merge Patch for modification.
- Allow the user to specify multiple JSON Patch, JSON Merge Patch or Strategic Merge Patch.
- Allow the user to specify mixed JSON Patch, JSON Merge Patch and Strategic Merge Patch in the same configmap.

## Non Goals
- Deprecating the existing RestoreItemAction plugins for standard substitutions (like changing the namespace, changing the storage class, etc.)

## User Stories

### Scenario 1
- Alice has some Pods and part of them have an annotation `{"for": "bar"}`.
- Alice wishes to restore these Pods to a different cluster without this annotation.
- Alice can use this feature to remove this annotation during restore.

### Scenario 2
- Bob has a Pod with several containers, and one container with the name nginx has the image `repo1/nginx`.
- Bob wishes to restore this Pod to a different cluster, but the new cluster cannot access repo1, so he pushes the image to repo2.
- Bob can use this feature to update the image of container nginx to `repo2/nginx` during restore.

## Detailed Design
- The design and approach is inspired by the kubectl patch command and [this doc](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/).
- New fields `MergePatches` and `StrategicPatches` will be added to the `ResourceModifierRule` struct to support all three patch types.
- Only one of the three patch types can be specified in a single `ResourceModifierRule`.
- Add wildcard support for `groupResource` in the `conditions` struct.
- The workflow to create a Resource Modifier ConfigMap and reference it in the RestoreSpec will remain the same as described in the document [Resource Modifiers](https://github.com/vmware-tanzu/velero/blob/main/site/content/docs/main/restore-resource-modifiers.md).

### How to choose the right patch type
- [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) is a naively simple format, with limited usability. Probably it is a good choice if you are building something small, with a very simple JSON schema.
- [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) is a more complex format, but it is applicable to any JSON documents. For a comparison of JSON Patch and JSON Merge Patch, see [JSON Patch and JSON Merge Patch](https://erosb.github.io/post/json-patch-vs-merge-patch/).
- Strategic Merge Patch is a Kubernetes-defined patch type, mainly used to process resources of type list. You can replace/merge a list, add/remove items from a list by key, change the order of items in a list, etc. Strategic merge patch is not supported for custom resources. For more details, see [this doc](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/).

### New Field MergePatches
MergePatches is a list to specify the merge patches to be applied on the resource. The merge patches will be applied in the order specified in the configmap. A subsequent patch is applied in order and if multiple patches are specified for the same path, the last patch will override the previous patches.

Example of MergePatches in ResourceModifierRule
```yaml
version: v1
resourceModifierRules:
- conditions:
    groupResource: pods
    namespaces:
    - ns1
  mergePatches:
  - patchData: |
      {
        "metadata": {
          "annotations": {
            "foo": null
          }
        }
      }
```
- The above configmap will apply the Merge Patch to all the pods in namespace ns1 and remove the annotation `foo` from the pods.
- Both json and yaml format are supported for the patchData.

### New Field StrategicPatches
StrategicPatches is a list to specify the strategic merge patches to be applied on the resource. The strategic merge patches will be applied in the order specified in the configmap. A subsequent patch is applied in order and if multiple patches are specified for the same path, the last patch will override the previous patches.

Example of StrategicPatches in ResourceModifierRule
```yaml
version: v1
resourceModifierRules:
- conditions:
    groupResource: pods
    resourceNameRegex: "^my-pod$"
    namespaces:
    - ns1
  strategicPatches:
  - patchData: |
      {
        "spec": {
          "containers": [
            {
              "name": "nginx",
              "image": "repo2/nginx"
            }
          ]
        }
      }
```
- The above configmap will apply the Strategic Merge Patch to the pod with name my-pod in namespace ns1 and update the image of container nginx to `repo2/nginx`.
- Both json and yaml format are supported for the patchData.

### Conditional Patches in ALL Patch Types
Since JSON Merge Patch and Strategic Merge Patch do not support conditional patches, we will use the `test` operation of JSON Patch to support conditional patches in all patch types by adding it to the `Conditions` struct in `ResourceModifierRule`. A short sketch of how such a condition can be evaluated in code follows the example below.

Example of test in conditions
```yaml
version: v1
resourceModifierRules:
- conditions:
    groupResource: persistentvolumeclaims.storage.k8s.io
    matches:
    - path: "/spec/storageClassName"
      value: "premium"
  mergePatches:
  - patchData: |
      {
        "metadata": {
          "annotations": {
            "foo": null
          }
        }
      }
```
- The above configmap will apply the Merge Patch to all the PVCs in all namespaces with storageClassName premium and remove the annotation `foo` from the PVCs.
- You can specify multiple rules in the `matches` list. The patch will be applied only if all the matches are satisfied.
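
A minimal sketch of how a single `matches` entry could be evaluated with a JSON Patch `test` operation, assuming the `github.com/evanphx/json-patch` library; the helper name and the string-only value handling are illustrative, not part of the design:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// matchesCondition reports whether doc satisfies one "matches" entry by
// running a JSON Patch "test" operation against it. Apply returns an error
// when the test fails, which we read as "condition not matched".
// This sketch only handles string values.
func matchesCondition(doc []byte, path, value string) bool {
	testOp := fmt.Sprintf(`[{"op": "test", "path": %q, "value": %q}]`, path, value)
	patch, err := jsonpatch.DecodePatch([]byte(testOp))
	if err != nil {
		return false
	}
	_, err = patch.Apply(doc)
	return err == nil
}

func main() {
	pvc := []byte(`{"spec": {"storageClassName": "premium"}}`)
	fmt.Println(matchesCondition(pvc, "/spec/storageClassName", "premium"))  // true
	fmt.Println(matchesCondition(pvc, "/spec/storageClassName", "standard")) // false
}
```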

### Wildcard Support for GroupResource
The user can specify a wildcard for groupResource in the conditions struct. This will allow the user to apply the patches for all the resources of a particular group or all resources in all groups. For example, `*.apps` will apply to all the resources in the `apps` group, while `*` will apply to all the resources in all groups.
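
As a rough illustration of the intended matching behaviour (the design only says "use glob"; the standard library `path.Match` shown here is just one possible way to get it), both example patterns resolve as expected:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	groupResources := []string{"deployments.apps", "statefulsets.apps", "persistentvolumeclaims"}
	for _, pattern := range []string{"*.apps", "*"} {
		for _, gr := range groupResources {
			// path.Match treats '*' as "any run of characters", which is
			// enough for patterns like "*.apps" or a bare "*".
			ok, _ := path.Match(pattern, gr)
			fmt.Printf("pattern %q matches %q: %v\n", pattern, gr, ok)
		}
	}
}
```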

### Helper Command to Generate Merge Patch and Strategic Merge Patch
The patchData of a Strategic Merge Patch is sometimes a bit complex for users to write. We can provide a helper command to generate the patchData for Strategic Merge Patch. The command will take the original resource and the modified resource as input and generate the patchData.
It can also be used for JSON Merge Patch.

Here is a sample code snippet to achieve this:
```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "web",
					Image: "nginx",
				},
			},
		},
	}
	newPod := pod.DeepCopy()
	patch := client.StrategicMergeFrom(pod)
	newPod.Spec.Containers[0].Image = "nginx1"

	data, _ := patch.Data(newPod)
	fmt.Println(string(data))
	// Output:
	// {"spec":{"$setElementOrder/containers":[{"name":"web"}],"containers":[{"image":"nginx1","name":"web"}]}}
}
```

## Security Considerations
No security impact.

## Compatibility
Compatible with current Resource Modifiers.

## Implementation
- Use "github.com/evanphx/json-patch" to support JSON Merge Patch; a short usage sketch of this and the strategic merge patch library follows this list.
- Use "k8s.io/apimachinery/pkg/util/strategicpatch" to support Strategic Merge Patch.
- Use glob to support wildcard for `groupResource` in the `conditions` struct.
- Use the `test` operation of JSON Patch to calculate the `matches` in the `conditions` struct.
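
A minimal sketch of driving the two patch libraries named above, reusing the patch documents from the earlier examples; it is illustrative only and not the actual Velero implementation:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	pod := []byte(`{"metadata": {"annotations": {"foo": "bar"}}, "spec": {"containers": [{"name": "nginx", "image": "repo1/nginx"}]}}`)

	// JSON Merge Patch (RFC 7386): setting a field to null removes it.
	mergePatch := []byte(`{"metadata": {"annotations": {"foo": null}}}`)
	merged, err := jsonpatch.MergePatch(pod, mergePatch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))

	// Strategic Merge Patch: containers are merged by their "name" key,
	// based on the patch strategy declared on the corev1.Pod type.
	strategic := []byte(`{"spec": {"containers": [{"name": "nginx", "image": "repo2/nginx"}]}}`)
	patched, err := strategicpatch.StrategicMergePatch(pod, strategic, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patched))
}
```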

## Future Enhancements
- Add a Velero subcommand to generate/validate the patchData for Strategic Merge Patch and JSON Merge Patch.
- Add jq support for more complex conditions or patches, to meet the situations that the current conditions or patches cannot handle, like [this issue](https://github.com/vmware-tanzu/velero/issues/6344).

## Open Issues
N/A

@@ -703,33 +703,38 @@ type Provider interface {
In this case, we will extend the default kopia uploader to add the ability, when a given volume is for a block mode and is mapped as a device, we will use the [StreamingFile](https://pkg.go.dev/github.com/kopia/kopia@v0.13.0/fs#StreamingFile) to stream the device and back up to the kopia repository.

```go
-func getLocalBlockEntry(kopiaEntry fs.Entry, log logrus.FieldLogger) (fs.Entry, error) {
-	path := kopiaEntry.LocalFilesystemPath()
-
-	fileInfo, err := os.Lstat(path)
+func getLocalBlockEntry(sourcePath string) (fs.Entry, error) {
+	source, err := resolveSymlink(sourcePath)
	if err != nil {
-		return nil, errors.Wrapf(err, "Unable to get the source device information %s", path)
+		return nil, errors.Wrap(err, "resolveSymlink")
	}
+
+	fileInfo, err := os.Lstat(source)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to get the source device information %s", source)
+	}

	if (fileInfo.Sys().(*syscall.Stat_t).Mode & syscall.S_IFMT) != syscall.S_IFBLK {
-		return nil, errors.Errorf("Source path %s is not a block device", path)
+		return nil, errors.Errorf("source path %s is not a block device", source)
	}
-	device, err := os.Open(path)
+
+	device, err := os.Open(source)
	if err != nil {
		if os.IsPermission(err) || err.Error() == ErrNotPermitted {
-			return nil, errors.Wrapf(err, "No permission to open the source device %s, make sure that node agent is running in privileged mode", path)
+			return nil, errors.Wrapf(err, "no permission to open the source device %s, make sure that node agent is running in privileged mode", source)
		}
-		return nil, errors.Wrapf(err, "Unable to open the source device %s", path)
+		return nil, errors.Wrapf(err, "unable to open the source device %s", source)
	}
-	return virtualfs.StreamingFileFromReader(kopiaEntry.Name(), device), nil
+
+	sf := virtualfs.StreamingFileFromReader(source, device)
+	return virtualfs.NewStaticDirectory(source, []fs.Entry{sf}), nil
}
```

In the `pkg/uploader/kopia/snapshot.go` this is used in the Backup call like

```go
-if volMode == PersistentVolumeFilesystem {
+if volMode == uploader.PersistentVolumeFilesystem {
	// to be consistent with restic when backup empty dir returns one error for upper logic handle
	dirs, err := os.ReadDir(source)
	if err != nil {

@@ -742,15 +747,17 @@ In the `pkg/uploader/kopia/snapshot.go` this is used in the Backup call like
	source = filepath.Clean(source)
	...

-	sourceEntry, err := getLocalFSEntry(source)
-	if err != nil {
-		return nil, false, errors.Wrap(err, "Unable to get local filesystem entry")
-	}
+	var sourceEntry fs.Entry

-	if volMode == PersistentVolumeBlock {
-		sourceEntry, err = getLocalBlockEntry(sourceEntry, log)
+	if volMode == uploader.PersistentVolumeBlock {
+		sourceEntry, err = getLocalBlockEntry(source)
		if err != nil {
-			return nil, false, errors.Wrap(err, "Unable to get local block device entry")
+			return nil, false, errors.Wrap(err, "unable to get local block device entry")
		}
+	} else {
+		sourceEntry, err = getLocalFSEntry(source)
+		if err != nil {
+			return nil, false, errors.Wrap(err, "unable to get local filesystem entry")
+		}
+	}

@@ -766,6 +773,8 @@ We only need to extend two functions the rest will be passed through.
```go
type BlockOutput struct {
	*restore.FilesystemOutput
+
+	targetFileName string
}

var _ restore.Output = &BlockOutput{}

@@ -773,30 +782,15 @@ var _ restore.Output = &BlockOutput{}
const bufferSize = 128 * 1024

func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remoteFile fs.File) error {
-
-	targetFileName, err := filepath.EvalSymlinks(o.TargetPath)
-	if err != nil {
-		return errors.Wrapf(err, "Unable to evaluate symlinks for %s", targetFileName)
-	}
-
-	fileInfo, err := os.Lstat(targetFileName)
-	if err != nil {
-		return errors.Wrapf(err, "Unable to get the target device information for %s", targetFileName)
-	}
-
-	if (fileInfo.Sys().(*syscall.Stat_t).Mode & syscall.S_IFMT) != syscall.S_IFBLK {
-		return errors.Errorf("Target file %s is not a block device", targetFileName)
-	}
-
	remoteReader, err := remoteFile.Open(ctx)
	if err != nil {
-		return errors.Wrapf(err, "Failed to open remote file %s", remoteFile.Name())
+		return errors.Wrapf(err, "failed to open remote file %s", remoteFile.Name())
	}
	defer remoteReader.Close()

-	targetFile, err := os.Create(targetFileName)
+	targetFile, err := os.Create(o.targetFileName)
	if err != nil {
-		return errors.Wrapf(err, "Failed to open file %s", targetFileName)
+		return errors.Wrapf(err, "failed to open file %s", o.targetFileName)
	}
	defer targetFile.Close()

@@ -807,7 +801,7 @@ func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remote
	bytesToWrite, err := remoteReader.Read(buffer)
	if err != nil {
		if err != io.EOF {
-			return errors.Wrapf(err, "Failed to read data from remote file %s", targetFileName)
+			return errors.Wrapf(err, "failed to read data from remote file %s", o.targetFileName)
		}
		readData = false
	}

@@ -819,7 +813,7 @@ func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remote
	bytesToWrite -= bytesWritten
	offset += bytesWritten
} else {
-	return errors.Wrapf(err, "Failed to write data to file %s", targetFileName)
+	return errors.Wrapf(err, "failed to write data to file %s", o.targetFileName)
}
}
}

@@ -829,42 +823,43 @@ func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remote
}

func (o *BlockOutput) BeginDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
-	targetFileName, err := filepath.EvalSymlinks(o.TargetPath)
+	var err error
+	o.targetFileName, err = filepath.EvalSymlinks(o.TargetPath)
	if err != nil {
-		return errors.Wrapf(err, "Unable to evaluate symlinks for %s", targetFileName)
+		return errors.Wrapf(err, "unable to evaluate symlinks for %s", o.targetFileName)
	}

-	fileInfo, err := os.Lstat(targetFileName)
+	fileInfo, err := os.Lstat(o.targetFileName)
	if err != nil {
-		return errors.Wrapf(err, "Unable to get the target device information for %s", o.TargetPath)
+		return errors.Wrapf(err, "unable to get the target device information for %s", o.TargetPath)
	}

	if (fileInfo.Sys().(*syscall.Stat_t).Mode & syscall.S_IFMT) != syscall.S_IFBLK {
-		return errors.Errorf("Target file %s is not a block device", o.TargetPath)
+		return errors.Errorf("target file %s is not a block device", o.TargetPath)
	}

	return nil
}
```

-Of note, we do need to add root access to the daemon set node agent to access the new mount.
+Additional mount is required in the node-agent specification to resolve symlinks to the block devices from the /host_pods/POD_ID/volumeDevices/kubernetes.io~csi directory.

```yaml
...
- mountPath: /var/lib/kubelet/plugins
  mountPropagation: HostToContainer
  name: host-plugins

....
- hostPath:
    path: /var/lib/kubelet/plugins
  name: host-plugins
```

...
Privileged mode is required to access the block devices in the /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish directory, as confirmed by testing on EKS and Minikube.

```yaml
SecurityContext: &corev1.SecurityContext{
-	Privileged: &c.privilegedAgent,
+	Privileged: &c.privilegedNodeAgent,
},
```

## Plugin Data Movers

go.mod, 18 changes

@@ -37,11 +37,11 @@ require (
	go.uber.org/zap v1.24.0
	golang.org/x/exp v0.0.0-20221028150844-83b7d23a625f
	golang.org/x/mod v0.10.0
-	golang.org/x/net v0.9.0
+	golang.org/x/net v0.17.0
	golang.org/x/oauth2 v0.7.0
-	golang.org/x/text v0.9.0
+	golang.org/x/text v0.13.0
	google.golang.org/api v0.120.0
-	google.golang.org/grpc v1.54.0
+	google.golang.org/grpc v1.56.3
	google.golang.org/protobuf v1.30.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.25.6

@@ -54,12 +54,13 @@ require (
	k8s.io/metrics v0.25.6
	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
	sigs.k8s.io/controller-runtime v0.12.2
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
	sigs.k8s.io/yaml v1.3.0
)

require (
	cloud.google.com/go v0.110.0 // indirect
-	cloud.google.com/go/compute v1.19.0 // indirect
+	cloud.google.com/go/compute v1.19.1 // indirect
	cloud.google.com/go/compute/metadata v0.2.3 // indirect
	cloud.google.com/go/iam v0.13.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect

@@ -140,10 +141,10 @@ require (
	go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/crypto v0.14.0 // indirect
	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.7.0 // indirect
-	golang.org/x/term v0.7.0 // indirect
+	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/term v0.13.0 // indirect
	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect

@@ -155,6 +156,7 @@ require (
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/component-base v0.24.2 // indirect
	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

+replace github.com/kopia/kopia => github.com/project-velero/kopia v0.13.0-velero.1

go.sum, 36 changes

@@ -27,8 +27,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
-cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=

@@ -493,8 +493,6 @@ github.com/klauspost/reedsolomon v1.11.7/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kopia/htmluibuild v0.0.0-20230326183719-f482ef17e2c9 h1:s5Wa89s8RlPjuwqd8K8kuf+T9Kz4+NsbKwR/pJ3PAT0=
-github.com/kopia/kopia v0.13.0 h1:efxs/vw1cS9HldlHcQ8TPxlsYz+cWCkiS4IWMbR3D1s=
-github.com/kopia/kopia v0.13.0/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=

@@ -617,6 +615,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/project-velero/kopia v0.13.0-velero.1 h1:rjiP7os4Eaek/gbyIKqzwkp2XLbJHl0NkczSgJ7AOEo=
+github.com/project-velero/kopia v0.13.0-velero.1/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=

@@ -822,8 +822,8 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
-golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

@@ -923,8 +923,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
-golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1037,15 +1037,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -1056,8 +1056,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@@ -1245,8 +1245,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
-google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -1376,8 +1376,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lR
sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE=
sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM --platform=linux/amd64 golang:1.20-bullseye
FROM --platform=linux/amd64 golang:1.20.10-bullseye

ARG GOPROXY

@@ -89,7 +89,7 @@ else
fi

if [[ -z "$BUILDX_PLATFORMS" ]]; then
BUILDX_PLATFORMS="linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le"
BUILDX_PLATFORMS="linux/amd64,linux/arm64"
fi

# Debugging info
@@ -1,36 +1,49 @@
|
||||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..6f281b45d 100644
|
||||
index 5f939c481..3f5ea4096 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -25,12 +25,12 @@ require (
|
||||
@@ -24,13 +24,13 @@ require (
|
||||
github.com/restic/chunker v0.4.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
golang.org/x/crypto v0.5.0
|
||||
- golang.org/x/crypto v0.5.0
|
||||
- golang.org/x/net v0.5.0
|
||||
+ golang.org/x/net v0.7.0
|
||||
+ golang.org/x/crypto v0.14.0
|
||||
+ golang.org/x/net v0.17.0
|
||||
golang.org/x/oauth2 v0.4.0
|
||||
golang.org/x/sync v0.1.0
|
||||
- golang.org/x/sys v0.4.0
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
+ golang.org/x/sys v0.5.0
|
||||
+ golang.org/x/term v0.5.0
|
||||
+ golang.org/x/text v0.7.0
|
||||
+ golang.org/x/sys v0.13.0
|
||||
+ golang.org/x/term v0.13.0
|
||||
+ golang.org/x/text v0.13.0
|
||||
google.golang.org/api v0.106.0
|
||||
)
|
||||
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..da35b7a6c 100644
|
||||
index 026e1d2fa..f24fef5b8 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -172,8 +172,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -189,8 +189,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
@@ -40,21 +53,21 @@ index 026e1d2fa..da35b7a6c 100644
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
|
||||
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
||||
@@ -71,7 +71,7 @@ func (n *namespacedFileStore) Path(selector *corev1api.SecretKeySelector) (strin

	keyFilePath := filepath.Join(n.fsRoot, fmt.Sprintf("%s-%s", selector.Name, selector.Key))

	file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE, 0644)
	file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", errors.Wrap(err, "unable to open credentials file for writing")
	}
internal/resourcemodifiers/json_merge_patch.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package resourcemodifiers

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"
)

type JSONMergePatch struct {
	PatchData string `json:"patchData,omitempty"`
}

type JSONMergePatcher struct {
	patches []JSONMergePatch
}

func (p *JSONMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.FieldLogger) (*unstructured.Unstructured, error) {
	objBytes, err := u.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("error in marshaling object %s", err)
	}

	for _, patch := range p.patches {
		patchBytes, err := yaml.YAMLToJSON([]byte(patch.PatchData))
		if err != nil {
			return nil, fmt.Errorf("error in converting YAML to JSON %s", err)
		}

		objBytes, err = jsonpatch.MergePatch(objBytes, patchBytes)
		if err != nil {
			return nil, fmt.Errorf("error in applying JSON Patch: %s", err.Error())
		}
	}

	updated := &unstructured.Unstructured{}
	err = updated.UnmarshalJSON(objBytes)
	if err != nil {
		return nil, fmt.Errorf("error in unmarshalling modified object %s", err.Error())
	}

	return updated, nil
}
internal/resourcemodifiers/json_merge_patch_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package resourcemodifiers

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

func TestJsonMergePatchFailure(t *testing.T) {
	tests := []struct {
		name string
		data string
	}{
		{
			name: "patch with bad yaml",
			data: "a: b:",
		},
		{
			name: "patch with bad json",
			data: `{"a"::1}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := runtime.NewScheme()
			err := clientgoscheme.AddToScheme(scheme)
			assert.NoError(t, err)
			pt := &JSONMergePatcher{
				patches: []JSONMergePatch{{PatchData: tt.data}},
			}

			u := &unstructured.Unstructured{}
			_, err = pt.Patch(u, logrus.New())
			assert.Error(t, err)
		})
	}
}
internal/resourcemodifiers/json_patch.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package resourcemodifiers

import (
	"errors"
	"fmt"
	"strconv"
	"strings"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

type JSONPatch struct {
	Operation string `json:"operation"`
	From      string `json:"from,omitempty"`
	Path      string `json:"path"`
	Value     string `json:"value,omitempty"`
}

func (p *JSONPatch) ToString() string {
	if addQuotes(&p.Value) {
		return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": "%s"}`, p.Operation, p.From, p.Path, p.Value)
	}
	return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": %s}`, p.Operation, p.From, p.Path, p.Value)
}

func addQuotes(value *string) bool {
	if *value == "" {
		return true
	}
	// if value is escaped, remove escape and add quotes
	// this is useful for scenarios where boolean, null and numbers are required to be set as string.
	if strings.HasPrefix(*value, "\"") && strings.HasSuffix(*value, "\"") {
		*value = strings.TrimPrefix(*value, "\"")
		*value = strings.TrimSuffix(*value, "\"")
		return true
	}
	// if value is null, then don't add quotes
	if *value == "null" {
		return false
	}
	// if value is a boolean, then don't add quotes
	if strings.ToLower(*value) == "true" || strings.ToLower(*value) == "false" {
		return false
	}
	// if value is a json object or array, then don't add quotes.
	if strings.HasPrefix(*value, "{") || strings.HasPrefix(*value, "[") {
		return false
	}
	// if value is a number, then don't add quotes
	if _, err := strconv.ParseFloat(*value, 64); err == nil {
		return false
	}
	return true
}

type JSONPatcher struct {
	patches []JSONPatch `yaml:"patches"`
}

func (p *JSONPatcher) Patch(u *unstructured.Unstructured, logger logrus.FieldLogger) (*unstructured.Unstructured, error) {
	modifiedObjBytes, err := p.applyPatch(u)
	if err != nil {
		if errors.Is(err, jsonpatch.ErrTestFailed) {
			logger.Infof("Test operation failed for JSON Patch %s", err.Error())
			return u.DeepCopy(), nil
		}
		return nil, fmt.Errorf("error in applying JSON Patch %s", err.Error())
	}

	updated := &unstructured.Unstructured{}
	err = updated.UnmarshalJSON(modifiedObjBytes)
	if err != nil {
		return nil, fmt.Errorf("error in unmarshalling modified object %s", err.Error())
	}

	return updated, nil
}

func (p *JSONPatcher) applyPatch(u *unstructured.Unstructured) ([]byte, error) {
	patchBytes := p.patchArrayToByteArray()
	jsonPatch, err := jsonpatch.DecodePatch(patchBytes)
	if err != nil {
		return nil, fmt.Errorf("error in decoding json patch %s", err.Error())
	}

	objBytes, err := u.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("error in marshaling object %s", err.Error())
	}

	return jsonPatch.Apply(objBytes)
}

func (p *JSONPatcher) patchArrayToByteArray() []byte {
	var patches []string
	for _, patch := range p.patches {
		patches = append(patches, patch.ToString())
	}
	patchesStr := strings.Join(patches, ",\n\t")
	return []byte(fmt.Sprintf(`[%s]`, patchesStr))
}
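As a side note on the quoting logic above, here is a minimal illustration (not part of the diff; the helper name and sample paths are made up for this sketch) of how ToString renders different value kinds:

package resourcemodifiers

import "fmt"

// Illustrative only: string values are quoted, numbers/booleans/null/JSON literals are emitted bare,
// and a value wrapped in explicit quotes (e.g. "true") is forced back into a quoted string.
func illustrateToString() {
	str := JSONPatch{Operation: "replace", Path: "/spec/storageClassName", Value: "premium"}
	num := JSONPatch{Operation: "replace", Path: "/spec/replicas", Value: "3"}
	esc := JSONPatch{Operation: "replace", Path: "/metadata/labels/flag", Value: `"true"`}

	fmt.Println(str.ToString()) // ... "value": "premium"  (quoted string)
	fmt.Println(num.ToString()) // ... "value": 3           (bare number)
	fmt.Println(esc.ToString()) // ... "value": "true"      (escaped value kept as a string)
}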
@@ -2,17 +2,18 @@ package resourcemodifiers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
"github.com/gobwas/glob"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/util/collections"
|
||||
)
|
||||
@@ -22,27 +23,29 @@ const (
|
||||
ResourceModifierSupportedVersionV1 = "v1"
|
||||
)
|
||||
|
||||
type JSONPatch struct {
|
||||
Operation string `yaml:"operation"`
|
||||
From string `yaml:"from,omitempty"`
|
||||
Path string `yaml:"path"`
|
||||
Value string `yaml:"value,omitempty"`
|
||||
type MatchRule struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
Value string `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type Conditions struct {
|
||||
Namespaces []string `yaml:"namespaces,omitempty"`
|
||||
GroupKind string `yaml:"groupKind"`
|
||||
ResourceNameRegex string `yaml:"resourceNameRegex"`
|
||||
Namespaces []string `json:"namespaces,omitempty"`
|
||||
GroupResource string `json:"groupResource"`
|
||||
ResourceNameRegex string `json:"resourceNameRegex,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
Matches []MatchRule `json:"matches,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModifierRule struct {
|
||||
Conditions Conditions `yaml:"conditions"`
|
||||
Patches []JSONPatch `yaml:"patches"`
|
||||
Conditions Conditions `json:"conditions"`
|
||||
Patches []JSONPatch `json:"patches,omitempty"`
|
||||
MergePatches []JSONMergePatch `json:"mergePatches,omitempty"`
|
||||
StrategicPatches []StrategicMergePatch `json:"strategicPatches,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModifiers struct {
|
||||
Version string `yaml:"version"`
|
||||
ResourceModifierRules []ResourceModifierRule `yaml:"resourceModifierRules"`
|
||||
Version string `json:"version"`
|
||||
ResourceModifierRules []ResourceModifierRule `json:"resourceModifierRules"`
|
||||
}
|
||||
|
||||
func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error) {
|
||||
@@ -50,7 +53,7 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
|
||||
return nil, fmt.Errorf("could not parse config from nil configmap")
|
||||
}
|
||||
if len(cm.Data) != 1 {
|
||||
return nil, fmt.Errorf("illegal resource modifiers %s/%s configmap", cm.Name, cm.Namespace)
|
||||
return nil, fmt.Errorf("illegal resource modifiers %s/%s configmap", cm.Namespace, cm.Name)
|
||||
}
|
||||
|
||||
var yamlData string
|
||||
@@ -58,7 +61,7 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
|
||||
yamlData = v
|
||||
}
|
||||
|
||||
resModifiers, err := unmarshalResourceModifiers(&yamlData)
|
||||
resModifiers, err := unmarshalResourceModifiers([]byte(yamlData))
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
@@ -66,10 +69,10 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
|
||||
return resModifiers, nil
|
||||
}
|
||||
|
||||
func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) []error {
|
||||
func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) []error {
|
||||
var errs []error
|
||||
for _, rule := range p.ResourceModifierRules {
|
||||
err := rule.Apply(obj, groupResource, log)
|
||||
err := rule.apply(obj, groupResource, scheme, log)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
@@ -78,14 +81,25 @@ func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstruc
|
||||
return errs
|
||||
}
|
||||
|
||||
func (r *ResourceModifierRule) Apply(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) error {
|
||||
namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...)
|
||||
if !namespaceInclusion.ShouldInclude(obj.GetNamespace()) {
|
||||
return nil
|
||||
}
|
||||
if !strings.EqualFold(groupResource, r.Conditions.GroupKind) {
|
||||
func (r *ResourceModifierRule) apply(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) error {
|
||||
ns := obj.GetNamespace()
|
||||
if ns != "" {
|
||||
namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...)
|
||||
if !namespaceInclusion.ShouldInclude(ns) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
g, err := glob.Compile(r.Conditions.GroupResource, '.')
|
||||
if err != nil {
|
||||
log.Errorf("Bad glob pattern of groupResource in condition, groupResource: %s, err: %s", r.Conditions.GroupResource, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !g.Match(groupResource) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Conditions.ResourceNameRegex != "" {
|
||||
match, err := regexp.MatchString(r.Conditions.ResourceNameRegex, obj.GetName())
|
||||
if err != nil {
|
||||
@@ -95,94 +109,93 @@ func (r *ResourceModifierRule) Apply(obj *unstructured.Unstructured, groupResour
|
||||
return nil
|
||||
}
|
||||
}
|
||||
patches, err := r.PatchArrayToByteArray()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Applying resource modifier patch on %s/%s", obj.GetNamespace(), obj.GetName())
|
||||
err = ApplyPatch(patches, obj, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert all JsonPatch to string array with the format of jsonpatch.Patch and then convert it to byte array
|
||||
func (r *ResourceModifierRule) PatchArrayToByteArray() ([]byte, error) {
|
||||
var patches []string
|
||||
for _, patch := range r.Patches {
|
||||
patches = append(patches, patch.ToString())
|
||||
}
|
||||
patchesStr := strings.Join(patches, ",\n\t")
|
||||
return []byte(fmt.Sprintf(`[%s]`, patchesStr)), nil
|
||||
}
|
||||
|
||||
func (p *JSONPatch) ToString() string {
|
||||
if addQuotes(p.Value) {
|
||||
return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": "%s"}`, p.Operation, p.From, p.Path, p.Value)
|
||||
}
|
||||
return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": %s}`, p.Operation, p.From, p.Path, p.Value)
|
||||
}
|
||||
|
||||
func ApplyPatch(patch []byte, obj *unstructured.Unstructured, log logrus.FieldLogger) error {
|
||||
jsonPatch, err := jsonpatch.DecodePatch(patch)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in decoding json patch %s", err.Error())
|
||||
}
|
||||
objBytes, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in marshaling object %s", err.Error())
|
||||
}
|
||||
modifiedObjBytes, err := jsonPatch.Apply(objBytes)
|
||||
if err != nil {
|
||||
if errors.Is(err, jsonpatch.ErrTestFailed) {
|
||||
log.Infof("Test operation failed for JSON Patch %s", err.Error())
|
||||
if r.Conditions.LabelSelector != nil {
|
||||
selector, err := metav1.LabelSelectorAsSelector(r.Conditions.LabelSelector)
|
||||
if err != nil {
|
||||
return errors.Errorf("error in creating label selector %s", err.Error())
|
||||
}
|
||||
if !selector.Matches(labels.Set(obj.GetLabels())) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error in applying JSON Patch %s", err.Error())
|
||||
}
|
||||
err = obj.UnmarshalJSON(modifiedObjBytes)
|
||||
|
||||
match, err := matchConditions(obj, r.Conditions.Matches, log)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in unmarshalling modified object %s", err.Error())
|
||||
return err
|
||||
} else if !match {
|
||||
log.Info("Conditions do not match, skip it")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("Applying resource modifier patch on %s/%s", obj.GetNamespace(), obj.GetName())
|
||||
err = r.applyPatch(obj, scheme, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalResourceModifiers(yamlData *string) (*ResourceModifiers, error) {
|
||||
resModifiers := &ResourceModifiers{}
|
||||
err := decodeStruct(strings.NewReader(*yamlData), resModifiers)
|
||||
func matchConditions(u *unstructured.Unstructured, rules []MatchRule, _ logrus.FieldLogger) (bool, error) {
|
||||
if len(rules) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var fixed []JSONPatch
|
||||
for _, rule := range rules {
|
||||
if rule.Path == "" {
|
||||
return false, fmt.Errorf("path is required for match rule")
|
||||
}
|
||||
|
||||
fixed = append(fixed, JSONPatch{
|
||||
Operation: "test",
|
||||
Path: rule.Path,
|
||||
Value: rule.Value,
|
||||
})
|
||||
}
|
||||
|
||||
p := &JSONPatcher{patches: fixed}
|
||||
_, err := p.applyPatch(u)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode yaml data into resource modifiers %v", err)
|
||||
if errors.Is(err, jsonpatch.ErrTestFailed) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func unmarshalResourceModifiers(yamlData []byte) (*ResourceModifiers, error) {
|
||||
resModifiers := &ResourceModifiers{}
|
||||
err := yaml.UnmarshalStrict(yamlData, resModifiers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode yaml data into resource modifiers, err: %s", err)
|
||||
}
|
||||
return resModifiers, nil
|
||||
}
|
||||
|
||||
// decodeStruct restrict validate the keys in decoded mappings to exist as fields in the struct being decoded into
|
||||
func decodeStruct(r io.Reader, s interface{}) error {
|
||||
dec := yaml.NewDecoder(r)
|
||||
dec.KnownFields(true)
|
||||
return dec.Decode(s)
|
||||
type patcher interface {
|
||||
Patch(u *unstructured.Unstructured, logger logrus.FieldLogger) (*unstructured.Unstructured, error)
|
||||
}
|
||||
|
||||
func addQuotes(value string) bool {
|
||||
if value == "" {
|
||||
return true
|
||||
func (r *ResourceModifierRule) applyPatch(u *unstructured.Unstructured, scheme *runtime.Scheme, logger logrus.FieldLogger) error {
|
||||
var p patcher
|
||||
if len(r.Patches) > 0 {
|
||||
p = &JSONPatcher{patches: r.Patches}
|
||||
} else if len(r.MergePatches) > 0 {
|
||||
p = &JSONMergePatcher{patches: r.MergePatches}
|
||||
} else if len(r.StrategicPatches) > 0 {
|
||||
p = &StrategicMergePatcher{patches: r.StrategicPatches, scheme: scheme}
|
||||
} else {
|
||||
return fmt.Errorf("no patch data found")
|
||||
}
|
||||
// if value is null, then don't add quotes
|
||||
if value == "null" {
|
||||
return false
|
||||
|
||||
updated, err := p.Patch(u, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in applying patch %s", err)
|
||||
}
|
||||
// if value is a boolean, then don't add quotes
|
||||
if _, err := strconv.ParseBool(value); err == nil {
|
||||
return false
|
||||
}
|
||||
// if value is a json object or array, then don't add quotes.
|
||||
if strings.HasPrefix(value, "{") || strings.HasPrefix(value, "[") {
|
||||
return false
|
||||
}
|
||||
// if value is a number, then don't add quotes
|
||||
if _, err := strconv.ParseFloat(value, 64); err == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
u.SetUnstructuredContent(updated.Object)
|
||||
return nil
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
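To make the new rule shape concrete, here is a small usage sketch (not taken from the diff; the sample namespace, name regex, and storage class are invented) showing how a ResourceModifiers value with the new groupResource condition could be built and applied:

package resourcemodifiers

import (
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// Illustrative only: one rule that retargets matching PVCs to a different storage class.
func illustrateApplyRules(pvc *unstructured.Unstructured, scheme *runtime.Scheme, log logrus.FieldLogger) []error {
	mods := &ResourceModifiers{
		Version: "v1",
		ResourceModifierRules: []ResourceModifierRule{
			{
				Conditions: Conditions{
					GroupResource:     "persistentvolumeclaims", // matched as a glob, so patterns like "*.apps" also work
					ResourceNameRegex: "^data-.*$",
					Namespaces:        []string{"prod"},
				},
				Patches: []JSONPatch{
					{Operation: "replace", Path: "/spec/storageClassName", Value: "premium"},
				},
			},
		},
	}
	return mods.ApplyResourceModifierRules(pvc, "persistentvolumeclaims", scheme, log)
}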
@@ -1,15 +1,29 @@
package resourcemodifiers

import (
	"strings"

	"fmt"
	"strings"
)

func (r *ResourceModifierRule) Validate() error {
	if err := r.Conditions.Validate(); err != nil {
		return err
	}

	count := 0
	for _, size := range []int{
		len(r.Patches),
		len(r.MergePatches),
		len(r.StrategicPatches),
	} {
		if size != 0 {
			count++
		}
		if count >= 2 {
			return fmt.Errorf("only one of patches, mergePatches, strategicPatches can be specified")
		}
	}

	for _, patch := range r.Patches {
		if err := patch.Validate(); err != nil {
			return err
@@ -48,8 +62,8 @@ func (p *JSONPatch) Validate() error {
}

func (c *Conditions) Validate() error {
	if c.GroupKind == "" {
		return fmt.Errorf("groupkind cannot be empty")
	if c.GroupResource == "" {
		return fmt.Errorf("groupResource cannot be empty")
	}
	return nil
}
@@ -21,7 +21,7 @@ func TestResourceModifiers_Validate(t *testing.T) {
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupKind: "persistentvolumeclaims",
|
||||
GroupResource: "persistentvolumeclaims",
|
||||
ResourceNameRegex: ".*",
|
||||
Namespaces: []string{"bar", "foo"},
|
||||
},
|
||||
@@ -44,7 +44,7 @@ func TestResourceModifiers_Validate(t *testing.T) {
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupKind: "persistentvolumeclaims",
|
||||
GroupResource: "persistentvolumeclaims",
|
||||
ResourceNameRegex: ".*",
|
||||
Namespaces: []string{"bar", "foo"},
|
||||
},
|
||||
@@ -75,7 +75,7 @@ func TestResourceModifiers_Validate(t *testing.T) {
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupKind: "persistentvolumeclaims",
|
||||
GroupResource: "persistentvolumeclaims",
|
||||
ResourceNameRegex: ".*",
|
||||
Namespaces: []string{"bar", "foo"},
|
||||
},
|
||||
@@ -92,13 +92,13 @@ func TestResourceModifiers_Validate(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Condition has empty GroupKind",
|
||||
name: "Condition has empty GroupResource",
|
||||
fields: fields{
|
||||
Version: "v1",
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupKind: "",
|
||||
GroupResource: "",
|
||||
ResourceNameRegex: ".*",
|
||||
Namespaces: []string{"bar", "foo"},
|
||||
},
|
||||
@@ -114,6 +114,32 @@ func TestResourceModifiers_Validate(t *testing.T) {
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "More than one patch type in a rule",
|
||||
fields: fields{
|
||||
Version: "v1",
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupResource: "*",
|
||||
},
|
||||
Patches: []JSONPatch{
|
||||
{
|
||||
Operation: "test",
|
||||
Path: "/spec/storageClassName",
|
||||
Value: "premium",
|
||||
},
|
||||
},
|
||||
MergePatches: []JSONMergePatch{
|
||||
{
|
||||
PatchData: `{"metadata":{"labels":{"a":null}}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
internal/resourcemodifiers/strategic_merge_patch.go (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
package resourcemodifiers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/mergepatch"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
kubejson "sigs.k8s.io/json"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
type StrategicMergePatch struct {
|
||||
PatchData string `json:"patchData,omitempty"`
|
||||
}
|
||||
|
||||
type StrategicMergePatcher struct {
|
||||
patches []StrategicMergePatch
|
||||
scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func (p *StrategicMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.FieldLogger) (*unstructured.Unstructured, error) {
|
||||
gvk := u.GetObjectKind().GroupVersionKind()
|
||||
schemaReferenceObj, err := p.scheme.New(gvk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
origin := u.DeepCopy()
|
||||
updated := u.DeepCopy()
|
||||
for _, patch := range p.patches {
|
||||
patchBytes, err := yaml.YAMLToJSON([]byte(patch.PatchData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in converting YAML to JSON %s", err)
|
||||
}
|
||||
|
||||
err = strategicPatchObject(origin, patchBytes, updated, schemaReferenceObj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in applying Strategic Patch %s", err.Error())
|
||||
}
|
||||
|
||||
origin = updated.DeepCopy()
|
||||
}
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// strategicPatchObject applies a strategic merge patch of `patchBytes` to
|
||||
// `originalObject` and stores the result in `objToUpdate`.
|
||||
// It additionally returns the map[string]interface{} representation of the
|
||||
// `originalObject` and `patchBytes`.
|
||||
// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned.
|
||||
func strategicPatchObject(
|
||||
originalObject runtime.Object,
|
||||
patchBytes []byte,
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
) error {
|
||||
originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
patchMap := make(map[string]interface{})
|
||||
var strictErrs []error
|
||||
strictErrs, err = kubejson.UnmarshalStrict(patchBytes, &patchMap)
|
||||
if err != nil {
|
||||
return apierrors.NewBadRequest(err.Error())
|
||||
}
|
||||
|
||||
if err := applyPatchToObject(originalObjMap, patchMap, objToUpdate, schemaReferenceObj, strictErrs); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyPatchToObject applies a strategic merge patch of <patchMap> to
|
||||
// <originalMap> and stores the result in <objToUpdate>.
|
||||
// NOTE: <objToUpdate> must be a versioned object.
|
||||
func applyPatchToObject(
|
||||
originalMap map[string]interface{},
|
||||
patchMap map[string]interface{},
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
strictErrs []error,
|
||||
) error {
|
||||
patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj)
|
||||
if err != nil {
|
||||
return interpretStrategicMergePatchError(err)
|
||||
}
|
||||
|
||||
// Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
|
||||
converter := runtime.DefaultUnstructuredConverter
|
||||
if err := converter.FromUnstructuredWithValidation(patchedObjMap, objToUpdate, true); err != nil {
|
||||
strictError, isStrictError := runtime.AsStrictDecodingError(err)
|
||||
switch {
|
||||
case !isStrictError:
|
||||
// disregard any strictErrs, because it's an incomplete
|
||||
// list of strict errors given that we don't know what fields were
|
||||
// unknown because StrategicMergeMapPatch failed.
|
||||
// Non-strict errors trump in this case.
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()),
|
||||
})
|
||||
//case validationDirective == metav1.FieldValidationWarn:
|
||||
// addStrictDecodingWarnings(requestContext, append(strictErrs, strictError.Errors()...))
|
||||
default:
|
||||
strictDecodingError := runtime.NewStrictDecodingError(append(strictErrs, strictError.Errors()...))
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), strictDecodingError.Error()),
|
||||
})
|
||||
}
|
||||
} else if len(strictErrs) > 0 {
|
||||
switch {
|
||||
//case validationDirective == metav1.FieldValidationWarn:
|
||||
// addStrictDecodingWarnings(requestContext, strictErrs)
|
||||
default:
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code.
|
||||
func interpretStrategicMergePatchError(err error) error {
|
||||
switch err {
|
||||
case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat:
|
||||
return apierrors.NewBadRequest(err.Error())
|
||||
case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys:
|
||||
return apierrors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
internal/resourcemodifiers/strategic_merge_patch_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
package resourcemodifiers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
)
|
||||
|
||||
func TestStrategicMergePatchFailure(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
data string
|
||||
kind string
|
||||
}{
|
||||
{
|
||||
name: "patch with unknown kind",
|
||||
data: "{}",
|
||||
kind: "BadKind",
|
||||
},
|
||||
{
|
||||
name: "patch with bad yaml",
|
||||
data: "a: b:",
|
||||
kind: "Pod",
|
||||
},
|
||||
{
|
||||
name: "patch with bad json",
|
||||
data: `{"a"::1}`,
|
||||
kind: "Pod",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := clientgoscheme.AddToScheme(scheme)
|
||||
assert.NoError(t, err)
|
||||
pt := &StrategicMergePatcher{
|
||||
patches: []StrategicMergePatch{{PatchData: tt.data}},
|
||||
scheme: scheme,
|
||||
}
|
||||
|
||||
u := &unstructured.Unstructured{}
|
||||
u.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: tt.kind})
|
||||
_, err = pt.Patch(u, logrus.New())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -132,7 +132,7 @@ func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
		return nil, fmt.Errorf("could not parse config from nil configmap")
	}
	if len(cm.Data) != 1 {
		return nil, fmt.Errorf("illegal resource policies %s/%s configmap", cm.Name, cm.Namespace)
		return nil, fmt.Errorf("illegal resource policies %s/%s configmap", cm.Namespace, cm.Name)
	}

	var yamlData string
@@ -1,37 +0,0 @@
/*
Copyright 2020 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// TODO(2.0) After converting all controllers to runtime-controller,
// the functions in this file will no longer be needed and should be removed.
package managercontroller

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/manager"

	"github.com/vmware-tanzu/velero/pkg/controller"
)

// Runnable will turn a "regular" runnable component (such as a controller)
// into a controller-runtime Runnable
func Runnable(p controller.Interface, numWorkers int) manager.Runnable {
	// Pass the provided Context down to the run function.
	f := func(ctx context.Context) error {
		return p.Run(ctx, numWorkers)
	}
	return manager.RunnableFunc(f)
}
@@ -89,6 +89,14 @@ const (

	// ResourceUsageLabel is the label key to explain the Velero resource usage.
	ResourceUsageLabel = "velero.io/resource-usage"

	// VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes
	// need to be backed up using pod volume backup.
	VolumesToBackupAnnotation = "backup.velero.io/backup-volumes"

	// VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes
	// should be excluded from pod volume backup.
	VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes"
)

type AsyncOperationIDPrefix string
@@ -1,34 +1,17 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CSISnapshotSpec) DeepCopyInto(out *CSISnapshotSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotSpec.
|
||||
@@ -48,7 +31,6 @@ func (in *DataDownload) DeepCopyInto(out *DataDownload) {
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownload.
|
||||
@@ -81,7 +63,6 @@ func (in *DataDownloadList) DeepCopyInto(out *DataDownloadList) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadList.
|
||||
@@ -114,7 +95,6 @@ func (in *DataDownloadSpec) DeepCopyInto(out *DataDownloadSpec) {
|
||||
}
|
||||
}
|
||||
out.OperationTimeout = in.OperationTimeout
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadSpec.
|
||||
@@ -139,7 +119,6 @@ func (in *DataDownloadStatus) DeepCopyInto(out *DataDownloadStatus) {
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadStatus.
|
||||
@@ -159,7 +138,6 @@ func (in *DataUpload) DeepCopyInto(out *DataUpload) {
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUpload.
|
||||
@@ -192,7 +170,6 @@ func (in *DataUploadList) DeepCopyInto(out *DataUploadList) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadList.
|
||||
@@ -227,7 +204,6 @@ func (in *DataUploadResult) DeepCopyInto(out *DataUploadResult) {
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadResult.
|
||||
@@ -260,7 +236,6 @@ func (in *DataUploadSpec) DeepCopyInto(out *DataUploadSpec) {
|
||||
}
|
||||
}
|
||||
out.OperationTimeout = in.OperationTimeout
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadSpec.
|
||||
@@ -296,7 +271,6 @@ func (in *DataUploadStatus) DeepCopyInto(out *DataUploadStatus) {
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadStatus.
|
||||
@@ -312,7 +286,6 @@ func (in *DataUploadStatus) DeepCopy() *DataUploadStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TargetVolumeSpec) DeepCopyInto(out *TargetVolumeSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVolumeSpec.
|
||||
|
||||
@@ -52,6 +52,7 @@ import (
	vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
	"github.com/vmware-tanzu/velero/pkg/podvolume"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
	pdvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
	"github.com/vmware-tanzu/velero/pkg/volume"
)

@@ -200,7 +201,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
	// Get the list of volumes to back up using pod volume backup from the pod's annotations. Remove from this list
	// any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario,
	// where it's been backed up from another pod), since we don't need >1 backup per PVC.
	includedVolumes, optedOutVolumes := podvolume.GetVolumesByPod(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup))
	includedVolumes, optedOutVolumes := pdvolumeutil.GetVolumesByPod(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup))
	for _, volume := range includedVolumes {
		// track the volumes that are PVCs using the PVC snapshot tracker, so that when we backup PVCs/PVs
		// via an item action in the next step, we don't snapshot PVs that will have their data backed up
@@ -196,9 +196,8 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
|
||||
log.Info("Getting items for resource")
|
||||
|
||||
var (
|
||||
gvr = gv.WithResource(resource.Name)
|
||||
gr = gvr.GroupResource()
|
||||
clusterScoped = !resource.Namespaced
|
||||
gvr = gv.WithResource(resource.Name)
|
||||
gr = gvr.GroupResource()
|
||||
)
|
||||
|
||||
orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
|
||||
@@ -272,8 +271,6 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
|
||||
}
|
||||
}
|
||||
|
||||
namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)
|
||||
|
||||
// Handle namespace resource here.
|
||||
// Namespace are only filtered by namespace include/exclude filters.
|
||||
// Label selectors are not checked.
|
||||
@@ -289,10 +286,12 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
items := r.backupNamespaces(unstructuredList, namespacesToList, gr, preferredGVR, log)
|
||||
items := r.backupNamespaces(unstructuredList, r.backupRequest.NamespaceIncludesExcludes, gr, preferredGVR, log)
|
||||
|
||||
return items, nil
|
||||
}
|
||||
clusterScoped := !resource.Namespaced
|
||||
namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)
|
||||
|
||||
// If we get here, we're backing up something other than namespaces
|
||||
if clusterScoped {
|
||||
@@ -533,31 +532,13 @@ func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstr
|
||||
|
||||
// backupNamespaces process namespace resource according to namespace filters.
|
||||
func (r *itemCollector) backupNamespaces(unstructuredList *unstructured.UnstructuredList,
|
||||
namespacesToList []string, gr schema.GroupResource, preferredGVR schema.GroupVersionResource,
|
||||
ie *collections.IncludesExcludes, gr schema.GroupResource, preferredGVR schema.GroupVersionResource,
|
||||
log logrus.FieldLogger) []*kubernetesResource {
|
||||
var items []*kubernetesResource
|
||||
for index, unstructured := range unstructuredList.Items {
|
||||
found := false
|
||||
if len(namespacesToList) == 0 {
|
||||
// No namespace found. By far, this condition cannot be triggered. Either way,
|
||||
// namespacesToList is not empty.
|
||||
log.Debug("Skip namespace resource, because no item found by namespace filters.")
|
||||
break
|
||||
} else if len(namespacesToList) == 1 && namespacesToList[0] == "" {
|
||||
// All namespaces are included.
|
||||
log.Debugf("Backup namespace %s due to full cluster backup.", unstructured.GetName())
|
||||
found = true
|
||||
} else {
|
||||
for _, ns := range namespacesToList {
|
||||
if unstructured.GetName() == ns {
|
||||
log.Debugf("Backup namespace %s due to namespace filters setting.", unstructured.GetName())
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if ie.ShouldInclude(unstructured.GetName()) {
|
||||
log.Debugf("Backup namespace %s due to namespace filters setting.", unstructured.GetName())
|
||||
|
||||
if found {
|
||||
path, err := r.writeToFile(&unstructuredList.Items[index])
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error writing item to file")
|
||||
|
||||
@@ -20,8 +20,6 @@ import (
	"fmt"
	"sort"

	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"

	"github.com/vmware-tanzu/velero/internal/hook"
	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -51,7 +49,6 @@ type Request struct {
	VolumeSnapshots    []*volume.Snapshot
	PodVolumeBackups   []*velerov1api.PodVolumeBackup
	BackedUpItems      map[itemKey]struct{}
	CSISnapshots       []snapshotv1api.VolumeSnapshot
	itemOperationsList *[]*itemoperation.BackupOperation
	ResPolicies        *resourcepolicies.Policies
	SkippedPVTracker   *skipPVTracker
pkg/backup/snapshots.go (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
|
||||
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/features"
|
||||
"github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
)
|
||||
|
||||
// Common function to update the status of CSI snapshots
|
||||
// returns VolumeSnapshot, VolumeSnapshotContent, VolumeSnapshotClasses referenced
|
||||
func UpdateBackupCSISnapshotsStatus(client kbclient.Client, volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, backup *velerov1api.Backup, backupLog logrus.FieldLogger) (volumeSnapshots []snapshotv1api.VolumeSnapshot, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass) {
|
||||
if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) {
|
||||
backupLog.Info("backup SnapshotMoveData is set to true, skip VolumeSnapshot resource persistence.")
|
||||
} else if features.IsEnabled(velerov1api.CSIFeatureFlag) {
|
||||
selector := label.NewSelectorForBackup(backup.Name)
|
||||
vscList := &snapshotv1api.VolumeSnapshotContentList{}
|
||||
|
||||
if volumeSnapshotLister != nil {
|
||||
tmpVSs, err := volumeSnapshotLister.List(label.NewSelectorForBackup(backup.Name))
|
||||
if err != nil {
|
||||
backupLog.Error(err)
|
||||
}
|
||||
for _, vs := range tmpVSs {
|
||||
volumeSnapshots = append(volumeSnapshots, *vs)
|
||||
}
|
||||
}
|
||||
|
||||
err := client.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
backupLog.Error(err)
|
||||
}
|
||||
if len(vscList.Items) >= 0 {
|
||||
volumeSnapshotContents = vscList.Items
|
||||
}
|
||||
|
||||
vsClassSet := sets.NewString()
|
||||
for index := range volumeSnapshotContents {
|
||||
// persist the volumesnapshotclasses referenced by vsc
|
||||
if volumeSnapshotContents[index].Spec.VolumeSnapshotClassName != nil && !vsClassSet.Has(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) {
|
||||
vsClass := &snapshotv1api.VolumeSnapshotClass{}
|
||||
if err := client.Get(context.TODO(), kbclient.ObjectKey{Name: *volumeSnapshotContents[index].Spec.VolumeSnapshotClassName}, vsClass); err != nil {
|
||||
backupLog.Error(err)
|
||||
} else {
|
||||
vsClassSet.Insert(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName)
|
||||
volumeSnapshotClasses = append(volumeSnapshotClasses, *vsClass)
|
||||
}
|
||||
}
|
||||
}
|
||||
backup.Status.CSIVolumeSnapshotsAttempted = len(volumeSnapshots)
|
||||
csiVolumeSnapshotsCompleted := 0
|
||||
for _, vs := range volumeSnapshots {
|
||||
if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) {
|
||||
csiVolumeSnapshotsCompleted++
|
||||
}
|
||||
}
|
||||
backup.Status.CSIVolumeSnapshotsCompleted = csiVolumeSnapshotsCompleted
|
||||
}
|
||||
return volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses
|
||||
}
|
||||
@@ -299,3 +299,9 @@ func (b *BackupBuilder) DataMover(name string) *BackupBuilder {
|
||||
b.object.Spec.DataMover = name
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStatus sets the Backup's status.
|
||||
func (b *BackupBuilder) WithStatus(status velerov1api.BackupStatus) *BackupBuilder {
|
||||
b.object.Status = status
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -95,6 +95,12 @@ func (b *PersistentVolumeBuilder) StorageClass(name string) *PersistentVolumeBui
	return b
}

// VolumeMode sets the PersistentVolume's volume mode.
func (b *PersistentVolumeBuilder) VolumeMode(volMode corev1api.PersistentVolumeMode) *PersistentVolumeBuilder {
	b.object.Spec.VolumeMode = &volMode
	return b
}

// NodeAffinityRequired sets the PersistentVolume's NodeAffinity Requirement.
func (b *PersistentVolumeBuilder) NodeAffinityRequired(req *corev1api.NodeSelector) *PersistentVolumeBuilder {
	b.object.Spec.NodeAffinity = &corev1api.VolumeNodeAffinity{
@@ -67,3 +67,8 @@ func (v *VolumeSnapshotBuilder) BoundVolumeSnapshotContentName(vscName string) *
	v.object.Status.BoundVolumeSnapshotContentName = &vscName
	return v
}

func (v *VolumeSnapshotBuilder) SourcePVC(name string) *VolumeSnapshotBuilder {
	v.object.Spec.Source.PersistentVolumeClaimName = &name
	return v
}
pkg/client/auth_providers.go (new file, 21 lines)
@@ -0,0 +1,21 @@
/*
Copyright 2017 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	// Make sure we import the client-go auth provider plugins.
	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -18,6 +18,7 @@ package client

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -25,6 +26,7 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
)

// DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and
@@ -33,6 +35,8 @@ type DynamicFactory interface {
	// ClientForGroupVersionResource returns a Dynamic client for the given group/version
	// and resource for the given namespace.
	ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error)
	// DynamicSharedInformerFactoryForNamespace returns a DynamicSharedInformerFactory for the given namespace.
	DynamicSharedInformerFactoryForNamespace(namespace string) dynamicinformer.DynamicSharedInformerFactory
}

// dynamicFactory implements DynamicFactory.
@@ -51,6 +55,10 @@ func (f *dynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, r
	}, nil
}

func (f *dynamicFactory) DynamicSharedInformerFactoryForNamespace(namespace string) dynamicinformer.DynamicSharedInformerFactory {
	return dynamicinformer.NewFilteredDynamicSharedInformerFactory(f.dynamicClient, time.Minute, namespace, nil)
}

// Creator creates an object.
type Creator interface {
	// Create creates an object.
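A hedged sketch of consuming the new DynamicSharedInformerFactoryForNamespace method. The wrapper function, its name watchBackups, and the backups GroupVersionResource are illustrative assumptions; only the factory method itself comes from the diff above.

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/cache"

	veleroclient "github.com/vmware-tanzu/velero/pkg/client"
)

// watchBackups builds a namespace-scoped dynamic informer for Velero backups.
// Event handlers would normally be registered on the returned informer before Start.
func watchBackups(dynClient dynamic.Interface, stopCh <-chan struct{}) cache.SharedIndexInformer {
	factory := veleroclient.NewDynamicFactory(dynClient)
	informerFactory := factory.DynamicSharedInformerFactoryForNamespace("velero")

	gvr := schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backups"}
	informer := informerFactory.ForResource(gvr).Informer()

	// Start all informers created so far and wait for their caches to sync.
	informerFactory.Start(stopCh)
	informerFactory.WaitForCacheSync(stopCh)
	return informer
}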
pkg/client/retry.go (new file, 44 lines)
@@ -0,0 +1,44 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/util/retry"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

func CreateRetryGenerateName(client kbclient.Client, ctx context.Context, obj kbclient.Object) error {
	return CreateRetryGenerateNameWithFunc(obj, func() error {
		return client.Create(ctx, obj, &kbclient.CreateOptions{})
	})
}

func CreateRetryGenerateNameWithFunc(obj kbclient.Object, createFn func() error) error {
	retryCreateFn := func() error {
		// needed to ensure that the name from the failed create isn't left on the object between retries
		obj.SetName("")
		return createFn()
	}
	if obj.GetGenerateName() != "" && obj.GetName() == "" {
		return retry.OnError(retry.DefaultRetry, apierrors.IsAlreadyExists, retryCreateFn)
	} else {
		return createFn()
	}
}
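A short usage sketch of the new helper: the object carries only metadata.generateName, and CreateRetryGenerateName retries the create on AlreadyExists so a generated-name collision does not fail the whole operation. The package name, the createStatusRequest wrapper, and the choice of ServerStatusRequest as the example object are assumptions for illustration.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	veleroclient "github.com/vmware-tanzu/velero/pkg/client"
)

// createStatusRequest shows the intended call pattern for generateName-based objects.
func createStatusRequest(ctx context.Context, kbClient kbclient.Client, ns string) (*velerov1api.ServerStatusRequest, error) {
	req := &velerov1api.ServerStatusRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    ns,
			GenerateName: "velero-cli-",
		},
	}
	if err := veleroclient.CreateRetryGenerateName(kbClient, ctx, req); err != nil {
		return nil, err
	}
	return req, nil
}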
@@ -95,6 +95,7 @@ type CreateOptions struct {
|
||||
ExcludeNamespaceScopedResources flag.StringArray
|
||||
Labels flag.Map
|
||||
Selector flag.LabelSelector
|
||||
OrSelector flag.OrLabelSelector
|
||||
IncludeClusterResources flag.OptionalBool
|
||||
Wait bool
|
||||
StorageLocation string
|
||||
@@ -130,6 +131,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.StringVar(&o.StorageLocation, "storage-location", "", "Location in which to store the backup.")
|
||||
flags.StringSliceVar(&o.SnapshotLocations, "volume-snapshot-locations", o.SnapshotLocations, "List of locations (at most one per provider) where volume snapshots should be stored.")
|
||||
flags.VarP(&o.Selector, "selector", "l", "Only back up resources matching this label selector.")
|
||||
flags.Var(&o.OrSelector, "or-selector", "Backup resources matching at least one of the label selector from the list. Label selectors should be separated by ' or '. For example, foo=bar or app=nginx")
|
||||
flags.StringVar(&o.OrderedResources, "ordered-resources", "", "Mapping Kinds to an ordered list of specific resources of that Kind. Resource names are separated by commas and their names are in format 'namespace/resourcename'. For cluster scope resource, simply use resource name. Key-value pairs in the mapping are separated by semi-colon. Example: 'pods=ns1/pod1,ns1/pod2;persistentvolumeclaims=ns1/pvc4,ns1/pvc8'. Optional.")
|
||||
flags.DurationVar(&o.CSISnapshotTimeout, "csi-snapshot-timeout", o.CSISnapshotTimeout, "How long to wait for CSI snapshot creation before timeout.")
|
||||
flags.DurationVar(&o.ItemOperationTimeout, "item-operation-timeout", o.ItemOperationTimeout, "How long to wait for async plugin operations before timeout.")
|
||||
@@ -168,9 +170,8 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := f.KubebuilderWatchClient()
|
||||
if err != nil {
|
||||
return err
|
||||
if o.Selector.LabelSelector != nil && o.OrSelector.OrLabelSelectors != nil {
|
||||
return fmt.Errorf("either a 'selector' or an 'or-selector' can be specified, but not both")
|
||||
}
|
||||
|
||||
// Ensure that unless FromSchedule is set, args contains a backup name
|
||||
@@ -191,7 +192,7 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
|
||||
|
||||
if o.StorageLocation != "" {
|
||||
location := &velerov1api.BackupStorageLocation{}
|
||||
if err := client.Get(context.Background(), kbclient.ObjectKey{
|
||||
if err := o.client.Get(context.Background(), kbclient.ObjectKey{
|
||||
Namespace: f.Namespace(),
|
||||
Name: o.StorageLocation,
|
||||
}, location); err != nil {
|
||||
@@ -201,7 +202,7 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
|
||||
|
||||
for _, loc := range o.SnapshotLocations {
|
||||
snapshotLocation := new(velerov1api.VolumeSnapshotLocation)
|
||||
if err := o.client.Get(context.TODO(), kbclient.ObjectKey{Namespace: f.Namespace(), Name: loc}, snapshotLocation); err != nil {
|
||||
if err := o.client.Get(context.Background(), kbclient.ObjectKey{Namespace: f.Namespace(), Name: loc}, snapshotLocation); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -365,6 +366,7 @@ func (o *CreateOptions) BuildBackup(namespace string) (*velerov1api.Backup, erro
|
||||
IncludedNamespaceScopedResources(o.IncludeNamespaceScopedResources...).
|
||||
ExcludedNamespaceScopedResources(o.ExcludeNamespaceScopedResources...).
|
||||
LabelSelector(o.Selector.LabelSelector).
|
||||
OrLabelSelector(o.OrSelector.OrLabelSelectors).
|
||||
TTL(o.TTL).
|
||||
StorageLocation(o.StorageLocation).
|
||||
VolumeSnapshotLocations(o.SnapshotLocations...).
|
||||
|
||||
@@ -47,6 +47,15 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
|
||||
orders, err := ParseOrderedResources(o.OrderedResources)
|
||||
o.CSISnapshotTimeout = 20 * time.Minute
|
||||
o.ItemOperationTimeout = 20 * time.Minute
|
||||
orLabelSelectors := []*metav1.LabelSelector{
|
||||
{
|
||||
MatchLabels: map[string]string{"k1": "v1", "k2": "v2"},
|
||||
},
|
||||
{
|
||||
MatchLabels: map[string]string{"a1": "b1", "a2": "b2"},
|
||||
},
|
||||
}
|
||||
o.OrSelector.OrLabelSelectors = orLabelSelectors
|
||||
assert.NoError(t, err)
|
||||
|
||||
backup, err := o.BuildBackup(cmdtest.VeleroNameSpace)
|
||||
@@ -58,6 +67,7 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
|
||||
SnapshotVolumes: o.SnapshotVolumes.Value,
|
||||
IncludeClusterResources: o.IncludeClusterResources.Value,
|
||||
OrderedResources: orders,
|
||||
OrLabelSelectors: orLabelSelectors,
|
||||
CSISnapshotTimeout: metav1.Duration{Duration: o.CSISnapshotTimeout},
|
||||
ItemOperationTimeout: metav1.Duration{Duration: o.ItemOperationTimeout},
|
||||
}, backup.Spec)
|
||||
|
||||
@@ -124,7 +124,7 @@ func Run(o *cli.DeleteOptions) error {
			ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, label.GetValidName(b.Name),
				velerov1api.BackupUIDLabel, string(b.UID)), builder.WithGenerateName(b.Name+"-")).Result()

		if err := o.Client.Create(context.TODO(), deleteRequest, &controllerclient.CreateOptions{}); err != nil {
		if err := client.CreateRetryGenerateName(o.Client, context.TODO(), deleteRequest); err != nil {
			errs = append(errs, err)
			continue
		}
@@ -57,6 +57,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
|
||||
kbClient, err := f.KubebuilderClient()
|
||||
cmd.CheckError(err)
|
||||
|
||||
var csiClient *snapshotv1client.Clientset
|
||||
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
|
||||
clientConfig, err := f.ClientConfig()
|
||||
cmd.CheckError(err)
|
||||
csiClient, err = snapshotv1client.NewForConfig(clientConfig)
|
||||
cmd.CheckError(err)
|
||||
}
|
||||
|
||||
if outputFormat != "plaintext" && outputFormat != "json" {
|
||||
cmd.CheckError(fmt.Errorf("invalid output format '%s'. valid value are 'plaintext, json'", outputFormat))
|
||||
}
|
||||
@@ -96,16 +104,9 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
|
||||
fmt.Fprintf(os.Stderr, "error getting PodVolumeBackups for backup %s: %v\n", backup.Name, err)
|
||||
}
|
||||
|
||||
var csiClient *snapshotv1client.Clientset
|
||||
// declare vscList up here since it may be empty and we'll pass the empty Items field into DescribeBackup
|
||||
vscList := new(snapshotv1api.VolumeSnapshotContentList)
|
||||
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
|
||||
clientConfig, err := f.ClientConfig()
|
||||
cmd.CheckError(err)
|
||||
|
||||
csiClient, err = snapshotv1client.NewForConfig(clientConfig)
|
||||
cmd.CheckError(err)
|
||||
|
||||
opts := label.NewListOptionsForBackup(backup.Name)
|
||||
vscList, err = csiClient.SnapshotV1().VolumeSnapshotContents().List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
|
||||
@@ -69,6 +69,7 @@ func TestNewDescribeCommand(t *testing.T) {
|
||||
|
||||
if err == nil {
|
||||
assert.Contains(t, stdout, "Velero-Native Snapshots: <none included>")
|
||||
assert.Contains(t, stdout, "Or label selector: <none>")
|
||||
assert.Contains(t, stdout, fmt.Sprintf("Name: %s", backupName))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -33,8 +33,6 @@ import (
	"github.com/vmware-tanzu/velero/pkg/cmd/cli"
)

const bslLabelKey = "velero.io/storage-location"

// NewDeleteCommand creates and returns a new cobra command for deleting backup-locations.
func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
	o := cli.NewDeleteOptions("backup-location")
@@ -146,7 +144,7 @@ func findAssociatedBackups(client kbclient.Client, bslName, ns string) (velerov1
	var backups velerov1api.BackupList
	err := client.List(context.Background(), &backups, &kbclient.ListOptions{
		Namespace: ns,
		Raw:       &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName},
		Raw:       &metav1.ListOptions{LabelSelector: velerov1api.StorageLocationLabel + "=" + bslName},
	})
	return backups, err
}
@@ -155,7 +153,7 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele
	var repos velerov1api.BackupRepositoryList
	err := client.List(context.Background(), &repos, &kbclient.ListOptions{
		Namespace: ns,
		Raw:       &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName},
		Raw:       &metav1.ListOptions{LabelSelector: velerov1api.StorageLocationLabel + "=" + bslName},
	})
	return repos, err
}
@@ -66,6 +66,7 @@ type Options struct {
|
||||
BackupStorageConfig flag.Map
|
||||
VolumeSnapshotConfig flag.Map
|
||||
UseNodeAgent bool
|
||||
PrivilegedNodeAgent bool
|
||||
//TODO remove UseRestic when migration test out of using it
|
||||
UseRestic bool
|
||||
Wait bool
|
||||
@@ -79,6 +80,8 @@ type Options struct {
|
||||
Features string
|
||||
DefaultVolumesToFsBackup bool
|
||||
UploaderType string
|
||||
DefaultSnapshotMoveData bool
|
||||
DisableInformerCache bool
|
||||
}
|
||||
|
||||
// BindFlags adds command line values to the options struct.
|
||||
@@ -109,6 +112,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.BoolVar(&o.RestoreOnly, "restore-only", o.RestoreOnly, "Run the server in restore-only mode. Optional.")
|
||||
flags.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Generate resources, but don't send them to the cluster. Use with -o. Optional.")
|
||||
flags.BoolVar(&o.UseNodeAgent, "use-node-agent", o.UseNodeAgent, "Create Velero node-agent daemonset. Optional. Velero node-agent hosts Velero modules that need to run in one or more nodes(i.e. Restic, Kopia).")
|
||||
flags.BoolVar(&o.PrivilegedNodeAgent, "privileged-node-agent", o.PrivilegedNodeAgent, "Use privileged mode for the node agent. Optional. Required to backup block devices.")
|
||||
flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.")
|
||||
flags.DurationVar(&o.DefaultRepoMaintenanceFrequency, "default-repo-maintain-frequency", o.DefaultRepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default. Optional.")
|
||||
flags.DurationVar(&o.GarbageCollectionFrequency, "garbage-collection-frequency", o.GarbageCollectionFrequency, "How often the garbage collection runs for expired backups.(default 1h)")
|
||||
@@ -118,6 +122,8 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.StringVar(&o.Features, "features", o.Features, "Comma separated list of Velero feature flags to be set on the Velero deployment and the node-agent daemonset, if node-agent is enabled")
|
||||
flags.BoolVar(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", o.DefaultVolumesToFsBackup, "Bool flag to configure Velero server to use pod volume file system backup by default for all volumes on all backups. Optional.")
|
||||
flags.StringVar(&o.UploaderType, "uploader-type", o.UploaderType, fmt.Sprintf("The type of uploader to transfer the data of pod volumes, the supported values are '%s', '%s'", uploader.ResticType, uploader.KopiaType))
|
||||
flags.BoolVar(&o.DefaultSnapshotMoveData, "default-snapshot-move-data", o.DefaultSnapshotMoveData, "Bool flag to configure Velero server to move data by default for all snapshots supporting data movement. Optional.")
|
||||
flags.BoolVar(&o.DisableInformerCache, "disable-informer-cache", o.DisableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable). Optional.")
|
||||
}
|
||||
|
||||
// NewInstallOptions instantiates a new, default InstallOptions struct.
|
||||
@@ -144,6 +150,8 @@ func NewInstallOptions() *Options {
|
||||
CRDsOnly: false,
|
||||
DefaultVolumesToFsBackup: false,
|
||||
UploaderType: uploader.KopiaType,
|
||||
DefaultSnapshotMoveData: false,
|
||||
DisableInformerCache: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,6 +203,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
|
||||
SecretData: secretData,
|
||||
RestoreOnly: o.RestoreOnly,
|
||||
UseNodeAgent: o.UseNodeAgent,
|
||||
PrivilegedNodeAgent: o.PrivilegedNodeAgent,
|
||||
UseVolumeSnapshots: o.UseVolumeSnapshots,
|
||||
BSLConfig: o.BackupStorageConfig.Data(),
|
||||
VSLConfig: o.VolumeSnapshotConfig.Data(),
|
||||
@@ -206,6 +215,8 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
|
||||
Features: strings.Split(o.Features, ","),
|
||||
DefaultVolumesToFsBackup: o.DefaultVolumesToFsBackup,
|
||||
UploaderType: o.UploaderType,
|
||||
DefaultSnapshotMoveData: o.DefaultSnapshotMoveData,
|
||||
DisableInformerCache: o.DisableInformerCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -114,6 +114,7 @@ func NewServerCommand(f client.Factory) *cobra.Command {
|
||||
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
|
||||
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters. Default is 10 minutes.")
|
||||
command.Flags().DurationVar(&config.dataMoverPrepareTimeout, "data-mover-prepare-timeout", config.dataMoverPrepareTimeout, "How long to wait for preparing a DataUpload/DataDownload. Default is 30 minutes.")
|
||||
command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics")
|
||||
|
||||
return command
|
||||
}
|
||||
@@ -193,14 +194,15 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
|
||||
}
|
||||
|
||||
s := &nodeAgentServer{
|
||||
logger: logger,
|
||||
ctx: ctx,
|
||||
cancelFunc: cancelFunc,
|
||||
fileSystem: filesystem.NewFileSystem(),
|
||||
mgr: mgr,
|
||||
config: config,
|
||||
namespace: factory.Namespace(),
|
||||
nodeName: nodeName,
|
||||
logger: logger,
|
||||
ctx: ctx,
|
||||
cancelFunc: cancelFunc,
|
||||
fileSystem: filesystem.NewFileSystem(),
|
||||
mgr: mgr,
|
||||
config: config,
|
||||
namespace: factory.Namespace(),
|
||||
nodeName: nodeName,
|
||||
metricsAddress: config.metricsAddress,
|
||||
}
|
||||
|
||||
// the cache isn't initialized yet when "validatePodVolumesHostPath" is called, the client returned by the manager cannot
|
||||
@@ -361,7 +363,7 @@ func (s *nodeAgentServer) markDataUploadsCancel(r *controller.DataUploadReconcil
|
||||
return
|
||||
}
|
||||
if dataUploads, err := r.FindDataUploads(s.ctx, client, s.namespace); err != nil {
|
||||
s.logger.WithError(errors.WithStack(err)).Error("failed to find data downloads")
|
||||
s.logger.WithError(errors.WithStack(err)).Error("failed to find data uploads")
|
||||
} else {
|
||||
for i := range dataUploads {
|
||||
du := dataUploads[i]
|
||||
@@ -463,7 +465,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
|
||||
continue
|
||||
}
|
||||
if pod.Spec.NodeName != s.nodeName {
|
||||
s.logger.Debugf("the node of pod referenced by podvolumebackup %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName)
|
||||
s.logger.Debugf("the node of pod referenced by podvolumerestore %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName)
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -92,6 +92,7 @@ type CreateOptions struct {
|
||||
StatusExcludeResources flag.StringArray
|
||||
NamespaceMappings flag.Map
|
||||
Selector flag.LabelSelector
|
||||
OrSelector flag.OrLabelSelector
|
||||
IncludeClusterResources flag.OptionalBool
|
||||
Wait bool
|
||||
AllowPartiallyFailed flag.OptionalBool
|
||||
@@ -124,6 +125,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.Var(&o.StatusIncludeResources, "status-include-resources", "Resources to include in the restore status, formatted as resource.group, such as storageclasses.storage.k8s.io.")
|
||||
flags.Var(&o.StatusExcludeResources, "status-exclude-resources", "Resources to exclude from the restore status, formatted as resource.group, such as storageclasses.storage.k8s.io.")
|
||||
flags.VarP(&o.Selector, "selector", "l", "Only restore resources matching this label selector.")
|
||||
flags.Var(&o.OrSelector, "or-selector", "Restore resources matching at least one of the label selector from the list. Label selectors should be separated by ' or '. For example, foo=bar or app=nginx")
|
||||
flags.DurationVar(&o.ItemOperationTimeout, "item-operation-timeout", o.ItemOperationTimeout, "How long to wait for async plugin operations before timeout.")
|
||||
f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.")
|
||||
// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
|
||||
@@ -185,6 +187,10 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
|
||||
return errors.New("Velero client is not set; unable to proceed")
|
||||
}
|
||||
|
||||
if o.Selector.LabelSelector != nil && o.OrSelector.OrLabelSelectors != nil {
|
||||
return errors.New("either a 'selector' or an 'or-selector' can be specified, but not both")
|
||||
}
|
||||
|
||||
if len(o.ExistingResourcePolicy) > 0 && !isResourcePolicyValid(o.ExistingResourcePolicy) {
|
||||
return errors.New("existing-resource-policy has invalid value, it accepts only none, update as value")
|
||||
}
|
||||
@@ -304,6 +310,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
|
||||
ExistingResourcePolicy: api.PolicyType(o.ExistingResourcePolicy),
|
||||
NamespaceMapping: o.NamespaceMappings.Data(),
|
||||
LabelSelector: o.Selector.LabelSelector,
|
||||
OrLabelSelectors: o.OrSelector.OrLabelSelectors,
|
||||
RestorePVs: o.RestoreVolumes.Value,
|
||||
PreserveNodePorts: o.PreserveNodePorts.Value,
|
||||
IncludeClusterResources: o.IncludeClusterResources.Value,
|
||||
|
||||
@@ -145,6 +145,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
|
||||
ExcludedNamespaceScopedResources: o.BackupOptions.ExcludeNamespaceScopedResources,
|
||||
IncludeClusterResources: o.BackupOptions.IncludeClusterResources.Value,
|
||||
LabelSelector: o.BackupOptions.Selector.LabelSelector,
|
||||
OrLabelSelectors: o.BackupOptions.OrSelector.OrLabelSelectors,
|
||||
SnapshotVolumes: o.BackupOptions.SnapshotVolumes.Value,
|
||||
TTL: metav1.Duration{Duration: o.BackupOptions.TTL},
|
||||
StorageLocation: o.BackupOptions.StorageLocation,
|
||||
@@ -153,6 +154,8 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
|
||||
OrderedResources: orders,
|
||||
CSISnapshotTimeout: metav1.Duration{Duration: o.BackupOptions.CSISnapshotTimeout},
|
||||
ItemOperationTimeout: metav1.Duration{Duration: o.BackupOptions.ItemOperationTimeout},
|
||||
DataMover: o.BackupOptions.DataMover,
|
||||
SnapshotMoveData: o.BackupOptions.SnapshotMoveData.Value,
|
||||
},
|
||||
Schedule: o.Schedule,
|
||||
UseOwnerReferencesInBackup: &o.UseOwnerReferencesInBackup,
|
||||
|
||||
@@ -26,6 +26,7 @@ import (

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/builder"
	veleroclient "github.com/vmware-tanzu/velero/pkg/client"
)

type Getter interface {
@@ -40,7 +41,7 @@ type DefaultServerStatusGetter struct {
func (g *DefaultServerStatusGetter) GetServerStatus(kbClient kbclient.Client) (*velerov1api.ServerStatusRequest, error) {
	created := builder.ForServerStatusRequest(g.Namespace, "", "0").ObjectMeta(builder.WithGenerateName("velero-cli-")).Result()

	if err := kbClient.Create(context.Background(), created, &kbclient.CreateOptions{}); err != nil {
	if err := veleroclient.CreateRetryGenerateName(kbClient, context.Background(), created); err != nil {
		return nil, errors.WithStack(err)
	}
||||
|
||||
|
||||
@@ -19,6 +19,8 @@ package uninstall
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -40,6 +42,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd"
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd/cli"
|
||||
@@ -50,6 +53,8 @@ import (
|
||||
|
||||
var gracefulDeletionMaximumDuration = 1 * time.Minute
|
||||
|
||||
var resToDelete = []kbclient.ObjectList{}
|
||||
|
||||
// uninstallOptions collects all the options for uninstalling Velero from a Kubernetes cluster.
|
||||
type uninstallOptions struct {
|
||||
wait bool // deprecated
|
||||
@@ -167,11 +172,7 @@ func Run(ctx context.Context, kbClient kbclient.Client, namespace string) error
|
||||
}
|
||||
|
||||
func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
// Deal with resources with attached finalizers to ensure proper handling of those finalizers.
|
||||
if err := deleteResourcesWithFinalizer(ctx, kbClient, namespace); err != nil {
|
||||
return errors.Wrap(err, "Fail to remove finalizer from restores")
|
||||
}
|
||||
|
||||
// First check if it's already been deleted
|
||||
ns := &corev1.Namespace{}
|
||||
key := kbclient.ObjectKey{Name: namespace}
|
||||
if err := kbClient.Get(ctx, key, ns); err != nil {
|
||||
@@ -182,6 +183,11 @@ func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace st
|
||||
return err
|
||||
}
|
||||
|
||||
// Deal with resources with attached finalizers to ensure proper handling of those finalizers.
|
||||
if err := deleteResourcesWithFinalizer(ctx, kbClient, namespace); err != nil {
|
||||
return errors.Wrap(err, "Fail to remove finalizer from restores")
|
||||
}
|
||||
|
||||
if err := kbClient.Delete(ctx, ns); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
fmt.Printf("Velero namespace %q does not exist, skipping.\n", namespace)
|
||||
@@ -223,80 +229,151 @@ func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace st
|
||||
// 2. The controller may encounter errors while handling the finalizer, in such case, the controller will keep trying until it succeeds.
|
||||
// So it is important to set a timeout, once the process exceed the timeout, we will forcedly delete the resources by removing the finalizer from them,
|
||||
// otherwise the deletion process may get stuck indefinitely.
|
||||
// 3. There is only restore finalizer supported as of v1.12. If any new finalizers are added in the future, the corresponding deletion logic can be
|
||||
// 3. There is only resources finalizer supported as of v1.12. If any new finalizers are added in the future, the corresponding deletion logic can be
|
||||
// incorporated into this function.
|
||||
func deleteResourcesWithFinalizer(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
fmt.Println("Waiting for resource with attached finalizer to be deleted")
|
||||
return deleteRestore(ctx, kbClient, namespace)
|
||||
return deleteResources(ctx, kbClient, namespace)
|
||||
}
|
||||
|
||||
func deleteRestore(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
// Check if restore crd exists, if it does not exist, return immediately.
|
||||
func checkResources(ctx context.Context, kbClient kbclient.Client) error {
|
||||
checkCRDs := []string{"restores.velero.io", "datauploads.velero.io", "datadownloads.velero.io"}
|
||||
var err error
|
||||
v1crd := &apiextv1.CustomResourceDefinition{}
|
||||
key := kbclient.ObjectKey{Name: "restores.velero.io"}
|
||||
if err = kbClient.Get(ctx, key, v1crd); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
for _, crd := range checkCRDs {
|
||||
key := kbclient.ObjectKey{Name: crd}
|
||||
if err = kbClient.Get(ctx, key, v1crd); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "Error getting %s crd", crd)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrap(err, "Error getting restore crd")
|
||||
// no error with found CRD that we should delete
|
||||
switch crd {
|
||||
case "restores.velero.io":
|
||||
resToDelete = append(resToDelete, &velerov1api.RestoreList{})
|
||||
case "datauploads.velero.io":
|
||||
resToDelete = append(resToDelete, &velerov2alpha1api.DataUploadList{})
|
||||
case "datadownloads.velero.io":
|
||||
resToDelete = append(resToDelete, &velerov2alpha1api.DataDownloadList{})
|
||||
default:
|
||||
fmt.Printf("Unsupported type %s to be cleared\n", crd)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// First attempt to gracefully delete all the restore within a specified time frame, If the process exceeds the timeout limit,
|
||||
// it is likely that there may be errors during the finalization of restores. In such cases, we should proceed with forcefully deleting the restores.
|
||||
err = gracefullyDeleteRestore(ctx, kbClient, namespace)
|
||||
if err != nil && err != wait.ErrWaitTimeout {
|
||||
return errors.Wrap(err, "Error deleting restores")
|
||||
func deleteResources(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
// Check if resources crd exists, if it does not exist, return immediately.
|
||||
err := checkResources(ctx, kbClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// First attempt to gracefully delete all the resources within a specified time frame, If the process exceeds the timeout limit,
|
||||
// it is likely that there may be errors during the finalization of restores. In such cases, we should proceed with forcefully deleting the restores.
|
||||
err = gracefullyDeleteResources(ctx, kbClient, namespace)
|
||||
if err != nil && err != wait.ErrWaitTimeout {
|
||||
return errors.Wrap(err, "Error deleting resources")
|
||||
}
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = forcedlyDeleteRestore(ctx, kbClient, namespace)
|
||||
err = forcedlyDeleteResources(ctx, kbClient, namespace)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error deleting restores")
|
||||
return errors.Wrap(err, "Error deleting resources forcedly")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func gracefullyDeleteRestore(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
var err error
|
||||
restoreList := &velerov1api.RestoreList{}
|
||||
if err = kbClient.List(ctx, restoreList, &kbclient.ListOptions{Namespace: namespace}); err != nil {
|
||||
return errors.Wrap(err, "Error getting restores during graceful deletion")
|
||||
func gracefullyDeleteResources(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
errorChan := make(chan error)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(resToDelete))
|
||||
|
||||
for i := range resToDelete {
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
errorChan <- gracefullyDeleteResource(ctx, kbClient, namespace, resToDelete[index])
|
||||
}(i)
|
||||
}
|
||||
|
||||
for i := range restoreList.Items {
|
||||
if err = kbClient.Delete(ctx, &restoreList.Items[i]); err != nil {
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errorChan)
|
||||
}()
|
||||
|
||||
for err := range errorChan {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return waitDeletingResources(ctx, kbClient, namespace)
|
||||
}
|
||||
|
||||
func gracefullyDeleteResource(ctx context.Context, kbClient kbclient.Client, namespace string, list kbclient.ObjectList) error {
|
||||
if err := kbClient.List(ctx, list, &kbclient.ListOptions{Namespace: namespace}); err != nil {
|
||||
return errors.Wrap(err, "Error getting resources during graceful deletion")
|
||||
}
|
||||
|
||||
var objectsToDelete []kbclient.Object
|
||||
items := reflect.ValueOf(list).Elem().FieldByName("Items")
|
||||
|
||||
for i := 0; i < items.Len(); i++ {
|
||||
item := items.Index(i).Addr().Interface()
|
||||
// Type assertion to cast item to the appropriate type
|
||||
switch typedItem := item.(type) {
|
||||
case *velerov1api.Restore:
|
||||
objectsToDelete = append(objectsToDelete, typedItem)
|
||||
case *velerov2alpha1api.DataUpload:
|
||||
objectsToDelete = append(objectsToDelete, typedItem)
|
||||
case *velerov2alpha1api.DataDownload:
|
||||
objectsToDelete = append(objectsToDelete, typedItem)
|
||||
default:
|
||||
return errors.New("Unsupported resource type")
|
||||
}
|
||||
}
|
||||
|
||||
// Delete collected resources in a batch
|
||||
for _, resource := range objectsToDelete {
|
||||
if err := kbClient.Delete(ctx, resource); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return errors.Wrap(err, "Error deleting restores during graceful deletion")
|
||||
return errors.Wrap(err, "Error deleting resources during graceful deletion")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitDeletingResources(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
// Wait for the deletion of all the restores within a specified time frame
|
||||
err = wait.PollImmediate(time.Second, gracefulDeletionMaximumDuration, func() (bool, error) {
|
||||
restoreList := &velerov1api.RestoreList{}
|
||||
if errList := kbClient.List(ctx, restoreList, &kbclient.ListOptions{Namespace: namespace}); errList != nil {
|
||||
return false, errList
|
||||
err := wait.PollImmediate(time.Second, gracefulDeletionMaximumDuration, func() (bool, error) {
|
||||
itemsCount := 0
|
||||
for i := range resToDelete {
|
||||
if errList := kbClient.List(ctx, resToDelete[i], &kbclient.ListOptions{Namespace: namespace}); errList != nil {
|
||||
return false, errList
|
||||
}
|
||||
itemsCount += reflect.ValueOf(resToDelete[i]).Elem().FieldByName("Items").Len()
|
||||
if itemsCount > 0 {
|
||||
fmt.Print(".")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(restoreList.Items) > 0 {
|
||||
fmt.Print(".")
|
||||
return false, nil
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func forcedlyDeleteRestore(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
func forcedlyDeleteResources(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
// Delete velero deployment first in case:
|
||||
// 1. finalizers will be added back by restore controller after they are removed at next step;
|
||||
// 2. new restores attached with finalizer will be created by restore controller after we remove all the restores' finalizer at next step;
|
||||
// 1. finalizers will be added back by resources related controller after they are removed at next step;
|
||||
// 2. new resources attached with finalizer will be created by controller after we remove all the resources' finalizer at next step;
|
||||
deploy := &appsv1api.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "velero",
|
||||
@@ -330,23 +407,56 @@ func forcedlyDeleteRestore(ctx context.Context, kbClient kbclient.Client, namesp
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error deleting velero deployment during force deletion")
|
||||
}
|
||||
return removeResourcesFinalizer(ctx, kbClient, namespace)
|
||||
}
|
||||
|
||||
// Remove all the restores' finalizer so they can be deleted during the deletion of velero namespace.
|
||||
restoreList := &velerov1api.RestoreList{}
|
||||
if err := kbClient.List(ctx, restoreList, &kbclient.ListOptions{Namespace: namespace}); err != nil {
|
||||
return errors.Wrap(err, "Error getting restores during force deletion")
|
||||
func removeResourcesFinalizer(ctx context.Context, kbClient kbclient.Client, namespace string) error {
|
||||
for i := range resToDelete {
|
||||
if err := removeResourceFinalizer(ctx, kbClient, namespace, resToDelete[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeResourceFinalizer(ctx context.Context, kbClient kbclient.Client, namespace string, resourceList kbclient.ObjectList) error {
|
||||
listOptions := &kbclient.ListOptions{Namespace: namespace}
|
||||
if err := kbClient.List(ctx, resourceList, listOptions); err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("Error getting resources of type %T during force deletion", resourceList))
|
||||
}
|
||||
|
||||
for i := range restoreList.Items {
|
||||
if controllerutil.ContainsFinalizer(&restoreList.Items[i], controller.ExternalResourcesFinalizer) {
|
||||
update := &restoreList.Items[i]
|
||||
original := update.DeepCopy()
|
||||
controllerutil.RemoveFinalizer(update, controller.ExternalResourcesFinalizer)
|
||||
if err := kubeutil.PatchResource(original, update, kbClient); err != nil {
|
||||
return errors.Wrap(err, "Error removing restore finalizer during force deletion")
|
||||
}
|
||||
items := reflect.ValueOf(resourceList).Elem().FieldByName("Items")
|
||||
var err error
|
||||
for i := 0; i < items.Len(); i++ {
|
||||
item := items.Index(i).Addr().Interface()
|
||||
// Type assertion to cast item to the appropriate type
|
||||
switch typedItem := item.(type) {
|
||||
case *velerov1api.Restore:
|
||||
err = removeFinalizerForObject(typedItem, controller.ExternalResourcesFinalizer, kbClient)
|
||||
case *velerov2alpha1api.DataUpload:
|
||||
err = removeFinalizerForObject(typedItem, controller.DataUploadDownloadFinalizer, kbClient)
|
||||
case *velerov2alpha1api.DataDownload:
|
||||
err = removeFinalizerForObject(typedItem, controller.DataUploadDownloadFinalizer, kbClient)
|
||||
default:
|
||||
err = errors.Errorf("Unsupported resource type %T", typedItem)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeFinalizerForObject(obj kbclient.Object, finalizer string, kbClient kbclient.Client) error {
|
||||
if controllerutil.ContainsFinalizer(obj, finalizer) {
|
||||
update := obj.DeepCopyObject().(kbclient.Object)
|
||||
original := obj.DeepCopyObject().(kbclient.Object)
|
||||
|
||||
controllerutil.RemoveFinalizer(update, finalizer)
|
||||
if err := kubeutil.PatchResource(original, update, kbClient); err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("Error removing finalizer %q during force deletion", finalizer))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -112,6 +112,7 @@ const (
|
||||
defaultCredentialsDirectory = "/tmp/credentials"
|
||||
|
||||
defaultMaxConcurrentK8SConnections = 30
|
||||
defaultDisableInformerCache = false
|
||||
)
|
||||
|
||||
type serverConfig struct {
|
||||
@@ -135,6 +136,8 @@ type serverConfig struct {
|
||||
defaultVolumesToFsBackup bool
|
||||
uploaderType string
|
||||
maxConcurrentK8SConnections int
|
||||
defaultSnapshotMoveData bool
|
||||
disableInformerCache bool
|
||||
}
|
||||
|
||||
func NewCommand(f client.Factory) *cobra.Command {
|
||||
@@ -163,6 +166,8 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
defaultVolumesToFsBackup: podvolume.DefaultVolumesToFsBackup,
|
||||
uploaderType: uploader.ResticType,
|
||||
maxConcurrentK8SConnections: defaultMaxConcurrentK8SConnections,
|
||||
defaultSnapshotMoveData: false,
|
||||
disableInformerCache: defaultDisableInformerCache,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -233,6 +238,8 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
command.Flags().DurationVar(&config.defaultItemOperationTimeout, "default-item-operation-timeout", config.defaultItemOperationTimeout, "How long to wait on asynchronous BackupItemActions and RestoreItemActions to complete before timing out. Default is 4 hours")
|
||||
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters. Default is 10 minutes.")
|
||||
command.Flags().IntVar(&config.maxConcurrentK8SConnections, "max-concurrent-k8s-connections", config.maxConcurrentK8SConnections, "Max concurrent connections number that Velero can create with kube-apiserver. Default is 30.")
|
||||
command.Flags().BoolVar(&config.defaultSnapshotMoveData, "default-snapshot-move-data", config.defaultSnapshotMoveData, "Move data by default for all snapshots supporting data movement.")
|
||||
command.Flags().BoolVar(&config.disableInformerCache, "disable-informer-cache", config.disableInformerCache, "Disable informer cache for Get calls on restore. WIth this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).")
|
||||
|
||||
return command
|
||||
}
|
||||
@@ -754,9 +761,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
|
||||
backupStoreGetter,
|
||||
s.config.formatFlag.Parse(),
|
||||
s.csiSnapshotLister,
|
||||
s.csiSnapshotClient,
|
||||
s.credentialFileStore,
|
||||
s.config.maxConcurrentK8SConnections,
|
||||
s.config.defaultSnapshotMoveData,
|
||||
).SetupWithManager(s.mgr); err != nil {
|
||||
s.logger.Fatal(err, "unable to create controller", "controller", controller.Backup)
|
||||
}
|
||||
@@ -817,6 +824,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
|
||||
cmd.CheckError(err)
|
||||
r := controller.NewBackupFinalizerReconciler(
|
||||
s.mgr.GetClient(),
|
||||
s.csiSnapshotLister,
|
||||
clock.RealClock{},
|
||||
backupper,
|
||||
newPluginManager,
|
||||
@@ -933,6 +941,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
|
||||
s.metrics,
|
||||
s.config.formatFlag.Parse(),
|
||||
s.config.defaultItemOperationTimeout,
|
||||
s.config.disableInformerCache,
|
||||
)
|
||||
|
||||
if err = r.SetupWithManager(s.mgr); err != nil {
|
||||
|
||||
pkg/cmd/util/flag/orlabelselector.go (new file, 61 lines)
@@ -0,0 +1,61 @@
/*
Copyright 2017 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flag

import (
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// OrLabelSelector is a Cobra-compatible wrapper for defining
// a Kubernetes or-label-selector flag.
type OrLabelSelector struct {
	OrLabelSelectors []*metav1.LabelSelector
}

// String returns a string representation of the or-label
// selector flag.
func (ls *OrLabelSelector) String() string {
	orLabels := []string{}
	for _, v := range ls.OrLabelSelectors {
		orLabels = append(orLabels, metav1.FormatLabelSelector(v))
	}
	return strings.Join(orLabels, " or ")
}

// Set parses the provided string and assigns the result
// to the or-label-selector receiver. It returns an error if
// the string is not parseable.
func (ls *OrLabelSelector) Set(s string) error {
	orItems := strings.Split(s, " or ")
	ls.OrLabelSelectors = make([]*metav1.LabelSelector, 0)
	for _, orItem := range orItems {
		parsed, err := metav1.ParseToLabelSelector(orItem)
		if err != nil {
			return err
		}
		ls.OrLabelSelectors = append(ls.OrLabelSelectors, parsed)
	}
	return nil
}

// Type returns a string representation of the
// OrLabelSelector type.
func (ls *OrLabelSelector) Type() string {
	return "orLabelSelector"
}
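To illustrate the flag's parsing behavior outside of cobra, a small self-contained sketch; the label values are made up and the import path is taken from the file location above.

package main

import (
	"fmt"
	"log"

	"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
)

func main() {
	// Each " or "-separated group becomes its own *metav1.LabelSelector.
	var sel flag.OrLabelSelector
	if err := sel.Set("app=nginx,tier=frontend or app=redis"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(sel.OrLabelSelectors)) // 2
	fmt.Println(sel.String())              // app=nginx,tier=frontend or app=redis
}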
pkg/cmd/util/flag/orlabelselector_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
package flag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestStringOfOrLabelSelector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
orLabelSelector *OrLabelSelector
|
||||
expectedStr string
|
||||
}{
|
||||
{
|
||||
name: "or between two labels",
|
||||
orLabelSelector: &OrLabelSelector{
|
||||
OrLabelSelectors: []*metav1.LabelSelector{
|
||||
{
|
||||
MatchLabels: map[string]string{"k1": "v1"},
|
||||
},
|
||||
{
|
||||
MatchLabels: map[string]string{"k2": "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedStr: "k1=v1 or k2=v2",
|
||||
},
|
||||
{
|
||||
name: "or between two label groups",
|
||||
orLabelSelector: &OrLabelSelector{
|
||||
OrLabelSelectors: []*metav1.LabelSelector{
|
||||
{
|
||||
MatchLabels: map[string]string{"k1": "v1", "k2": "v2"},
|
||||
},
|
||||
{
|
||||
MatchLabels: map[string]string{"a1": "b1", "a2": "b2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedStr: "k1=v1,k2=v2 or a1=b1,a2=b2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
assert.Equal(t, test.expectedStr, test.orLabelSelector.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetOfOrLabelSelector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputStr string
|
||||
expectedSelector *OrLabelSelector
|
||||
}{
|
||||
{
|
||||
name: "or between two labels",
|
||||
inputStr: "k1=v1 or k2=v2",
|
||||
expectedSelector: &OrLabelSelector{
|
||||
OrLabelSelectors: []*metav1.LabelSelector{
|
||||
{
|
||||
MatchLabels: map[string]string{"k1": "v1"},
|
||||
},
|
||||
{
|
||||
MatchLabels: map[string]string{"k2": "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "or between two label groups",
|
||||
inputStr: "k1=v1,k2=v2 or a1=b1,a2=b2",
|
||||
expectedSelector: &OrLabelSelector{
|
||||
OrLabelSelectors: []*metav1.LabelSelector{
|
||||
{
|
||||
MatchLabels: map[string]string{"k1": "v1", "k2": "v2"},
|
||||
},
|
||||
{
|
||||
MatchLabels: map[string]string{"a1": "b1", "a2": "b2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
selector := &OrLabelSelector{}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
require.Nil(t, selector.Set(test.inputStr))
|
||||
assert.Equal(t, len(test.expectedSelector.OrLabelSelectors), len(selector.OrLabelSelectors))
|
||||
assert.Equal(t, test.expectedSelector.String(), selector.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeOfOrLabelSelector(t *testing.T) {
|
||||
selector := &OrLabelSelector{}
|
||||
assert.Equal(t, "orLabelSelector", selector.Type())
|
||||
}
|
||||
@@ -199,6 +199,18 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
|
||||
}
|
||||
d.Printf("Label selector:\t%s\n", s)
|
||||
|
||||
d.Println()
|
||||
if len(spec.OrLabelSelectors) == 0 {
|
||||
s = emptyDisplay
|
||||
} else {
|
||||
orLabelSelectors := []string{}
|
||||
for _, v := range spec.OrLabelSelectors {
|
||||
orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(v))
|
||||
}
|
||||
s = strings.Join(orLabelSelectors, " or ")
|
||||
}
|
||||
d.Printf("Or label selector:\t%s\n", s)
|
||||
|
||||
d.Println()
|
||||
d.Printf("Storage Location:\t%s\n", spec.StorageLocation)
|
||||
|
||||
|
||||
@@ -91,6 +91,8 @@ Resources:
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Or label selector: <none>
|
||||
|
||||
Storage Location: backup-location
|
||||
|
||||
Velero-Native Snapshot PVs: auto
|
||||
@@ -153,6 +155,8 @@ Resources:
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Or label selector: <none>
|
||||
|
||||
Storage Location: backup-location
|
||||
|
||||
Velero-Native Snapshot PVs: auto
|
||||
@@ -208,6 +212,8 @@ Resources:
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Or label selector: <none>
|
||||
|
||||
Storage Location: backup-location
|
||||
|
||||
Velero-Native Snapshot PVs: auto
|
||||
|
||||
@@ -146,6 +146,18 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
|
||||
}
|
||||
d.Printf("Label selector:\t%s\n", s)
|
||||
|
||||
d.Println()
|
||||
if len(restore.Spec.OrLabelSelectors) == 0 {
|
||||
s = emptyDisplay
|
||||
} else {
|
||||
orLabelSelectors := []string{}
|
||||
for _, v := range restore.Spec.OrLabelSelectors {
|
||||
orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(v))
|
||||
}
|
||||
s = strings.Join(orLabelSelectors, " or ")
|
||||
}
|
||||
d.Printf("Or label selector:\t%s\n", s)
|
||||
|
||||
d.Println()
|
||||
d.Printf("Restore PVs:\t%s\n", BoolPointerString(restore.Spec.RestorePVs, "false", "true", "auto"))
|
||||
|
||||
|
||||
@@ -38,6 +38,8 @@ Backup Template:
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Or label selector: <none>
|
||||
|
||||
Storage Location:
|
||||
|
||||
Velero-Native Snapshot PVs: auto
|
||||
@@ -82,6 +84,8 @@ Backup Template:
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Or label selector: <none>
|
||||
|
||||
Storage Location:
|
||||
|
||||
Velero-Native Snapshot PVs: auto
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
|
||||
@@ -33,7 +34,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
kerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/utils/clock"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -88,6 +88,7 @@ type backupReconciler struct {
|
||||
volumeSnapshotClient snapshotterClientSet.Interface
|
||||
credentialFileStore credentials.FileStore
|
||||
maxConcurrentK8SConnections int
|
||||
defaultSnapshotMoveData bool
|
||||
}
|
||||
|
||||
func NewBackupReconciler(
|
||||
@@ -110,9 +111,9 @@ func NewBackupReconciler(
|
||||
backupStoreGetter persistence.ObjectBackupStoreGetter,
|
||||
formatFlag logging.Format,
|
||||
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister,
|
||||
volumeSnapshotClient snapshotterClientSet.Interface,
|
||||
credentialStore credentials.FileStore,
|
||||
maxConcurrentK8SConnections int,
|
||||
defaultSnapshotMoveData bool,
|
||||
) *backupReconciler {
|
||||
b := &backupReconciler{
|
||||
ctx: ctx,
|
||||
@@ -135,9 +136,9 @@ func NewBackupReconciler(
|
||||
backupStoreGetter: backupStoreGetter,
|
||||
formatFlag: formatFlag,
|
||||
volumeSnapshotLister: volumeSnapshotLister,
|
||||
volumeSnapshotClient: volumeSnapshotClient,
|
||||
credentialFileStore: credentialStore,
|
||||
maxConcurrentK8SConnections: maxConcurrentK8SConnections,
|
||||
defaultSnapshotMoveData: defaultSnapshotMoveData,
|
||||
}
|
||||
b.updateTotalBackupMetric()
|
||||
return b
|
||||
@@ -353,6 +354,10 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
|
||||
request.Spec.DefaultVolumesToFsBackup = &b.defaultVolumesToFsBackup
|
||||
}
|
||||
|
||||
if request.Spec.SnapshotMoveData == nil {
|
||||
request.Spec.SnapshotMoveData = &b.defaultSnapshotMoveData
|
||||
}
|
||||
|
||||
// find which storage location to use
|
||||
var serverSpecified bool
|
||||
if request.Spec.StorageLocation == "" {
|
||||
@@ -469,7 +474,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
|
||||
request.Status.ValidationErrors = append(request.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified")
|
||||
}
|
||||
|
||||
if request.Spec.ResourcePolicy != nil && request.Spec.ResourcePolicy.Kind == resourcepolicies.ConfigmapRefType {
|
||||
if request.Spec.ResourcePolicy != nil && strings.EqualFold(request.Spec.ResourcePolicy.Kind, resourcepolicies.ConfigmapRefType) {
|
||||
policiesConfigmap := &corev1api.ConfigMap{}
|
||||
err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: request.Namespace, Name: request.Spec.ResourcePolicy.Name}, policiesConfigmap)
|
||||
if err != nil {
|
||||
@@ -533,8 +538,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
|
||||
if len(errors) > 0 {
|
||||
return nil, errors
|
||||
}
|
||||
allLocations := &velerov1api.VolumeSnapshotLocationList{}
|
||||
err := b.kbClient.List(context.Background(), allLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
|
||||
vsls := &velerov1api.VolumeSnapshotLocationList{}
|
||||
err := b.kbClient.List(context.Background(), vsls, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Sprintf("error listing volume snapshot locations: %v", err))
|
||||
return nil, errors
|
||||
@@ -542,8 +547,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
|
||||
|
||||
// build a map of provider->list of all locations for the provider
|
||||
allProviderLocations := make(map[string][]*velerov1api.VolumeSnapshotLocation)
|
||||
for i := range allLocations.Items {
|
||||
loc := allLocations.Items[i]
|
||||
for i := range vsls.Items {
|
||||
loc := vsls.Items[i]
|
||||
allProviderLocations[loc.Spec.Provider] = append(allProviderLocations[loc.Spec.Provider], &loc)
|
||||
}
|
||||
|
||||
@@ -648,65 +653,15 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error {
fatalErrs = append(fatalErrs, err)
}

// Empty slices here so that they can be passed in to the persistBackup call later, regardless of whether or not CSI's enabled.
// This way, we only make the Lister call if the feature flag's on.
var volumeSnapshots []snapshotv1api.VolumeSnapshot
var volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent
var volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass
if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) {
backupLog.Info("backup SnapshotMoveData is set to true, skip VolumeSnapshot resource persistence.")
} else if features.IsEnabled(velerov1api.CSIFeatureFlag) {
selector := label.NewSelectorForBackup(backup.Name)
vscList := &snapshotv1api.VolumeSnapshotContentList{}

if b.volumeSnapshotLister != nil {
tmpVSs, err := b.volumeSnapshotLister.List(label.NewSelectorForBackup(backup.Name))
if err != nil {
backupLog.Error(err)
}
for _, vs := range tmpVSs {
volumeSnapshots = append(volumeSnapshots, *vs)
}
}

backup.CSISnapshots = volumeSnapshots

err = b.kbClient.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector})
if err != nil {
backupLog.Error(err)
}
if len(vscList.Items) >= 0 {
volumeSnapshotContents = vscList.Items
}

vsClassSet := sets.NewString()
for index := range volumeSnapshotContents {
// persist the volumesnapshotclasses referenced by vsc
if volumeSnapshotContents[index].Spec.VolumeSnapshotClassName != nil && !vsClassSet.Has(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) {
vsClass := &snapshotv1api.VolumeSnapshotClass{}
if err := b.kbClient.Get(context.TODO(), kbclient.ObjectKey{Name: *volumeSnapshotContents[index].Spec.VolumeSnapshotClassName}, vsClass); err != nil {
backupLog.Error(err)
} else {
vsClassSet.Insert(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName)
volumeSnapshotClasses = append(volumeSnapshotClasses, *vsClass)
}
}
}
}

// native snapshots phase will either be failed or completed right away
// https://github.com/vmware-tanzu/velero/blob/de3ea52f0cc478e99efa7b9524c7f353514261a4/pkg/backup/item_backupper.go#L632-L639
backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots)
for _, snap := range backup.VolumeSnapshots {
if snap.Status.Phase == volume.SnapshotPhaseCompleted {
backup.Status.VolumeSnapshotsCompleted++
}
}

backup.Status.CSIVolumeSnapshotsAttempted = len(backup.CSISnapshots)
for _, vs := range backup.CSISnapshots {
if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) {
backup.Status.CSIVolumeSnapshotsCompleted++
}
}
volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses := pkgbackup.UpdateBackupCSISnapshotsStatus(b.kbClient, b.volumeSnapshotLister, backup.Backup, backupLog)

// Iterate over backup item operations and update progress.
// Any errors on operations at this point should be added to backup errors.

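The hunk above replaces the inline VolumeSnapshot/VolumeSnapshotContent/VolumeSnapshotClass gathering with a single pkgbackup.UpdateBackupCSISnapshotsStatus call. The following is only a rough sketch of what such a helper could look like, reconstructed from the inline code removed above; the actual function in pkg/backup may differ, and VolumeSnapshotClass gathering is elided for brevity.

package backup // imported elsewhere in this diff as pkgbackup

import (
	"context"

	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
	"github.com/sirupsen/logrus"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/label"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)

// UpdateBackupCSISnapshotsStatus (sketch) collects the CSI snapshot objects labeled
// for this backup, fills in CSIVolumeSnapshotsAttempted/Completed on the backup
// status, and returns the collected objects so the caller can persist them.
func UpdateBackupCSISnapshotsStatus(client kbclient.Client, lister snapshotv1listers.VolumeSnapshotLister,
	backup *velerov1api.Backup, log logrus.FieldLogger) ([]snapshotv1api.VolumeSnapshot, []snapshotv1api.VolumeSnapshotContent, []snapshotv1api.VolumeSnapshotClass) {
	var volumeSnapshots []snapshotv1api.VolumeSnapshot
	var volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent
	var volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass // class gathering elided in this sketch

	selector := label.NewSelectorForBackup(backup.Name)
	if lister != nil {
		tmpVSs, err := lister.List(selector)
		if err != nil {
			log.Error(err)
		}
		for _, vs := range tmpVSs {
			volumeSnapshots = append(volumeSnapshots, *vs)
		}
	}

	vscList := &snapshotv1api.VolumeSnapshotContentList{}
	if err := client.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}); err != nil {
		log.Error(err)
	}
	volumeSnapshotContents = vscList.Items

	backup.Status.CSIVolumeSnapshotsAttempted = len(volumeSnapshots)
	for _, vs := range volumeSnapshots {
		if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) {
			backup.Status.CSIVolumeSnapshotsCompleted++
		}
	}

	return volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses
}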
@@ -21,11 +21,13 @@ import (
"context"
"fmt"
"io"
"reflect"
"sort"
"strings"
"testing"
"time"

"github.com/google/go-cmp/cmp"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotfake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake"
snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions"
@@ -43,6 +45,10 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"

kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"

fakeClient "sigs.k8s.io/controller-runtime/pkg/client/fake"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
"github.com/vmware-tanzu/velero/pkg/builder"
@@ -583,6 +589,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
backup *velerov1api.Backup
|
||||
backupLocation *velerov1api.BackupStorageLocation
|
||||
defaultVolumesToFsBackup bool
|
||||
defaultSnapshotMoveData bool
|
||||
expectedResult *velerov1api.Backup
|
||||
backupExists bool
|
||||
existenceCheckError error
|
||||
@@ -615,6 +622,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -651,6 +659,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: "alt-loc",
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -690,6 +699,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: "read-write",
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -727,6 +737,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
TTL: metav1.Duration{Duration: 10 * time.Minute},
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -764,6 +775,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -802,6 +814,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -840,6 +853,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -878,6 +892,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -916,6 +931,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -955,6 +971,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFailed,
|
||||
@@ -994,6 +1011,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.True(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFailed,
|
||||
@@ -1113,6 +1131,7 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
@@ -1126,6 +1145,129 @@ func TestProcessBackupCompletions(t *testing.T) {
|
||||
},
|
||||
volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
},
|
||||
{
|
||||
name: "backup with snapshot data movement set to true and defaultSnapshotMoveData set to false",
|
||||
backup: defaultBackup().SnapshotMoveData(true).Result(),
|
||||
backupLocation: defaultBackupLocation,
|
||||
defaultVolumesToFsBackup: false,
|
||||
defaultSnapshotMoveData: false,
|
||||
expectedResult: &velerov1api.Backup{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Backup",
|
||||
APIVersion: "velero.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: "backup-1",
|
||||
Annotations: map[string]string{
|
||||
"velero.io/source-cluster-k8s-major-version": "1",
|
||||
"velero.io/source-cluster-k8s-minor-version": "16",
|
||||
"velero.io/source-cluster-k8s-gitversion": "v1.16.4",
|
||||
"velero.io/resource-timeout": "0s",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"velero.io/storage-location": "loc-1",
|
||||
},
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.True(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
Version: 1,
|
||||
FormatVersion: "1.1.0",
|
||||
StartTimestamp: ×tamp,
|
||||
Expiration: ×tamp,
|
||||
CSIVolumeSnapshotsAttempted: 0,
|
||||
CSIVolumeSnapshotsCompleted: 0,
|
||||
},
|
||||
},
|
||||
volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
},
|
||||
{
|
||||
name: "backup with snapshot data movement set to false and defaultSnapshotMoveData set to true",
|
||||
backup: defaultBackup().SnapshotMoveData(false).Result(),
|
||||
backupLocation: defaultBackupLocation,
|
||||
defaultVolumesToFsBackup: false,
|
||||
defaultSnapshotMoveData: true,
|
||||
expectedResult: &velerov1api.Backup{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Backup",
|
||||
APIVersion: "velero.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: "backup-1",
|
||||
Annotations: map[string]string{
|
||||
"velero.io/source-cluster-k8s-major-version": "1",
|
||||
"velero.io/source-cluster-k8s-minor-version": "16",
|
||||
"velero.io/source-cluster-k8s-gitversion": "v1.16.4",
|
||||
"velero.io/resource-timeout": "0s",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"velero.io/storage-location": "loc-1",
|
||||
},
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.False(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
Version: 1,
|
||||
FormatVersion: "1.1.0",
|
||||
StartTimestamp: ×tamp,
|
||||
Expiration: ×tamp,
|
||||
CSIVolumeSnapshotsAttempted: 1,
|
||||
CSIVolumeSnapshotsCompleted: 0,
|
||||
},
|
||||
},
|
||||
volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
},
|
||||
{
|
||||
name: "backup with snapshot data movement not set and defaultSnapshotMoveData set to true",
|
||||
backup: defaultBackup().Result(),
|
||||
backupLocation: defaultBackupLocation,
|
||||
defaultVolumesToFsBackup: false,
|
||||
defaultSnapshotMoveData: true,
|
||||
expectedResult: &velerov1api.Backup{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Backup",
|
||||
APIVersion: "velero.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Name: "backup-1",
|
||||
Annotations: map[string]string{
|
||||
"velero.io/source-cluster-k8s-major-version": "1",
|
||||
"velero.io/source-cluster-k8s-minor-version": "16",
|
||||
"velero.io/source-cluster-k8s-gitversion": "v1.16.4",
|
||||
"velero.io/resource-timeout": "0s",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"velero.io/storage-location": "loc-1",
|
||||
},
|
||||
},
|
||||
Spec: velerov1api.BackupSpec{
|
||||
StorageLocation: defaultBackupLocation.Name,
|
||||
DefaultVolumesToFsBackup: boolptr.False(),
|
||||
SnapshotMoveData: boolptr.True(),
|
||||
},
|
||||
Status: velerov1api.BackupStatus{
|
||||
Phase: velerov1api.BackupPhaseFinalizing,
|
||||
Version: 1,
|
||||
FormatVersion: "1.1.0",
|
||||
StartTimestamp: ×tamp,
|
||||
Expiration: ×tamp,
|
||||
CSIVolumeSnapshotsAttempted: 0,
|
||||
CSIVolumeSnapshotsCompleted: 0,
|
||||
},
|
||||
},
|
||||
volumeSnapshot: builder.ForVolumeSnapshot("velero", "testVS").ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, "backup-1")).Result(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
@@ -1178,6 +1320,7 @@ func TestProcessBackupCompletions(t *testing.T) {
kbClient: fakeClient,
defaultBackupLocation: defaultBackupLocation.Name,
defaultVolumesToFsBackup: test.defaultVolumesToFsBackup,
defaultSnapshotMoveData: test.defaultSnapshotMoveData,
backupTracker: NewBackupTracker(),
metrics: metrics.NewServerMetrics(),
clock: testclocks.NewFakeClock(now),
@@ -1528,3 +1671,63 @@ func Test_getLastSuccessBySchedule(t *testing.T) {
})
}
}

// Unit tests to make sure that the backup's status is updated correctly during reconcile.
// To clear up confusion whether status can be updated with Patch alone without status writer and not kbClient.Status().Patch()
func TestPatchResourceWorksWithStatus(t *testing.T) {
type args struct {
original *velerov1api.Backup
updated *velerov1api.Backup
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "patch backup status",
args: args{
original: defaultBackup().SnapshotMoveData(false).Result(),
updated: defaultBackup().SnapshotMoveData(false).WithStatus(velerov1api.BackupStatus{
CSIVolumeSnapshotsCompleted: 1,
}).Result(),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
scheme := runtime.NewScheme()
error := velerov1api.AddToScheme(scheme)
if error != nil {
t.Errorf("PatchResource() error = %v", error)
}
fakeClient := fakeClient.NewClientBuilder().WithScheme(scheme).WithObjects(tt.args.original).Build()
fromCluster := &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: tt.args.original.Name,
Namespace: tt.args.original.Namespace,
},
}
// check original exists
if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil {
t.Errorf("PatchResource() error = %v", err)
}
// ignore resourceVersion
tt.args.updated.ResourceVersion = fromCluster.ResourceVersion
tt.args.original.ResourceVersion = fromCluster.ResourceVersion
if err := kubeutil.PatchResource(tt.args.original, tt.args.updated, fakeClient); (err != nil) != tt.wantErr {
t.Errorf("PatchResource() error = %v, wantErr %v", err, tt.wantErr)
}
// check updated exists
if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil {
t.Errorf("PatchResource() error = %v", err)
}

// check fromCluster is equal to updated
if !reflect.DeepEqual(fromCluster, tt.args.updated) {
t.Error(cmp.Diff(fromCluster, tt.args.updated))
}
})

}
}

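The test above relies on kubeutil.PatchResource being able to update status through a plain merge patch, without going through kbClient.Status(). A hypothetical minimal sketch of such a helper, assuming controller-runtime's client.MergeFrom (the real pkg/util/kube implementation may differ), is:

package kube

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// PatchResource (sketch) sends one merge patch computed by diffing updated
// against original; with no status subresource configured, the fake client
// (and the API server) applies it to metadata, spec, and status alike.
func PatchResource(original, updated client.Object, kbClient client.Client) error {
	return kbClient.Patch(context.Background(), updated, client.MergeFrom(original))
}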
@@ -570,7 +570,7 @@ func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context,
}

func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) {
//TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the bakcup resource is refactored
//TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored

// Record original json
oldData, err := json.Marshal(backup)

@@ -29,6 +29,8 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
|
||||
"github.com/vmware-tanzu/velero/pkg/metrics"
|
||||
@@ -40,19 +42,21 @@ import (
|
||||
|
||||
// backupFinalizerReconciler reconciles a Backup object
|
||||
type backupFinalizerReconciler struct {
|
||||
client kbclient.Client
|
||||
clock clocks.WithTickerAndDelayedExecution
|
||||
backupper pkgbackup.Backupper
|
||||
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
|
||||
backupTracker BackupTracker
|
||||
metrics *metrics.ServerMetrics
|
||||
backupStoreGetter persistence.ObjectBackupStoreGetter
|
||||
log logrus.FieldLogger
|
||||
client kbclient.Client
|
||||
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister
|
||||
clock clocks.WithTickerAndDelayedExecution
|
||||
backupper pkgbackup.Backupper
|
||||
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
|
||||
backupTracker BackupTracker
|
||||
metrics *metrics.ServerMetrics
|
||||
backupStoreGetter persistence.ObjectBackupStoreGetter
|
||||
log logrus.FieldLogger
|
||||
}
|
||||
|
||||
// NewBackupFinalizerReconciler initializes and returns backupFinalizerReconciler struct.
|
||||
func NewBackupFinalizerReconciler(
|
||||
client kbclient.Client,
|
||||
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister,
|
||||
clock clocks.WithTickerAndDelayedExecution,
|
||||
backupper pkgbackup.Backupper,
|
||||
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
|
||||
@@ -187,6 +191,7 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()}
|
||||
recordBackupMetrics(log, backup, outBackupFile, r.metrics, true)
|
||||
|
||||
pkgbackup.UpdateBackupCSISnapshotsStatus(r.client, r.volumeSnapshotLister, backup, log)
|
||||
// update backup metadata in object store
|
||||
backupJSON := new(bytes.Buffer)
|
||||
if err := encode.To(backup, "json", backupJSON); err != nil {
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -43,12 +44,14 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
velerotestmocks "github.com/vmware-tanzu/velero/pkg/test/mocks"
|
||||
)
|
||||
|
||||
func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) {
|
||||
func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeVolumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) {
|
||||
backupper := new(fakeBackupper)
|
||||
return NewBackupFinalizerReconciler(
|
||||
fakeClient,
|
||||
fakeVolumeSnapshotLister,
|
||||
fakeClock,
|
||||
backupper,
|
||||
func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
|
||||
@@ -160,7 +163,10 @@ func TestBackupFinalizerReconcile(t *testing.T) {
|
||||
}
|
||||
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...)
|
||||
reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeClock)
|
||||
|
||||
fakeVolumeSnapshotLister := velerotestmocks.NewVolumeSnapshotLister(t)
|
||||
|
||||
reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeVolumeSnapshotLister, fakeClock)
|
||||
pluginManager.On("CleanupClients").Return(nil)
|
||||
backupStore.On("GetBackupItemOperations", test.backup.Name).Return(test.backupOperations, nil)
|
||||
backupStore.On("GetBackupContents", mock.Anything).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil)
|
||||
|
||||
@@ -275,6 +275,8 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON(
|
||||
return nil
|
||||
}
|
||||
|
||||
// check progress of backupItemOperations
|
||||
// return: inProgressOperations, changes, completedCount, failedCount, errs
|
||||
func getBackupItemOperationProgress(
|
||||
backup *velerov1api.Backup,
|
||||
pluginManager clientmgmt.Manager,
|
||||
|
||||
@@ -123,9 +123,9 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
|
||||
// Add finalizer
|
||||
// Logic for clear resources when datadownload been deleted
|
||||
if dd.DeletionTimestamp.IsZero() { // add finalizer for all cr at beginning
|
||||
if !isDataDownloadInFinalState(dd) && !controllerutil.ContainsFinalizer(dd, dataUploadDownloadFinalizer) {
|
||||
if !isDataDownloadInFinalState(dd) && !controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) {
|
||||
succeeded, err := r.exclusiveUpdateDataDownload(ctx, dd, func(dd *velerov2alpha1api.DataDownload) {
|
||||
controllerutil.AddFinalizer(dd, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(dd, DataUploadDownloadFinalizer)
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to add finalizer with error %s for %s/%s", err.Error(), dd.Namespace, dd.Name)
|
||||
@@ -135,7 +135,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
}
|
||||
} else if controllerutil.ContainsFinalizer(dd, dataUploadDownloadFinalizer) && !dd.Spec.Cancel && !isDataDownloadInFinalState(dd) {
|
||||
} else if controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) && !dd.Spec.Cancel && !isDataDownloadInFinalState(dd) {
|
||||
// when delete cr we need to clear up internal resources created by Velero, here we use the cancel mechanism
|
||||
// to help clear up resources instead of clear them directly in case of some conflict with Expose action
|
||||
if err := UpdateDataDownloadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataDownload *velerov2alpha1api.DataDownload) {
|
||||
@@ -287,12 +287,13 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
|
||||
} else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress {
|
||||
log.Info("Data download is in progress")
|
||||
if dd.Spec.Cancel {
|
||||
log.Info("Data download is being canceled")
|
||||
fsRestore := r.dataPathMgr.GetAsyncBR(dd.Name)
|
||||
if fsRestore == nil {
|
||||
r.OnDataDownloadCancelled(ctx, dd.GetNamespace(), dd.GetName())
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Info("Data download is being canceled")
|
||||
// Update status to Canceling.
|
||||
original := dd.DeepCopy()
|
||||
dd.Status.Phase = velerov2alpha1api.DataDownloadPhaseCanceling
|
||||
@@ -300,7 +301,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
|
||||
log.WithError(err).Error("error updating data download status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
fsRestore.Cancel()
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
@@ -309,9 +309,11 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
|
||||
} else {
|
||||
// put the finilizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
|
||||
// instead of intermediate state
|
||||
if isDataDownloadInFinalState(dd) && !dd.DeletionTimestamp.IsZero() && controllerutil.ContainsFinalizer(dd, dataUploadDownloadFinalizer) {
|
||||
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
|
||||
// also in final status cr won't block the direct delete of the velero namespace
|
||||
if isDataDownloadInFinalState(dd) && controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) {
|
||||
original := dd.DeepCopy()
|
||||
controllerutil.RemoveFinalizer(dd, dataUploadDownloadFinalizer)
|
||||
controllerutil.RemoveFinalizer(dd, DataUploadDownloadFinalizer)
|
||||
if err := r.client.Patch(ctx, dd, client.MergeFrom(original)); err != nil {
|
||||
log.WithError(err).Error("error to remove finalizer")
|
||||
}
|
||||
|
||||
@@ -299,7 +299,7 @@ func TestDataDownloadReconcile(t *testing.T) {
|
||||
name: "dataDownload with enabled cancel",
|
||||
dd: func() *velerov2alpha1api.DataDownload {
|
||||
dd := dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result()
|
||||
controllerutil.AddFinalizer(dd, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(dd, DataUploadDownloadFinalizer)
|
||||
dd.DeletionTimestamp = &metav1.Time{Time: time.Now()}
|
||||
return dd
|
||||
}(),
|
||||
@@ -312,12 +312,12 @@ func TestDataDownloadReconcile(t *testing.T) {
|
||||
name: "dataDownload with remove finalizer and should not be retrieved",
|
||||
dd: func() *velerov2alpha1api.DataDownload {
|
||||
dd := dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseFailed).Cancel(true).Result()
|
||||
controllerutil.AddFinalizer(dd, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(dd, DataUploadDownloadFinalizer)
|
||||
dd.DeletionTimestamp = &metav1.Time{Time: time.Now()}
|
||||
return dd
|
||||
}(),
|
||||
checkFunc: func(dd velerov2alpha1api.DataDownload) bool {
|
||||
return !controllerutil.ContainsFinalizer(&dd, dataUploadDownloadFinalizer)
|
||||
return !controllerutil.ContainsFinalizer(&dd, DataUploadDownloadFinalizer)
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -428,7 +428,7 @@ func TestDataDownloadReconcile(t *testing.T) {
|
||||
assert.Contains(t, dd.Status.Message, test.expectedStatusMsg)
|
||||
}
|
||||
if test.dd.Namespace == velerov1api.DefaultNamespace {
|
||||
if controllerutil.ContainsFinalizer(test.dd, dataUploadDownloadFinalizer) {
|
||||
if controllerutil.ContainsFinalizer(test.dd, DataUploadDownloadFinalizer) {
|
||||
assert.True(t, true, apierrors.IsNotFound(err))
|
||||
} else {
|
||||
require.Nil(t, err)
|
||||
|
||||
@@ -58,7 +58,7 @@ import (
const (
dataUploadDownloadRequestor = "snapshot-data-upload-download"
acceptNodeLabelKey = "velero.io/accepted-by"
dataUploadDownloadFinalizer = "velero.io/data-upload-download-finalizer"
DataUploadDownloadFinalizer = "velero.io/data-upload-download-finalizer"
preparingMonitorFrequency = time.Minute
)

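The exported DataUploadDownloadFinalizer above is what the Reconcile hunks below add while a CR is live and strip once it reaches a final state. The following is a condensed sketch of that add/remove pattern with controller-runtime's controllerutil helpers; ensureFinalizer and the inFinalState flag are illustrative names, not Velero APIs.

package controller

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const dataUploadDownloadFinalizer = "velero.io/data-upload-download-finalizer"

// ensureFinalizer adds the finalizer to a live, non-final CR and removes it once
// the CR is in a final state, issuing a merge patch only when something changed.
func ensureFinalizer(ctx context.Context, c client.Client, obj client.Object, inFinalState bool) error {
	original := obj.DeepCopyObject().(client.Object) // concrete CR types satisfy client.Object
	switch {
	case obj.GetDeletionTimestamp().IsZero() && !inFinalState && !controllerutil.ContainsFinalizer(obj, dataUploadDownloadFinalizer):
		controllerutil.AddFinalizer(obj, dataUploadDownloadFinalizer)
	case inFinalState && controllerutil.ContainsFinalizer(obj, dataUploadDownloadFinalizer):
		controllerutil.RemoveFinalizer(obj, dataUploadDownloadFinalizer)
	default:
		return nil // nothing to change
	}
	return c.Patch(ctx, obj, client.MergeFrom(original))
}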
@@ -132,9 +132,9 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
|
||||
// Logic for clear resources when dataupload been deleted
|
||||
if du.DeletionTimestamp.IsZero() { // add finalizer for all cr at beginning
|
||||
if !isDataUploadInFinalState(du) && !controllerutil.ContainsFinalizer(du, dataUploadDownloadFinalizer) {
|
||||
if !isDataUploadInFinalState(du) && !controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) {
|
||||
succeeded, err := r.exclusiveUpdateDataUpload(ctx, du, func(du *velerov2alpha1api.DataUpload) {
|
||||
controllerutil.AddFinalizer(du, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(du, DataUploadDownloadFinalizer)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -145,7 +145,7 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
}
|
||||
} else if controllerutil.ContainsFinalizer(du, dataUploadDownloadFinalizer) && !du.Spec.Cancel && !isDataUploadInFinalState(du) {
|
||||
} else if controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) && !du.Spec.Cancel && !isDataUploadInFinalState(du) {
|
||||
// when delete cr we need to clear up internal resources created by Velero, here we use the cancel mechanism
|
||||
// to help clear up resources instead of clear them directly in case of some conflict with Expose action
|
||||
if err := UpdateDataUploadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataUpload *velerov2alpha1api.DataUpload) {
|
||||
@@ -177,7 +177,10 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
exposeParam := r.setupExposeParam(du)
|
||||
exposeParam, err := r.setupExposeParam(du)
|
||||
if err != nil {
|
||||
return r.errorOut(ctx, du, err, "failed to set exposer parameters", log)
|
||||
}
|
||||
|
||||
// Expose() will trigger to create one pod whose volume is restored by a given volume snapshot,
|
||||
// but the pod maybe is not in the same node of the current controller, so we need to return it here.
|
||||
@@ -289,13 +292,15 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
} else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress {
|
||||
log.Info("Data upload is in progress")
|
||||
if du.Spec.Cancel {
|
||||
fsBackup := r.dataPathMgr.GetAsyncBR(du.Name)
|
||||
if fsBackup == nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
log.Info("Data upload is being canceled")
|
||||
|
||||
// Update status to Canceling.
|
||||
fsBackup := r.dataPathMgr.GetAsyncBR(du.Name)
|
||||
if fsBackup == nil {
|
||||
r.OnDataUploadCancelled(ctx, du.GetNamespace(), du.GetName())
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Update status to Canceling
|
||||
original := du.DeepCopy()
|
||||
du.Status.Phase = velerov2alpha1api.DataUploadPhaseCanceling
|
||||
if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil {
|
||||
@@ -308,10 +313,12 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
return ctrl.Result{}, nil
|
||||
} else {
|
||||
// put the finilizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
|
||||
// instead of intermediate state
|
||||
if isDataUploadInFinalState(du) && !du.DeletionTimestamp.IsZero() && controllerutil.ContainsFinalizer(du, dataUploadDownloadFinalizer) {
|
||||
// instead of intermediate state.
|
||||
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
|
||||
// also in final status cr won't block the direct delete of the velero namespace
|
||||
if isDataUploadInFinalState(du) && controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) {
|
||||
original := du.DeepCopy()
|
||||
controllerutil.RemoveFinalizer(du, dataUploadDownloadFinalizer)
|
||||
controllerutil.RemoveFinalizer(du, DataUploadDownloadFinalizer)
|
||||
if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil {
|
||||
log.WithError(err).Error("error to remove finalizer")
|
||||
}
|
||||
@@ -733,18 +740,33 @@ func (r *DataUploadReconciler) closeDataPath(ctx context.Context, duName string)
r.dataPathMgr.RemoveAsyncBR(duName)
}

func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload) interface{} {
func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload) (interface{}, error) {
if du.Spec.SnapshotType == velerov2alpha1api.SnapshotTypeCSI {
pvc := &corev1.PersistentVolumeClaim{}
err := r.client.Get(context.Background(), types.NamespacedName{
Namespace: du.Spec.SourceNamespace,
Name: du.Spec.SourcePVC,
}, pvc)

if err != nil {
return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
}

accessMode := exposer.AccessModeFileSystem
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
accessMode = exposer.AccessModeBlock
}

return &exposer.CSISnapshotExposeParam{
SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
SourceNamespace: du.Spec.SourceNamespace,
StorageClass: du.Spec.CSISnapshot.StorageClass,
HostingPodLabels: map[string]string{velerov1api.DataUploadLabel: du.Name},
AccessMode: exposer.AccessModeFileSystem,
AccessMode: accessMode,
Timeout: du.Spec.OperationTimeout.Duration,
}
}, nil
}
return nil
return nil, nil
}

func (r *DataUploadReconciler) setupWaitExposePara(du *velerov2alpha1api.DataUpload) interface{} {

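The new setupExposeParam logic above derives the exposer access mode from the source PVC's VolumeMode: block-mode PVCs get a block access mode, everything else stays filesystem. Below is a self-contained sketch of just that decision; the two constant values are assumptions standing in for the exposer package's AccessModeFileSystem and AccessModeBlock.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

const (
	accessModeFileSystem = "by-file-system" // assumed values; the real exposer constants may differ
	accessModeBlock      = "by-block-device"
)

// accessModeForPVC mirrors the decision in the hunk above: only an explicit
// Block volume mode switches the exposer away from filesystem access.
func accessModeForPVC(pvc *corev1.PersistentVolumeClaim) string {
	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
		return accessModeBlock
	}
	return accessModeFileSystem
}

func main() {
	block := corev1.PersistentVolumeBlock
	pvc := &corev1.PersistentVolumeClaim{Spec: corev1.PersistentVolumeClaimSpec{VolumeMode: &block}}
	fmt.Println(accessModeForPVC(pvc))                             // by-block-device
	fmt.Println(accessModeForPVC(&corev1.PersistentVolumeClaim{})) // by-file-system
}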
@@ -306,6 +306,7 @@ func TestReconcile(t *testing.T) {
|
||||
name string
|
||||
du *velerov2alpha1api.DataUpload
|
||||
pod *corev1.Pod
|
||||
pvc *corev1.PersistentVolumeClaim
|
||||
snapshotExposerList map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer
|
||||
dataMgr *datapath.Manager
|
||||
expectedProcessed bool
|
||||
@@ -345,11 +346,21 @@ func TestReconcile(t *testing.T) {
|
||||
}, {
|
||||
name: "Dataupload should be accepted",
|
||||
du: dataUploadBuilder().Result(),
|
||||
pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
|
||||
pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").Result(),
|
||||
expectedProcessed: false,
|
||||
expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(),
|
||||
expectedRequeue: ctrl.Result{},
|
||||
},
|
||||
{
|
||||
name: "Dataupload should fail to get PVC information",
|
||||
du: dataUploadBuilder().Result(),
|
||||
pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "wrong-pvc"}).Result(),
|
||||
expectedProcessed: true,
|
||||
expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
|
||||
expectedRequeue: ctrl.Result{},
|
||||
expectedErrMsg: "failed to get PVC",
|
||||
},
|
||||
{
|
||||
name: "Dataupload should be prepared",
|
||||
du: dataUploadBuilder().SnapshotType(fakeSnapshotType).Result(),
|
||||
@@ -399,7 +410,7 @@ func TestReconcile(t *testing.T) {
|
||||
pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
|
||||
du: func() *velerov2alpha1api.DataUpload {
|
||||
du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).SnapshotType(fakeSnapshotType).Result()
|
||||
controllerutil.AddFinalizer(du, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(du, DataUploadDownloadFinalizer)
|
||||
du.DeletionTimestamp = &metav1.Time{Time: time.Now()}
|
||||
return du
|
||||
}(),
|
||||
@@ -415,13 +426,13 @@ func TestReconcile(t *testing.T) {
|
||||
pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
|
||||
du: func() *velerov2alpha1api.DataUpload {
|
||||
du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).SnapshotType(fakeSnapshotType).Cancel(true).Result()
|
||||
controllerutil.AddFinalizer(du, dataUploadDownloadFinalizer)
|
||||
controllerutil.AddFinalizer(du, DataUploadDownloadFinalizer)
|
||||
du.DeletionTimestamp = &metav1.Time{Time: time.Now()}
|
||||
return du
|
||||
}(),
|
||||
expectedProcessed: false,
|
||||
checkFunc: func(du velerov2alpha1api.DataUpload) bool {
|
||||
return !controllerutil.ContainsFinalizer(&du, dataUploadDownloadFinalizer)
|
||||
return !controllerutil.ContainsFinalizer(&du, DataUploadDownloadFinalizer)
|
||||
},
|
||||
expectedRequeue: ctrl.Result{},
|
||||
},
|
||||
@@ -448,6 +459,11 @@ func TestReconcile(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if test.pvc != nil {
|
||||
err = r.client.Create(ctx, test.pvc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if test.dataMgr != nil {
|
||||
r.dataPathMgr = test.dataMgr
|
||||
} else {
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
|
||||
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
|
||||
"github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
@@ -187,7 +188,7 @@ func (c *gcReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re
|
||||
log.Info("Creating a new deletion request")
|
||||
ndbr := pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID))
|
||||
ndbr.SetNamespace(backup.Namespace)
|
||||
if err := c.Create(ctx, ndbr); err != nil {
|
||||
if err := veleroclient.CreateRetryGenerateName(c, ctx, ndbr); err != nil {
|
||||
log.WithError(err).Error("error creating DeleteBackupRequests")
|
||||
return ctrl.Result{}, errors.Wrap(err, "error creating DeleteBackupRequest")
|
||||
}
|
||||
|
||||
@@ -101,8 +101,6 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
)
|
||||
}
|
||||
|
||||
log.Info("PodVolumeBackup starting")
|
||||
|
||||
// Only process items for this node.
|
||||
if pvb.Spec.Node != r.nodeName {
|
||||
return ctrl.Result{}, nil
|
||||
@@ -116,6 +114,8 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Info("PodVolumeBackup starting")
|
||||
|
||||
callbacks := datapath.Callbacks{
|
||||
OnCompleted: r.OnDataPathCompleted,
|
||||
OnFailed: r.OnDataPathFailed,
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -101,6 +102,7 @@ type restoreReconciler struct {
|
||||
logFormat logging.Format
|
||||
clock clock.WithTickerAndDelayedExecution
|
||||
defaultItemOperationTimeout time.Duration
|
||||
disableInformerCache bool
|
||||
|
||||
newPluginManager func(logger logrus.FieldLogger) clientmgmt.Manager
|
||||
backupStoreGetter persistence.ObjectBackupStoreGetter
|
||||
@@ -123,6 +125,7 @@ func NewRestoreReconciler(
|
||||
metrics *metrics.ServerMetrics,
|
||||
logFormat logging.Format,
|
||||
defaultItemOperationTimeout time.Duration,
|
||||
disableInformerCache bool,
|
||||
) *restoreReconciler {
|
||||
r := &restoreReconciler{
|
||||
ctx: ctx,
|
||||
@@ -135,6 +138,7 @@ func NewRestoreReconciler(
|
||||
logFormat: logFormat,
|
||||
clock: &clock.RealClock{},
|
||||
defaultItemOperationTimeout: defaultItemOperationTimeout,
|
||||
disableInformerCache: disableInformerCache,
|
||||
|
||||
// use variables to refer to these functions so they can be
|
||||
// replaced with fakes for testing.
|
||||
@@ -373,7 +377,7 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) (backupInf
|
||||
}
|
||||
|
||||
var resourceModifiers *resourcemodifiers.ResourceModifiers = nil
|
||||
if restore.Spec.ResourceModifier != nil && restore.Spec.ResourceModifier.Kind == resourcemodifiers.ConfigmapRefType {
|
||||
if restore.Spec.ResourceModifier != nil && strings.EqualFold(restore.Spec.ResourceModifier.Kind, resourcemodifiers.ConfigmapRefType) {
|
||||
ResourceModifierConfigMap := &corev1api.ConfigMap{}
|
||||
err := r.kbClient.Get(context.Background(), client.ObjectKey{Namespace: restore.Namespace, Name: restore.Spec.ResourceModifier.Name}, ResourceModifierConfigMap)
|
||||
if err != nil {
|
||||
@@ -511,6 +515,11 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
|
||||
return errors.Wrap(err, "error fetching volume snapshots metadata")
|
||||
}
|
||||
|
||||
csiVolumeSnapshots, err := backupStore.GetCSIVolumeSnapshots(restore.Spec.BackupName)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to fetch CSI VolumeSnapshots metadata")
|
||||
}
|
||||
|
||||
restoreLog.Info("starting restore")
|
||||
|
||||
var podVolumeBackups []*api.PodVolumeBackup
|
||||
@@ -519,13 +528,15 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
|
||||
}
|
||||
|
||||
restoreReq := &pkgrestore.Request{
|
||||
Log: restoreLog,
|
||||
Restore: restore,
|
||||
Backup: info.backup,
|
||||
PodVolumeBackups: podVolumeBackups,
|
||||
VolumeSnapshots: volumeSnapshots,
|
||||
BackupReader: backupFile,
|
||||
ResourceModifiers: resourceModifiers,
|
||||
Log: restoreLog,
|
||||
Restore: restore,
|
||||
Backup: info.backup,
|
||||
PodVolumeBackups: podVolumeBackups,
|
||||
VolumeSnapshots: volumeSnapshots,
|
||||
BackupReader: backupFile,
|
||||
ResourceModifiers: resourceModifiers,
|
||||
DisableInformerCache: r.disableInformerCache,
|
||||
CSIVolumeSnapshots: csiVolumeSnapshots,
|
||||
}
|
||||
restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager)
|
||||
|
||||
@@ -671,14 +682,13 @@ func (r *restoreReconciler) deleteExternalResources(restore *api.Restore) error
|
||||
|
||||
backupInfo, err := r.fetchBackupInfo(restore.Spec.BackupName)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
r.logger.Errorf("got not found error: %v, skip deleting the restore files in object storage", err)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, fmt.Sprintf("can't get backup info, backup: %s", restore.Spec.BackupName))
|
||||
}
|
||||
|
||||
// if storage locations is read-only, skip deletion
|
||||
if backupInfo.location.Spec.AccessMode == api.BackupStorageLocationAccessModeReadOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete restore files in object storage
|
||||
pluginManager := r.newPluginManager(r.logger)
|
||||
defer pluginManager.CleanupClients()
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -114,6 +115,7 @@ func TestFetchBackupInfo(t *testing.T) {
|
||||
metrics.NewServerMetrics(),
|
||||
formatFlag,
|
||||
60*time.Minute,
|
||||
false,
|
||||
)
|
||||
|
||||
if test.backupStoreError == nil {
|
||||
@@ -191,6 +193,7 @@ func TestProcessQueueItemSkips(t *testing.T) {
|
||||
metrics.NewServerMetrics(),
|
||||
formatFlag,
|
||||
60*time.Minute,
|
||||
false,
|
||||
)
|
||||
|
||||
_, err := r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{
|
||||
@@ -445,6 +448,7 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
metrics.NewServerMetrics(),
|
||||
formatFlag,
|
||||
60*time.Minute,
|
||||
false,
|
||||
)
|
||||
|
||||
r.clock = clocktesting.NewFakeClock(now)
|
||||
@@ -468,6 +472,7 @@ func TestRestoreReconcile(t *testing.T) {
|
||||
}
|
||||
if test.expectedRestorerCall != nil {
|
||||
backupStore.On("GetBackupContents", test.backup.Name).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil)
|
||||
backupStore.On("GetCSIVolumeSnapshots", test.backup.Name).Return([]*snapshotv1api.VolumeSnapshot{}, nil)
|
||||
|
||||
restorer.On("RestoreWithResolvers", mock.Anything, mock.Anything, mock.Anything, mock.Anything,
|
||||
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
|
||||
@@ -616,6 +621,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) {
|
||||
metrics.NewServerMetrics(),
|
||||
formatFlag,
|
||||
60*time.Minute,
|
||||
false,
|
||||
)
|
||||
|
||||
restore := &velerov1api.Restore{
|
||||
@@ -708,6 +714,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
|
||||
metrics.NewServerMetrics(),
|
||||
formatFlag,
|
||||
60*time.Minute,
|
||||
false,
|
||||
)
|
||||
|
||||
restore := &velerov1api.Restore{
|
||||
@@ -760,7 +767,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupKind: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
"sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
},
|
||||
}
|
||||
require.NoError(t, r.kbClient.Create(context.Background(), cm1))
|
||||
@@ -776,7 +783,8 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
|
||||
Spec: velerov1api.RestoreSpec{
|
||||
BackupName: "backup-1",
|
||||
ResourceModifier: &corev1.TypedLocalObjectReference{
|
||||
Kind: resourcemodifiers.ConfigmapRefType,
|
||||
// intentional to ensure case insensitivity works as expected
|
||||
Kind: "confIGMaP",
|
||||
Name: "test-configmap-invalid",
|
||||
},
|
||||
},
|
||||
@@ -788,7 +796,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"sub.yml": "version1: v1\nresourceModifierRules:\n- conditions:\n groupKind: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
"sub.yml": "version1: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: replace\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
},
|
||||
}
|
||||
require.NoError(t, r.kbClient.Create(context.Background(), invalidVersionCm))
|
||||
@@ -816,7 +824,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupKind: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: invalid\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
"sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: persistentvolumeclaims\n resourceNameRegex: \".*\"\n namespaces:\n - bar\n - foo\n patches:\n - operation: invalid\n path: \"/spec/storageClassName\"\n value: \"premium\"\n - operation: remove\n path: \"/metadata/labels/test\"\n\n\n",
|
||||
},
|
||||
}
|
||||
require.NoError(t, r.kbClient.Create(context.Background(), invalidOperatorCm))
|
||||
|
||||
@@ -139,18 +139,6 @@ func (r *restoreOperationsReconciler) Reconcile(ctx context.Context, req ctrl.Re
|
||||
return ctrl.Result{}, errors.Wrap(err, "error getting backup info")
|
||||
}
|
||||
|
||||
if info.location.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly {
|
||||
log.Infof("Cannot check progress on Restore operations because backup storage location %s is currently in read-only mode; marking restore PartiallyFailed", info.location.Name)
|
||||
restore.Status.Phase = velerov1api.RestorePhasePartiallyFailed
|
||||
restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()}
|
||||
r.metrics.RegisterRestorePartialFailure(restore.Spec.ScheduleName)
|
||||
err := r.updateRestoreAndOperationsJSON(ctx, original, restore, nil, &itemoperationmap.OperationsForRestore{ErrsSinceUpdate: []string{"BSL is read-only"}}, false, false)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error updating Restore")
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
pluginManager := r.newPluginManager(r.logger)
|
||||
defer pluginManager.CleanupClients()
|
||||
backupStore, err := r.backupStoreGetter.Get(info.location, pluginManager, r.logger)
|
||||
|
||||
@@ -133,10 +133,10 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, paren
|
||||
if !fs.initialized {
|
||||
return errors.New("file system data path is not initialized")
|
||||
}
|
||||
volMode := getPersistentVolumeMode(source)
|
||||
|
||||
go func() {
|
||||
snapshotID, emptySnapshot, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, realSource, tags, forceFull, parentSnapshot, volMode, fs)
|
||||
snapshotID, emptySnapshot, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, realSource, tags, forceFull,
|
||||
parentSnapshot, source.VolMode, fs)
|
||||
|
||||
if err == provider.ErrorCanceled {
|
||||
fs.callbacks.OnCancelled(context.Background(), fs.namespace, fs.jobName)
|
||||
@@ -155,10 +155,8 @@ func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint) erro
|
||||
return errors.New("file system data path is not initialized")
|
||||
}
|
||||
|
||||
volMode := getPersistentVolumeMode(target)
|
||||
|
||||
go func() {
|
||||
err := fs.uploaderProv.RunRestore(fs.ctx, snapshotID, target.ByPath, volMode, fs)
|
||||
err := fs.uploaderProv.RunRestore(fs.ctx, snapshotID, target.ByPath, target.VolMode, fs)
|
||||
|
||||
if err == provider.ErrorCanceled {
|
||||
fs.callbacks.OnCancelled(context.Background(), fs.namespace, fs.jobName)
|
||||
@@ -172,13 +170,6 @@ func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPersistentVolumeMode(source AccessPoint) uploader.PersistentVolumeMode {
|
||||
if source.ByBlock != "" {
|
||||
return uploader.PersistentVolumeBlock
|
||||
}
|
||||
return uploader.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
// UpdateProgress which implement ProgressUpdater interface to update progress status
|
||||
func (fs *fileSystemBR) UpdateProgress(p *uploader.Progress) {
|
||||
if fs.callbacks.OnProgress != nil {
|
||||
|
||||
@@ -53,7 +53,7 @@ type Callbacks struct {
|
||||
// AccessPoint represents an access point that has been exposed to a data path instance
|
||||
type AccessPoint struct {
|
||||
ByPath string
|
||||
ByBlock string
|
||||
VolMode uploader.PersistentVolumeMode
|
||||
}
|
||||
|
||||
// AsyncBR is the interface for asynchronous data path methods
|
||||
|
||||
@@ -18,6 +18,7 @@ package discovery

import (
"sort"
"strings"
"sync"

"github.com/pkg/errors"
@@ -170,7 +171,7 @@ func (h *helper) Refresh() error {
}

h.resources = discovery.FilteredBy(
discovery.ResourcePredicateFunc(filterByVerbs),
And(filterByVerbs, skipSubresource),
serverResources,
)

@@ -240,10 +241,34 @@ func refreshServerGroupsAndResources(discoveryClient serverResourcesInterface, l
return serverGroups, serverResources, err
}

// And returns a composite predicate that implements a logical AND of the predicates passed to it.
func And(predicates ...discovery.ResourcePredicateFunc) discovery.ResourcePredicate {
return and{predicates}
}

type and struct {
predicates []discovery.ResourcePredicateFunc
}

func (a and) Match(groupVersion string, r *metav1.APIResource) bool {
for _, p := range a.predicates {
if !p(groupVersion, r) {
return false
}
}

return true
}

func filterByVerbs(groupVersion string, r *metav1.APIResource) bool {
return discovery.SupportsAllVerbs{Verbs: []string{"list", "create", "get", "delete"}}.Match(groupVersion, r)
}

func skipSubresource(_ string, r *metav1.APIResource) bool {
// if we have a slash, then this is a subresource and we shouldn't include it.
return !strings.Contains(r.Name, "/")
}

// sortResources sources resources by moving extensions to the end of the slice. The order of all
// the other resources is preserved.
func sortResources(resources []*metav1.APIResourceList) {

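The And predicate added above keeps an API resource only when every predicate accepts it, which lets Refresh combine the verb filter with the new subresource filter in one FilteredBy call. Below is a standalone sketch of the same composition using the client-go discovery types; the and, hasList, and notSubresource names are illustrative.

package main

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/discovery"
)

// and returns a predicate that is true only when all wrapped predicates are true.
func and(preds ...discovery.ResourcePredicateFunc) discovery.ResourcePredicateFunc {
	return func(gv string, r *metav1.APIResource) bool {
		for _, p := range preds {
			if !p(gv, r) {
				return false
			}
		}
		return true
	}
}

func main() {
	hasList := func(gv string, r *metav1.APIResource) bool {
		return discovery.SupportsAllVerbs{Verbs: []string{"list"}}.Match(gv, r)
	}
	notSubresource := func(_ string, r *metav1.APIResource) bool {
		return !strings.Contains(r.Name, "/")
	}

	pods := &metav1.APIResource{Name: "pods", Verbs: metav1.Verbs{"list", "get"}}
	podStatus := &metav1.APIResource{Name: "pods/status", Verbs: metav1.Verbs{"get"}}

	fmt.Println(and(hasList, notSubresource)("v1", pods))      // true
	fmt.Println(and(hasList, notSubresource)("v1", podStatus)) // false
}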
@@ -128,6 +128,13 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
|
||||
|
||||
curLog.WithField("vs name", volumeSnapshot.Name).Infof("VS is deleted in namespace %s", volumeSnapshot.Namespace)
|
||||
|
||||
err = csi.RemoveVSCProtect(ctx, e.csiSnapshotClient, vsc.Name, csiExposeParam.Timeout)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error to remove protect from volume snapshot content")
|
||||
}
|
||||
|
||||
curLog.WithField("vsc name", vsc.Name).Infof("Removed protect from VSC")
|
||||
|
||||
err = csi.EnsureDeleteVSC(ctx, e.csiSnapshotClient, vsc.Name, csiExposeParam.Timeout)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error to delete volume snapshot content")
|
||||
@@ -190,6 +197,7 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
|
||||
|
||||
backupPodName := ownerObject.Name
|
||||
backupPVCName := ownerObject.Name
|
||||
volumeName := string(ownerObject.UID)
|
||||
|
||||
curLog := e.log.WithFields(logrus.Fields{
|
||||
"owner": ownerObject.Name,
|
||||
@@ -218,7 +226,20 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
|
||||
|
||||
curLog.WithField("backup pvc", backupPVCName).Info("Backup PVC is bound")
|
||||
|
||||
return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: pod.Spec.Volumes[0].Name}}, nil
|
||||
i := 0
|
||||
for i = 0; i < len(pod.Spec.Volumes); i++ {
|
||||
if pod.Spec.Volumes[i].Name == volumeName {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if i == len(pod.Spec.Volumes) {
|
||||
return nil, errors.Errorf("backup pod %s doesn't have the expected backup volume", pod.Name)
|
||||
}
|
||||
|
||||
curLog.WithField("pod", pod.Name).Infof("Backup volume is found in pod at index %v", i)
|
||||
|
||||
return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: volumeName}}, nil
|
||||
}
|
||||
|
||||
func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference, vsName string, sourceNamespace string) {
|
||||
@@ -233,9 +254,12 @@ func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.Obj
}

func getVolumeModeByAccessMode(accessMode string) (corev1.PersistentVolumeMode, error) {
if accessMode == AccessModeFileSystem {
switch accessMode {
case AccessModeFileSystem:
return corev1.PersistentVolumeFilesystem, nil
} else {
case AccessModeBlock:
return corev1.PersistentVolumeBlock, nil
default:
return "", errors.Errorf("unsupported access mode %s", accessMode)
}
}
@@ -246,8 +270,9 @@ func (e *csiSnapshotExposer) createBackupVS(ctx context.Context, ownerObject cor
|
||||
|
||||
vs := &snapshotv1api.VolumeSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: backupVSName,
|
||||
Namespace: ownerObject.Namespace,
|
||||
Name: backupVSName,
|
||||
Namespace: ownerObject.Namespace,
|
||||
Annotations: snapshotVS.Annotations,
|
||||
// Don't add ownerReference to SnapshotBackup.
|
||||
// The backupPVC should be deleted before backupVS, otherwise, the deletion of backupVS will fail since
|
||||
// backupPVC has its dataSource referring to it
|
||||
@@ -268,7 +293,8 @@ func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject co
|
||||
|
||||
vsc := &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: backupVSCName,
|
||||
Name: backupVSCName,
|
||||
Annotations: snapshotVSC.Annotations,
|
||||
},
|
||||
Spec: snapshotv1api.VolumeSnapshotContentSpec{
|
||||
VolumeSnapshotRef: corev1.ObjectReference{
|
||||
@@ -280,7 +306,7 @@ func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject co
|
||||
Source: snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: snapshotVSC.Status.SnapshotHandle,
|
||||
},
|
||||
DeletionPolicy: snapshotVSC.Spec.DeletionPolicy,
|
||||
DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
|
||||
Driver: snapshotVSC.Spec.Driver,
|
||||
VolumeSnapshotClassName: snapshotVSC.Spec.VolumeSnapshotClassName,
|
||||
},
|
||||
@@ -354,6 +380,13 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
|
||||
}
|
||||
|
||||
var gracePeriod int64 = 0
|
||||
volumeMounts, volumeDevices := kube.MakePodPVCAttachment(volumeName, backupPVC.Spec.VolumeMode)
|
||||
|
||||
if label == nil {
|
||||
label = make(map[string]string)
|
||||
}
|
||||
|
||||
label[podGroupLabel] = podGroupSnapshot
|
||||
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -371,16 +404,26 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
|
||||
Labels: label,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: corev1.ScheduleAnyway,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
podGroupLabel: podGroupSnapshot,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: podInfo.image,
|
||||
ImagePullPolicy: corev1.PullNever,
|
||||
Command: []string{"/velero-helper", "pause"},
|
||||
VolumeMounts: []corev1.VolumeMount{{
|
||||
Name: volumeName,
|
||||
MountPath: "/" + volumeName,
|
||||
}},
|
||||
VolumeMounts: volumeMounts,
|
||||
VolumeDevices: volumeDevices,
|
||||
},
|
||||
},
|
||||
ServiceAccountName: podInfo.serviceAccount,
|
||||
|
||||
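The createBackupPod hunk above replaces the hard-coded filesystem mount with kube.MakePodPVCAttachment, whose implementation is not part of this diff. Below is a minimal sketch of what such a helper presumably does, keyed off the PVC's volume mode; the function name and exact shape are illustrative assumptions, not the actual implementation.

```go
package kube

import corev1 "k8s.io/api/core/v1"

// makePodPVCAttachmentSketch (hypothetical) returns a VolumeMount for
// filesystem volumes or a VolumeDevice for block volumes, so the caller can
// wire whichever slice is non-empty into the pod's container spec.
func makePodPVCAttachmentSketch(volumeName string, volMode *corev1.PersistentVolumeMode) ([]corev1.VolumeMount, []corev1.VolumeDevice) {
	if volMode != nil && *volMode == corev1.PersistentVolumeBlock {
		// Block volumes are attached as raw devices at /<volumeName>.
		return nil, []corev1.VolumeDevice{{
			Name:       volumeName,
			DevicePath: "/" + volumeName,
		}}
	}

	// Filesystem (or unspecified) volumes keep the original mount behavior.
	return []corev1.VolumeMount{{
		Name:      volumeName,
		MountPath: "/" + volumeName,
	}}, nil
}
```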
@@ -37,6 +37,8 @@ import (
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"

clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
)

type reactor struct {
@@ -58,10 +60,22 @@ func TestExpose(t *testing.T) {
UID: "fake-uid",
},
}

snapshotClass := "fake-snapshot-class"
vsObject := &snapshotv1api.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vs",
Namespace: "fake-ns",
Annotations: map[string]string{
"fake-key-1": "fake-value-1",
"fake-key-2": "fake-value-2",
},
},
Spec: snapshotv1api.VolumeSnapshotSpec{
Source: snapshotv1api.VolumeSnapshotSource{
VolumeSnapshotContentName: &vscName,
},
VolumeSnapshotClassName: &snapshotClass,
},
Status: &snapshotv1api.VolumeSnapshotStatus{
BoundVolumeSnapshotContentName: &vscName,
@@ -71,15 +85,23 @@ func TestExpose(t *testing.T) {
}

var restoreSize int64
snapshotHandle := "fake-handle"
vscObj := &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-vsc",
Name: vscName,
Annotations: map[string]string{
"fake-key-3": "fake-value-3",
"fake-key-4": "fake-value-4",
},
},
Spec: snapshotv1api.VolumeSnapshotContentSpec{
DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
DeletionPolicy: snapshotv1api.VolumeSnapshotContentDelete,
Driver: "fake-driver",
VolumeSnapshotClassName: &snapshotClass,
},
Status: &snapshotv1api.VolumeSnapshotContentStatus{
RestoreSize: &restoreSize,
RestoreSize: &restoreSize,
SnapshotHandle: &snapshotHandle,
},
}

@@ -284,6 +306,23 @@ func TestExpose(t *testing.T) {
},
err: "error to create backup pod: fake-create-error",
},
{
name: "success",
ownerBackup: backup,
exposeParam: CSISnapshotExposeParam{
SnapshotName: "fake-vs",
SourceNamespace: "fake-ns",
AccessMode: AccessModeFileSystem,
Timeout: time.Millisecond,
},
snapshotClientObj: []runtime.Object{
vsObject,
vscObj,
},
kubeClientObj: []runtime.Object{
daemonSet,
},
},
}

for _, test := range tests {
@@ -317,7 +356,210 @@ func TestExpose(t *testing.T) {
}

err := exposer.Expose(context.Background(), ownerObject, &test.exposeParam)
assert.EqualError(t, err, test.err)
if err == nil {
assert.NoError(t, err)

_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{})
assert.NoError(t, err)

_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{})
assert.NoError(t, err)

expectedVS, err := exposer.csiSnapshotClient.VolumeSnapshots(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{})
assert.NoError(t, err)

expectedVSC, err := exposer.csiSnapshotClient.VolumeSnapshotContents().Get(context.Background(), ownerObject.Name, metav1.GetOptions{})
assert.NoError(t, err)

assert.Equal(t, expectedVS.Annotations, vsObject.Annotations)
assert.Equal(t, *expectedVS.Spec.VolumeSnapshotClassName, *vsObject.Spec.VolumeSnapshotClassName)
assert.Equal(t, *expectedVS.Spec.Source.VolumeSnapshotContentName, expectedVSC.Name)

assert.Equal(t, expectedVSC.Annotations, vscObj.Annotations)
assert.Equal(t, expectedVSC.Spec.DeletionPolicy, vscObj.Spec.DeletionPolicy)
assert.Equal(t, expectedVSC.Spec.Driver, vscObj.Spec.Driver)
assert.Equal(t, *expectedVSC.Spec.VolumeSnapshotClassName, *vscObj.Spec.VolumeSnapshotClassName)
} else {
assert.EqualError(t, err, test.err)
}

})
}
}

func TestGetExpose(t *testing.T) {
backup := &velerov1.Backup{
TypeMeta: metav1.TypeMeta{
APIVersion: velerov1.SchemeGroupVersion.String(),
Kind: "Backup",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-uid",
},
}

backupPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
Name: backup.Name,
},
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "fake-volume",
},
{
Name: "fake-volume-2",
},
{
Name: string(backup.UID),
},
},
},
}

backupPodWithoutVolume := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
Name: backup.Name,
},
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "fake-volume-1",
},
{
Name: "fake-volume-2",
},
},
},
}

backupPVC := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
Name: backup.Name,
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "fake-pv-name",
},
}

backupPV := &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv-name",
},
}

scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)

tests := []struct {
name string
kubeClientObj []runtime.Object
ownerBackup *velerov1.Backup
exposeWaitParam CSISnapshotExposeWaitParam
Timeout time.Duration
err string
expectedResult *ExposeResult
}{
{
name: "backup pod is not found",
ownerBackup: backup,
exposeWaitParam: CSISnapshotExposeWaitParam{
NodeName: "fake-node",
},
},
{
name: "wait pvc bound fail",
ownerBackup: backup,
exposeWaitParam: CSISnapshotExposeWaitParam{
NodeName: "fake-node",
},
kubeClientObj: []runtime.Object{
backupPod,
},
Timeout: time.Second,
err: "error to wait backup PVC bound, fake-backup: error to wait for rediness of PVC: error to get pvc velero/fake-backup: persistentvolumeclaims \"fake-backup\" not found",
},
{
name: "backup volume not found in pod",
ownerBackup: backup,
exposeWaitParam: CSISnapshotExposeWaitParam{
NodeName: "fake-node",
},
kubeClientObj: []runtime.Object{
backupPodWithoutVolume,
backupPVC,
backupPV,
},
Timeout: time.Second,
err: "backup pod fake-backup doesn't have the expected backup volume",
},
{
name: "succeed",
ownerBackup: backup,
exposeWaitParam: CSISnapshotExposeWaitParam{
NodeName: "fake-node",
},
kubeClientObj: []runtime.Object{
backupPod,
backupPVC,
backupPV,
},
Timeout: time.Second,
expectedResult: &ExposeResult{
ByPod: ExposeByPod{
HostingPod: backupPod,
VolumeName: string(backup.UID),
},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)

fakeClientBuilder := clientFake.NewClientBuilder()
fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)

fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()

exposer := csiSnapshotExposer{
kubeClient: fakeKubeClient,
log: velerotest.NewLogger(),
}

var ownerObject corev1.ObjectReference
if test.ownerBackup != nil {
ownerObject = corev1.ObjectReference{
Kind: test.ownerBackup.Kind,
Namespace: test.ownerBackup.Namespace,
Name: test.ownerBackup.Name,
UID: test.ownerBackup.UID,
APIVersion: test.ownerBackup.APIVersion,
}
}

test.exposeWaitParam.NodeClient = fakeClient

result, err := exposer.GetExposed(context.Background(), ownerObject, test.Timeout, &test.exposeWaitParam)
if test.err == "" {
assert.NoError(t, err)

if test.expectedResult == nil {
assert.Nil(t, result)
} else {
assert.NoError(t, err)
assert.Equal(t, test.expectedResult.ByPod.VolumeName, result.ByPod.VolumeName)
assert.Equal(t, test.expectedResult.ByPod.HostingPod.Name, result.ByPod.HostingPod.Name)
}
} else {
assert.EqualError(t, err, test.err)
}
})
}
}

@@ -82,7 +82,7 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.O
return errors.Errorf("Target PVC %s/%s has already been bound, abort", sourceNamespace, targetPVCName)
}

restorePod, err := e.createRestorePod(ctx, ownerObject, hostingPodLabels, selectedNode)
restorePod, err := e.createRestorePod(ctx, ownerObject, targetPVC, hostingPodLabels, selectedNode)
if err != nil {
return errors.Wrapf(err, "error to create restore pod")
}
@@ -221,7 +221,7 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
}

restorePVName := restorePV.Name
restorePV, err = kube.ResetPVBinding(ctx, e.kubeClient.CoreV1(), restorePV, matchLabel)
restorePV, err = kube.ResetPVBinding(ctx, e.kubeClient.CoreV1(), restorePV, matchLabel, targetPVC)
if err != nil {
return errors.Wrapf(err, "error to reset binding info for restore PV %s", restorePVName)
}
@@ -247,7 +247,8 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
return nil
}

func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, label map[string]string, selectedNode string) (*corev1.Pod, error) {
func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim,
label map[string]string, selectedNode string) (*corev1.Pod, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -260,6 +261,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
}

var gracePeriod int64 = 0
volumeMounts, volumeDevices := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode)

pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -283,10 +285,8 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
Image: podInfo.image,
ImagePullPolicy: corev1.PullNever,
Command: []string{"/velero-helper", "pause"},
VolumeMounts: []corev1.VolumeMount{{
Name: volumeName,
MountPath: "/" + volumeName,
}},
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
},
},
ServiceAccountName: podInfo.serviceAccount,

@@ -26,11 +26,13 @@ import (
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

"github.com/vmware-tanzu/velero/pkg/datapath"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

var getVolumeDirectory = kube.GetVolumeDirectory
var getVolumeMode = kube.GetVolumeMode
var singlePathMatch = kube.SinglePathMatch

// GetPodVolumeHostPath returns a path that can be accessed from the host for a given volume of a pod
@@ -45,7 +47,17 @@ func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, volumeName strin

logger.WithField("volDir", volDir).Info("Got volume dir")

pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pod.GetUID()), volDir)
volMode, err := getVolumeMode(ctx, logger, pod, volumeName, cli)
if err != nil {
return datapath.AccessPoint{}, errors.Wrapf(err, "error getting volume mode for volume %s in pod %s", volumeName, pod.Name)
}

volSubDir := "volumes"
if volMode == uploader.PersistentVolumeBlock {
volSubDir = "volumeDevices"
}

pathGlob := fmt.Sprintf("/host_pods/%s/%s/*/%s", string(pod.GetUID()), volSubDir, volDir)
logger.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob")

path, err := singlePathMatch(pathGlob, fs, logger)
@@ -56,6 +68,7 @@ func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, volumeName strin
logger.WithField("path", path).Info("Found path matching glob")

return datapath.AccessPoint{
ByPath: path,
ByPath: path,
VolMode: volMode,
}, nil
}

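The GetPodVolumeHostPath hunks above call kube.GetVolumeMode (aliased as getVolumeMode) to choose between the volumes and volumeDevices host-path globs, but its body is not shown in this diff. The sketch below is a hedged illustration of the idea, assuming the mode is read from the backing PVC; the function name, client calls, and error text are assumptions.

```go
package kube

import (
	"context"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/pkg/uploader"
)

// getVolumeModeSketch (hypothetical) resolves the named pod volume to its PVC
// and reports whether it is a block or filesystem volume.
func getVolumeModeSketch(ctx context.Context, log logrus.FieldLogger, pod *corev1.Pod,
	volumeName string, cli ctrlclient.Client) (uploader.PersistentVolumeMode, error) {
	for _, vol := range pod.Spec.Volumes {
		if vol.Name != volumeName || vol.PersistentVolumeClaim == nil {
			continue
		}

		pvc := &corev1.PersistentVolumeClaim{}
		key := ctrlclient.ObjectKey{Namespace: pod.Namespace, Name: vol.PersistentVolumeClaim.ClaimName}
		if err := cli.Get(ctx, key, pvc); err != nil {
			// Assumes uploader.PersistentVolumeMode is a string-backed type.
			return "", errors.Wrapf(err, "error getting PVC for volume %s", volumeName)
		}

		if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
			return uploader.PersistentVolumeBlock, nil
		}
		return uploader.PersistentVolumeFilesystem, nil
	}

	// Non-PVC volumes (emptyDir, configMap, ...) only ever expose a filesystem.
	return uploader.PersistentVolumeFilesystem, nil
}
```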
@@ -29,17 +29,19 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

func TestGetPodVolumeHostPath(t *testing.T) {
tests := []struct {
name string
getVolumeDirFunc func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error)
pathMatchFunc func(string, filesystem.Interface, logrus.FieldLogger) (string, error)
pod *corev1.Pod
pvc string
err string
name string
getVolumeDirFunc func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error)
getVolumeModeFunc func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error)
pathMatchFunc func(string, filesystem.Interface, logrus.FieldLogger) (string, error)
pod *corev1.Pod
pvc string
err string
}{
{
name: "get volume dir fail",
@@ -55,6 +57,9 @@ func TestGetPodVolumeHostPath(t *testing.T) {
getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error) {
return "", nil
},
getVolumeModeFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error) {
return uploader.PersistentVolumeFilesystem, nil
},
pathMatchFunc: func(string, filesystem.Interface, logrus.FieldLogger) (string, error) {
return "", errors.New("fake-error-2")
},
@@ -62,6 +67,18 @@ func TestGetPodVolumeHostPath(t *testing.T) {
pvc: "fake-pvc-1",
err: "error identifying unique volume path on host for volume fake-pvc-1 in pod fake-pod-2: fake-error-2",
},
{
name: "get block volume dir success",
getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (
string, error) {
return "fake-pvc-1", nil
},
pathMatchFunc: func(string, filesystem.Interface, logrus.FieldLogger) (string, error) {
return "/host_pods/fake-pod-1-id/volumeDevices/kubernetes.io~csi/fake-pvc-1-id", nil
},
pod: builder.ForPod(velerov1api.DefaultNamespace, "fake-pod-1").Result(),
pvc: "fake-pvc-1",
},
}

for _, test := range tests {
@@ -70,12 +87,18 @@ func TestGetPodVolumeHostPath(t *testing.T) {
getVolumeDirectory = test.getVolumeDirFunc
}

if test.getVolumeModeFunc != nil {
getVolumeMode = test.getVolumeModeFunc
}

if test.pathMatchFunc != nil {
singlePathMatch = test.pathMatchFunc
}

_, err := GetPodVolumeHostPath(context.Background(), test.pod, test.pvc, nil, nil, velerotest.NewLogger())
assert.EqualError(t, err, test.err)
if test.err != "" || err != nil {
assert.EqualError(t, err, test.err)
}
})
}
}

@@ -22,6 +22,9 @@ import (

const (
AccessModeFileSystem = "by-file-system"
AccessModeBlock = "by-block-device"
podGroupLabel = "velero.io/exposer-pod-group"
podGroupSnapshot = "snapshot-exposer"
)

// ExposeResult defines the result of expose.

@@ -86,6 +86,14 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
},
},
},
{
Name: "host-plugins",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins",
},
},
},
{
Name: "scratch",
VolumeSource: corev1.VolumeSource{
@@ -97,18 +105,26 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
{
Name: "node-agent",
Image: c.image,
Ports: containerPorts(),
ImagePullPolicy: pullPolicy,
Command: []string{
"/velero",
},
Args: daemonSetArgs,

SecurityContext: &corev1.SecurityContext{
Privileged: &c.privilegedNodeAgent,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "host-pods",
MountPath: "/host_pods",
MountPropagation: &mountPropagationMode,
},
{
Name: "host-plugins",
MountPath: "/var/lib/kubelet/plugins",
MountPropagation: &mountPropagationMode,
},
{
Name: "scratch",
MountPath: "/scratch",

@@ -35,7 +35,7 @@ func TestDaemonSet(t *testing.T) {

ds = DaemonSet("velero", WithSecret(true))
assert.Equal(t, 7, len(ds.Spec.Template.Spec.Containers[0].Env))
assert.Equal(t, 3, len(ds.Spec.Template.Spec.Volumes))
assert.Equal(t, 4, len(ds.Spec.Template.Spec.Volumes))

ds = DaemonSet("velero", WithFeatures([]string{"foo,bar,baz"}))
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Args, 3)

@@ -46,6 +46,9 @@ type podTemplateConfig struct {
defaultVolumesToFsBackup bool
serviceAccountName string
uploaderType string
privilegedNodeAgent bool
defaultSnapshotMoveData bool
disableInformerCache bool
}

func WithImage(image string) podTemplateOption {
@@ -136,12 +139,30 @@ func WithDefaultVolumesToFsBackup() podTemplateOption {
}
}

func WithDefaultSnapshotMoveData() podTemplateOption {
return func(c *podTemplateConfig) {
c.defaultSnapshotMoveData = true
}
}

func WithDisableInformerCache() podTemplateOption {
return func(c *podTemplateConfig) {
c.disableInformerCache = true
}
}

func WithServiceAccountName(sa string) podTemplateOption {
return func(c *podTemplateConfig) {
c.serviceAccountName = sa
}
}

func WithPrivilegedNodeAgent() podTemplateOption {
return func(c *podTemplateConfig) {
c.privilegedNodeAgent = true
}
}

func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment {
// TODO: Add support for server args
c := &podTemplateConfig{
@@ -167,6 +188,14 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
args = append(args, "--default-volumes-to-fs-backup=true")
}

if c.defaultSnapshotMoveData {
args = append(args, "--default-snapshot-move-data=true")
}

if c.disableInformerCache {
args = append(args, "--disable-informer-cache=true")
}

if len(c.uploaderType) > 0 {
args = append(args, fmt.Sprintf("--uploader-type=%s", c.uploaderType))
}

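The Deployment hunk above only appends the new --default-snapshot-move-data and --disable-informer-cache server args; the matching VeleroOptions fields and the AllResources wiring appear further down in this diff. The usage sketch below shows how these options would typically be passed when generating install resources; the import path and surrounding wiring are assumptions for illustration only.

```go
package main

import (
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/install"
)

func main() {
	opts := &install.VeleroOptions{
		Namespace:               "velero",
		UseNodeAgent:            true,
		PrivilegedNodeAgent:     true, // node-agent container runs privileged
		DefaultSnapshotMoveData: true, // adds --default-snapshot-move-data=true to the server args
		DisableInformerCache:    true, // adds --disable-informer-cache=true to the server args
	}

	// AllResources renders the Deployment, node-agent DaemonSet, namespace, etc.
	resources := install.AllResources(opts)
	fmt.Printf("generated %d install resources\n", len(resources.Items))
}
```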
@@ -64,4 +64,8 @@ func TestDeployment(t *testing.T) {

deploy = Deployment("velero", WithServiceAccountName("test-sa"))
assert.Equal(t, "test-sa", deploy.Spec.Template.Spec.ServiceAccountName)

deploy = Deployment("velero", WithDisableInformerCache())
assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2)
assert.Equal(t, "--disable-informer-cache=true", deploy.Spec.Template.Spec.Containers[0].Args[1])
}

@@ -32,7 +32,11 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

const defaultServiceAccountName = "velero"
const (
defaultServiceAccountName = "velero"
podSecurityLevel = "privileged"
podSecurityVersion = "latest"
)

var (
DefaultVeleroPodCPURequest = "500m"
@@ -148,8 +152,12 @@ func Namespace(namespace string) *corev1.Namespace {
},
}

ns.Labels["pod-security.kubernetes.io/enforce"] = "privileged"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
ns.Labels["pod-security.kubernetes.io/enforce"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/enforce-version"] = podSecurityVersion
ns.Labels["pod-security.kubernetes.io/audit"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/audit-version"] = podSecurityVersion
ns.Labels["pod-security.kubernetes.io/warn"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/warn-version"] = podSecurityVersion

return ns
}
@@ -232,6 +240,7 @@ type VeleroOptions struct {
SecretData []byte
RestoreOnly bool
UseNodeAgent bool
PrivilegedNodeAgent bool
UseVolumeSnapshots bool
BSLConfig map[string]string
VSLConfig map[string]string
@@ -243,6 +252,8 @@ type VeleroOptions struct {
Features []string
DefaultVolumesToFsBackup bool
UploaderType string
DefaultSnapshotMoveData bool
DisableInformerCache bool
}

func AllCRDs() *unstructured.UnstructuredList {
@@ -343,6 +354,14 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
deployOpts = append(deployOpts, WithDefaultVolumesToFsBackup())
}

if o.DefaultSnapshotMoveData {
deployOpts = append(deployOpts, WithDefaultSnapshotMoveData())
}

if o.DisableInformerCache {
deployOpts = append(deployOpts, WithDisableInformerCache())
}

deploy := Deployment(o.Namespace, deployOpts...)

if err := appendUnstructured(resources, deploy); err != nil {
@@ -361,6 +380,9 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
if len(o.Features) > 0 {
dsOpts = append(dsOpts, WithFeatures(o.Features))
}
if o.PrivilegedNodeAgent {
dsOpts = append(dsOpts, WithPrivilegedNodeAgent())
}
ds := DaemonSet(o.Namespace, dsOpts...)
if err := appendUnstructured(resources, ds); err != nil {
fmt.Printf("error appending DaemonSet %s: %s\n", ds.GetName(), err.Error())

@@ -47,6 +47,10 @@ func TestResources(t *testing.T) {
// PSA(Pod Security Admission) and PSS(Pod Security Standards).
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce-version"], "latest")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit-version"], "latest")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn-version"], "latest")

crb := ClusterRoleBinding(DefaultVeleroNamespace)
// The CRB is a cluster-scoped resource

@@ -31,6 +31,7 @@ import (

"github.com/vmware-tanzu/velero/internal/resourcepolicies"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
@@ -200,10 +201,11 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
b.resultsLock.Unlock()

var (
errs []error
podVolumeBackups []*velerov1api.PodVolumeBackup
podVolumes = make(map[string]corev1api.Volume)
mountedPodVolumes = sets.String{}
errs []error
podVolumeBackups []*velerov1api.PodVolumeBackup
podVolumes = make(map[string]corev1api.Volume)
mountedPodVolumes = sets.String{}
attachedPodDevices = sets.String{}
)
pvcSummary := NewPVCBackupSummary()

@@ -233,6 +235,9 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
for _, volumeMount := range container.VolumeMounts {
mountedPodVolumes.Insert(volumeMount.Name)
}
for _, volumeDevice := range container.VolumeDevices {
attachedPodDevices.Insert(volumeDevice.Name)
}
}

var numVolumeSnapshots int
@@ -263,6 +268,15 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
continue
}

// check if volume is a block volume
if attachedPodDevices.Has(volumeName) {
msg := fmt.Sprintf("volume %s declared in pod %s/%s is a block volume. Block volumes are not supported for fs backup, skipping",
volumeName, pod.Namespace, pod.Name)
log.Warn(msg)
pvcSummary.addSkipped(volumeName, msg)
continue
}

// volumes that are not mounted by any container should not be backed up, because
// its directory is not created
if !mountedPodVolumes.Has(volumeName) {
@@ -284,7 +298,11 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
}

volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, b.uploaderType, pvc)
if _, err = b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil {
// TODO: once backupper is refactored to use controller-runtime, just pass client instead of anonymous func
if err := veleroclient.CreateRetryGenerateNameWithFunc(volumeBackup, func() error {
_, err := b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{})
return err
}); err != nil {
errs = append(errs, err)
continue
}

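Both the backupper hunk above and the restorer hunk below swap a bare Create call for veleroclient.CreateRetryGenerateNameWithFunc, whose implementation is not included in this diff. The sketch below illustrates the assumed intent: when an object is created via metadata.generateName, a rare name collision surfaces as an AlreadyExists error and the create can simply be retried so the server picks a fresh name. The function name, retry count, and guard conditions are illustrative assumptions.

```go
package client

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const maxCreateRetries = 10

// createRetryGenerateNameSketch (hypothetical) re-runs createFn while the API
// server reports AlreadyExists, which only makes sense for objects that rely
// on generateName and therefore get a different candidate name on each attempt.
func createRetryGenerateNameSketch(obj metav1.Object, createFn func() error) error {
	if obj.GetGenerateName() == "" || obj.GetName() != "" {
		// Explicitly named objects would collide forever; don't retry those.
		return createFn()
	}

	var err error
	for i := 0; i < maxCreateRetries; i++ {
		if err = createFn(); !apierrors.IsAlreadyExists(err) {
			return err // success, or an error that retrying won't fix
		}
	}
	return err
}
```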
@@ -31,6 +31,7 @@ import (
"k8s.io/client-go/tools/cache"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
veleroclient "github.com/vmware-tanzu/velero/pkg/client"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
@@ -172,7 +173,10 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error {

volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, backupInfo.snapshotID, repo.Spec.ResticIdentifier, backupInfo.uploaderType, data.SourceNamespace, pvc)

if err := errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil {
// TODO: once restorer is refactored to use controller-runtime, just pass client instead of anonymous func
if err := veleroclient.CreateRetryGenerateNameWithFunc(volumeRestore, func() error {
return errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{}))
}); err != nil {
errs = append(errs, errors.WithStack(err))
continue
}

@@ -37,14 +37,6 @@ const (
// TODO(2.0): remove
podAnnotationPrefix = "snapshot.velero.io/"

// VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes
// need to be backed up using pod volume backup.
VolumesToBackupAnnotation = "backup.velero.io/backup-volumes"

// VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes
// should be excluded from pod volume backup.
VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes"

// DefaultVolumesToFsBackup specifies whether pod volume backup should be used, by default, to
// take backup of all pod volumes.
DefaultVolumesToFsBackup = false
@@ -216,85 +208,3 @@ func getPodSnapshotAnnotations(obj metav1.Object) map[string]string {

return res
}

// GetVolumesToBackup returns a list of volume names to backup for
// the provided pod.
// Deprecated: Use GetVolumesByPod instead.
func GetVolumesToBackup(obj metav1.Object) []string {
annotations := obj.GetAnnotations()
if annotations == nil {
return nil
}

backupsValue := annotations[VolumesToBackupAnnotation]
if backupsValue == "" {
return nil
}

return strings.Split(backupsValue, ",")
}

func getVolumesToExclude(obj metav1.Object) []string {
annotations := obj.GetAnnotations()
if annotations == nil {
return nil
}

return strings.Split(annotations[VolumesToExcludeAnnotation], ",")
}

func contains(list []string, k string) bool {
for _, i := range list {
if i == k {
return true
}
}
return false
}

// GetVolumesByPod returns a list of volume names to backup for the provided pod.
func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup bool) ([]string, []string) {
// tracks the volumes that have been explicitly opted out of backup via the annotation in the pod
optedOutVolumes := make([]string, 0)

if !defaultVolumesToFsBackup {
return GetVolumesToBackup(pod), optedOutVolumes
}

volsToExclude := getVolumesToExclude(pod)
podVolumes := []string{}
for _, pv := range pod.Spec.Volumes {
// cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods
// and therefore not accessible to the node agent daemon set.
if pv.HostPath != nil {
continue
}
// don't backup volumes mounting secrets. Secrets will be backed up separately.
if pv.Secret != nil {
continue
}
// don't backup volumes mounting config maps. Config maps will be backed up separately.
if pv.ConfigMap != nil {
continue
}
// don't backup volumes mounted as projected volumes, all data in those come from kube state.
if pv.Projected != nil {
continue
}
// don't backup DownwardAPI volumes, all data in those come from kube state.
if pv.DownwardAPI != nil {
continue
}
// don't backup volumes that are included in the exclude list.
if contains(volsToExclude, pv.Name) {
optedOutVolumes = append(optedOutVolumes, pv.Name)
continue
}
// don't include volumes that mount the default service account token.
if strings.HasPrefix(pv.Name, "default-token") {
continue
}
podVolumes = append(podVolumes, pv.Name)
}
return podVolumes, optedOutVolumes
}

@@ -17,12 +17,10 @@ limitations under the License.
package podvolume

import (
"sort"
"testing"

"github.com/stretchr/testify/assert"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
@@ -303,322 +301,3 @@ func TestVolumeHasNonRestorableSource(t *testing.T) {

}
}

func TestGetVolumesToBackup(t *testing.T) {
tests := []struct {
name string
annotations map[string]string
expected []string
}{
{
name: "nil annotations",
annotations: nil,
expected: nil,
},
{
name: "no volumes to backup",
annotations: map[string]string{"foo": "bar"},
expected: nil,
},
{
name: "one volume to backup",
annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1"},
expected: []string{"volume-1"},
},
{
name: "multiple volumes to backup",
annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1,volume-2,volume-3"},
expected: []string{"volume-1", "volume-2", "volume-3"},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pod := &corev1api.Pod{}
pod.Annotations = test.annotations

res := GetVolumesToBackup(pod)

// sort to ensure good compare of slices
sort.Strings(test.expected)
sort.Strings(res)

assert.Equal(t, test.expected, res)
})
}
}

func TestGetVolumesByPod(t *testing.T) {
testCases := []struct {
name string
pod *corev1api.Pod
expected struct {
included []string
optedOut []string
}
defaultVolumesToFsBackup bool
}{
{
name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToFsBackup is false",
defaultVolumesToFsBackup: false,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToBackupAnnotation: "pvbPV1,pvbPV2,pvbPV3",
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{},
},
},
{
name: "should get all pod volumes when defaultVolumesToFsBackup is true and no PVs are excluded",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{},
},
},
{
name: "should get all pod volumes except ones excluded when defaultVolumesToFsBackup is true",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from PVB through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{"nonPvbPV1", "nonPvbPV2", "nonPvbPV3"},
},
},
{
name: "should exclude default service account token from pod volume backup",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from PVB because colume mounting default service account token
{Name: "default-token-5xq45"},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{},
},
},
{
name: "should exclude host path volumes from pod volume backups",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: &corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{"nonPvbPV1", "nonPvbPV2", "nonPvbPV3"},
},
},
{
name: "should exclude volumes mounting secrets",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{"nonPvbPV1", "nonPvbPV2", "nonPvbPV3"},
},
},
{
name: "should exclude volumes mounting config maps",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "appCOnfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{"nonPvbPV1", "nonPvbPV2", "nonPvbPV3"},
},
},
{
name: "should exclude projected volumes",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{
Sources: []corev1api.VolumeProjection{{
Secret: &corev1api.SecretProjection{
LocalObjectReference: corev1api.LocalObjectReference{},
Items: nil,
Optional: nil,
},
DownwardAPI: nil,
ConfigMap: nil,
ServiceAccountToken: nil,
}},
DefaultMode: nil,
},
},
},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{},
},
},
{
name: "should exclude DownwardAPI volumes",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
{
Name: "downwardAPI",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{
Items: []corev1api.DownwardAPIVolumeFile{
{
Path: "labels",
FieldRef: &corev1api.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
},
},
},
},
},
},
},
},
expected: struct {
included []string
optedOut []string
}{
included: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
optedOut: []string{},
},
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actualIncluded, actualOptedOut := GetVolumesByPod(tc.pod, tc.defaultVolumesToFsBackup)

sort.Strings(tc.expected.included)
sort.Strings(actualIncluded)
assert.Equal(t, tc.expected.included, actualIncluded)

sort.Strings(tc.expected.optedOut)
sort.Strings(actualOptedOut)
assert.Equal(t, tc.expected.optedOut, actualOptedOut)
})
}
}

@@ -107,9 +107,9 @@ func NewBackupRepository(namespace string, key BackupRepositoryKey) *velerov1api
}

func isBackupRepositoryNotFoundError(err error) bool {
return (err == errBackupRepoNotFound)
return err == errBackupRepoNotFound
}

func isBackupRepositoryNotProvisionedError(err error) bool {
return (err == errBackupRepoNotProvisioned)
return err == errBackupRepoNotProvisioned
}

@@ -64,7 +64,7 @@ func GetS3ResticEnvVars(config map[string]string) (map[string]string, error) {
result[awsSecretKeyEnvVar] = creds.SecretAccessKey
result[awsSessTokenEnvVar] = creds.SessionToken
result[awsCredentialsFileEnvVar] = ""
result[awsProfileEnvVar] = ""
result[awsProfileEnvVar] = "" // profile is not needed since we have the credentials from profile via GetS3Credentials
result[awsConfigFileEnvVar] = ""
}

@@ -81,12 +81,13 @@ func GetS3Credentials(config map[string]string) (*credentials.Value, error) {
opts := session.Options{}
credentialsFile := config[CredentialsFileKey]
if credentialsFile == "" {
credentialsFile = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
credentialsFile = os.Getenv(awsCredentialsFileEnvVar)
}
if credentialsFile != "" {
opts.SharedConfigFiles = append(opts.SharedConfigFiles, credentialsFile)
opts.SharedConfigState = session.SharedConfigEnable
}
opts.Profile = config[awsProfileKey]

sess, err := session.NewSessionWithOptions(opts)
if err != nil {
Some files were not shown because too many files have changed in this diff.