Compare commits


15 Commits

Author SHA1 Message Date
Wenkai Yin(尹文开)
a818c97dde Merge pull request #4602 from reasonerjt/cleanup-changelog-1.8
Clean up the change log for v1.8
2022-01-30 21:55:59 +08:00
Wenkai Yin(尹文开)
106f8a0376 Merge branch 'release-1.8' into cleanup-changelog-1.8 2022-01-30 21:42:43 +08:00
Wenkai Yin(尹文开)
3c636760af Merge pull request #4604 from reasonerjt/custom-plugin-doc-update-1.8
[Cherry pick to 1.8] Undeprecate the volumesnapshot plugin in the doc
2022-01-30 21:41:18 +08:00
Daniel Jiang
de1722b001 Undeprecate the volumesnapshot plugin in the doc
Since Itemsnapshotter plugin is still WIP,
this commit removes the reference and the deprecation of volumeSnapshotter plugin
from the doc to avoid confusion.
We'll update the doc when it's ready and we have a reference
implementation.

Signed-off-by: Daniel Jiang <jiangd@vmware.com>
2022-01-30 16:15:53 +08:00
Daniel Jiang
0ceb3850e7 Clean up the change log
Clean up the cherrypicked changelog files for 1.8 branch

Signed-off-by: Daniel Jiang <jiangd@vmware.com>
2022-01-30 14:35:03 +08:00
qiuming
ae4cfa9e0e Merge pull request #4600 from danfengliu/cherry-pick-change-1.8-e2e-plugins-version-to-release-version
[cherry pick 1.8] Change 1.8 plugins version to release version
2022-01-29 18:30:51 +08:00
danfengl
796913c3cc cherry pick to 1.8 Change 1.8 plugins version to release version
Signed-off-by: danfengl <danfengl@vmware.com>
2022-01-29 10:19:22 +00:00
danfengliu
4a8d2b00fb Merge pull request #4596 from danfengliu/cherry-pick-add-1.8-plugin-to-e2e
[cherry-pick 1.8] Add 1.8 plugins map in e2e test
2022-01-29 17:43:29 +08:00
danfengl
ccf699171d Cherry pick to 1.8 - Add 1.8 plugins map in e2e test
Signed-off-by: danfengl <danfengl@vmware.com>
2022-01-29 09:30:32 +00:00
qiuming
c11b682ec4 Merge pull request #4594 from qiuming-best/release-1.8
E2E SSR test add retry mechanism and logs
2022-01-29 17:06:51 +08:00
Ming
1d656a0aec E2E SSR test add retry mechanism and logs
Signed-off-by: Ming <mqiu@vmware.com>
2022-01-29 16:25:01 +08:00
Wenkai Yin(尹文开)
764143da8d Merge pull request #4568 from reasonerjt/fix-custom-plugin-doc-1.8
[Cherrypick-1.8] Remove reference of restic_restore_action.go from the doc
2022-01-26 10:21:01 +08:00
Daniel Jiang
dbce8a2e90 Remove reference of restic_restore_action.go from the doc
fixes #4554

Signed-off-by: Daniel Jiang <jiangd@vmware.com>
2022-01-24 15:26:53 +08:00
danfengliu
1a8e660ea3 Merge pull request #4535 from reasonerjt/pin-img
Pin the base image and golang img for v1.8.0 release
2022-01-17 09:38:46 +08:00
Daniel Jiang
3607bf0f72 Pin the base image and golang img for v1.8.0 release
Signed-off-by: Daniel Jiang <jiangd@vmware.com>
2022-01-16 23:54:59 +08:00
305 changed files with 4150 additions and 17678 deletions

View File

@@ -9,13 +9,12 @@ reviewers:
groups:
maintainers:
- zubron
- dsu-igeek
- jenting
- sseago
- reasonerjt
- ywk253100
- blackpiglet
- qiuming-best
- shubham-pampattiwar
tech-writer:
- a-mccarthy

.github/stale.yml vendored
View File

@@ -14,15 +14,9 @@ exemptLabels:
- Area/Design
- Area/Documentation
- Area/Plugins
- Bug
- Enhancement/User
- kind/requirement
- kind/refactor
- kind/tech-debt
- limitation
- Needs investigation
- Needs triage
- Needs Product
- P0 - Hair on fire
- P1 - Important
- P2 - Long-term important

View File

@@ -64,13 +64,12 @@ jobs:
#- 1.15.12
- 1.16.15
- 1.17.17
- 1.18.20
- 1.19.16
- 1.20.15
- 1.21.12
- 1.22.9
- 1.23.6
- 1.24.0
- 1.18.15
- 1.19.7
- 1.20.2
- 1.21.1
- 1.22.0
- 1.23.0
fail-fast: false
steps:
- name: Set up Go
@@ -85,7 +84,7 @@ jobs:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
- uses: engineerd/setup-kind@v0.5.0
with:
version: "v0.14.0"
version: "v0.11.1"
image: "kindest/node:v${{ matrix.k8s }}"
- name: Fetch built CLI
id: cli-cache
@@ -122,12 +121,5 @@ jobs:
CREDS_FILE=/tmp/credential BSL_BUCKET=bucket \
ADDITIONAL_OBJECT_STORE_PROVIDER=aws ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
ADDITIONAL_CREDS_FILE=/tmp/credential ADDITIONAL_BSL_BUCKET=additional-bucket \
GINKGO_FOCUS='Basic\].+\[ClusterResource' VELERO_IMAGE=velero:pr-test \
make -C test/e2e run
timeout-minutes: 30
- name: Upload debug bundle
if: ${{ failure() }}
uses: actions/upload-artifact@v2
with:
name: DebugBundle
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
GINKGO_FOCUS=Basic VELERO_IMAGE=velero:pr-test \
make -C test/e2e run

View File

@@ -21,9 +21,3 @@ jobs:
${{ runner.os }}-go-
- name: Make ci
run: make ci
- name: Upload test coverage
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.out
verbose: true

View File

@@ -12,16 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
# The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
# there are at least two commits: the first one is the merge commit and the second one is the real commit
# contains the changes.
# As we use the Dockerfile's commit ID as the tag of the build-image, fetching only 1 commit causes the merge
# commit ID to be the tag.
# While when running make commands locally, as the local git repository usually contains all commits, the Dockerfile's
# commit ID is the second one. This is a mismatch with the images in Dockerhub
fetch-depth: 2
- uses: actions/checkout@master
- name: Build
run: make build-image

View File

@@ -2,9 +2,7 @@ name: Main CI
on:
push:
branches:
- 'main'
- 'release-**'
branches: [ main ]
tags:
- '*'
@@ -42,29 +40,9 @@ jobs:
- name: Test
run: make test
- name: Upload test coverage
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.out
verbose: true
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
- name: Publish container image
if: github.repository == 'vmware-tanzu/velero'
run: |
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
./hack/docker-push.sh
# Use the JSON key in secret to login gcr.io
- uses: 'docker/login-action@v1'
with:
registry: 'gcr.io' # or REGION.docker.pkg.dev
username: '_json_key'
password: '${{ secrets.GCR_SA_KEY }}'
# Push image to GCR to facilitate some environments that have rate limitation to docker hub, e.g. vSphere.
- name: Publish container image to GCR
if: github.repository == 'vmware-tanzu/velero'
run: |
REGISTRY=gcr.io/velero-gcp ./hack/docker-push.sh

View File

@@ -14,7 +14,7 @@
dist: _output
builds:
- main: ./cmd/velero/velero.go
- main: ./cmd/velero/main.go
env:
- CGO_ENABLED=0
goos:

View File

@@ -14,7 +14,7 @@ If you're using Velero and want to add your organization to this list,
<a href="https://sighup.io/" border="0" target="_blank"><img alt="sighup.io" src="site/static/img/adopters/sighup.svg" height="50"></a>&nbsp; &nbsp; &nbsp;
<a href="https://mayadata.io/" border="0" target="_blank"><img alt="mayadata.io" src="site/static/img/adopters/mayadata.svg" height="50"></a>&nbsp; &nbsp; &nbsp;
<a href="https://www.replicated.com/" border="0" target="_blank"><img alt="replicated.com" src="site/static/img/adopters/replicated-logo-red.svg" height="50"></a>
<a href="https://cloudcasa.io/" border="0" target="_blank"><img alt="cloudcasa.io" src="site/static/img/adopters/cloudcasa.svg" height="50"></a>
## Success Stories
Below is a list of adopters of Velero in **production environments** that have
@@ -56,11 +56,8 @@ MayaData is a large user of Velero as well as a contributor. MayaData offers a D
Okteto integrates Velero in [Okteto Cloud][94] and [Okteto Enterprise][95] to periodically backup and restore our clusters for disaster recovery. Velero is also a core software building block to provide namespace cloning capabilities, a feature that allows our users cloning staging environments into their personal development namespace for providing production-like development environments.
**[Replicated][100]**<br>
Replicated uses the Velero open source project to enable snapshots in [KOTS][101] to backup Kubernetes manifests & persistent volumes. In addition to the default functionality that Velero provides, [KOTS][101] provides a detailed interface in the [Admin Console][102] that can be used to manage the storage destination and schedule, and to perform and monitor the backup and restore process.<br>
**[CloudCasa][103]**<br>
[Catalogic Software][104] integrates Velero with [CloudCasa][103] - A Smart Home in the Cloud for Backups. CloudCasa is a simple, scalable, cloud-native solution providing data protection and disaster recovery as a service. This solution is built using Kubernetes for protecting Kubernetes clusters.<br>
## Adding your organization to the list of Velero Adopters
If you are using Velero and would like to be included in the list of `Velero Adopters`, add an SVG version of your logo to the `site/static/img/adopters` directory in this repo and submit a [pull request][3] with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). See this for an example [PR][4].
@@ -113,6 +110,3 @@ If you would like to add your logo to a future `Adopters of Velero` section on [
[100]: https://www.replicated.com
[101]: https://kots.io
[102]: https://kots.io/kotsadm/snapshots/overview/
[103]: https://cloudcasa.io/
[104]: https://www.catalogicsoftware.com/

View File

@@ -1,9 +1,7 @@
## Current release:
* [CHANGELOG-1.9.md][19]
* [CHANGELOG-1.7.md][17]
## Older releases:
* [CHANGELOG-1.8.md][18]
* [CHANGELOG-1.7.md][17]
* [CHANGELOG-1.6.md][16]
* [CHANGELOG-1.5.md][15]
* [CHANGELOG-1.4.md][14]
@@ -22,8 +20,6 @@
* [CHANGELOG-0.3.md][1]
[19]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.9.md
[18]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.8.md
[17]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.7.md
[16]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.6.md
[15]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.5.md

View File

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$BUILDPLATFORM golang:1.17.11 as builder-env
FROM --platform=$BUILDPLATFORM golang:1.17.6 as builder-env
ARG GOPROXY
ARG PKG
@@ -50,7 +50,8 @@ RUN mkdir -p /output/usr/bin && \
go build -o /output/${BIN} \
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}
FROM gcr.io/distroless/base-debian11@sha256:49d2923f35d66b8402487a7c01bc62a66d8279cd42e89c11b64cdce8d5826c03
# The digest for tag 'nonroot' at the time of the release
FROM gcr.io/distroless/base-debian10@sha256:6dc8ca7c3bbdb1a00fd8f1229b1b8c88986a5818b830e3a42d4946982dbbf18b
LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"

View File

@@ -6,13 +6,12 @@
| Maintainer | GitHub ID | Affiliation |
| --------------- | --------- | ----------- |
| Dave Smith-Uchida | [dsu-igeek](https://github.com/dsu-igeek) | [Kasten](https://github.com/kastenhq/) |
| Bridget McErlean | [zubron](https://github.com/zubron) | [VMware](https://www.github.com/vmware/) |
| Dave Smith-Uchida | [dsu-igeek](https://github.com/dsu-igeek) | [VMware](https://www.github.com/vmware/) |
| JenTing Hsiao | [jenting](https://github.com/jenting) | [SUSE](https://github.com/SUSE/)
| Scott Seago | [sseago](https://github.com/sseago) | [OpenShift](https://github.com/openshift)
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | [VMware](https://www.github.com/vmware/)
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | [VMware](https://www.github.com/vmware/) |
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
| Ming Qiu | [qiuming-best](https://github.com/qiuming-best) | [VMware](https://www.github.com/vmware/) |
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift)
## Emeritus Maintainers
* Adnan Abdulhussein ([prydonius](https://github.com/prydonius))
@@ -22,8 +21,6 @@
* Nolan Brubaker ([nrb](https://github.com/nrb))
* Ashish Amarnath ([ashish-amarnath](https://github.com/ashish-amarnath))
* Carlisia Thompson ([carlisia](https://github.com/carlisia))
* Bridget McErlean ([zubron](https://github.com/zubron))
* JenTing Hsiao ([jenting](https://github.com/jenting))
## Velero Contributors & Stakeholders
@@ -32,6 +29,6 @@
| Architect | Dave Smith-Uchida (dsu-igeek) |
| Technical Lead | Daniel Jiang (reasonerjt) |
| Kubernetes CSI Liaison | |
| Deployment | |
| Community Management | Orlin Vasilev (OrlinVasilev) |
| Deployment | JenTing Hsiao (jenting) |
| Community Management | Jonas Rosland (jonasrosland) |
| Product Management | Eleanor Millman (eleanor-millman) |

View File

@@ -82,7 +82,7 @@ see: https://velero.io/docs/main/build-from-source/#making-images-and-updating-v
endef
# The version of restic binary to be downloaded
RESTIC_VERSION ?= 0.13.1
RESTIC_VERSION ?= 0.12.1
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le
BUILDX_PLATFORMS ?= $(subst -,/,$(ARCH))
@@ -125,7 +125,6 @@ all-containers: container-builder-env
@$(MAKE) --no-print-directory container BIN=velero-restic-restore-helper
local: build-dirs
# Add DEBUG=1 to enable debug locally
GOOS=$(GOOS) \
GOARCH=$(GOARCH) \
VERSION=$(VERSION) \

View File

@@ -38,20 +38,13 @@ See [the list of releases][6] to find out about feature changes.
The following is a list of the supported Kubernetes versions for each Velero version.
| Velero version | Expected Kubernetes version compatibility| Tested on Kubernetes version|
|----------------|--------------------|--------------------|
| 1.9 | 1.16-latest | 1.20.5, 1.21.2, 1.22.5, 1.23, and 1.24 |
| 1.8 | 1.16-latest | |
| 1.6.3-1.7.1 | 1.12-latest ||
| 1.6.0-1.6.2 | 1.12-1.21 ||
| 1.5 | 1.12-1.21 ||
| 1.4 | 1.10-1.21 | |
Velero supports IPv4, IPv6, and dual stack environments. Support for this was tested against Velero v1.8.
The Velero maintainers are continuously working to expand testing coverage, but are not able to test every combination of Velero and supported Kubernetes versions for each Velero release. The table above is meant to track the current testing coverage and the expected supported Kubernetes versions for each Velero version. If you have a question about test coverage before v1.9, please reach out in the [#velero-users](https://kubernetes.slack.com/archives/C6VCGP4MT) Slack channel.
If you are interested in using a different version of Kubernetes with a given Velero version, we'd recommend that you perform testing before installing or upgrading your environment. For full information around capabilities within a release, also see the Velero [release notes](https://github.com/vmware-tanzu/velero/releases) or Kubernetes [release notes](https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG). See the Velero [support page](https://velero.io/docs/latest/support-process/) for information about supported versions of Velero.
| Velero version | Kubernetes versions|
|----------------|--------------------|
| 1.8 | 1.16-latest |
| 1.6.3-1.7.1 | 1.12-latest |
| 1.6.0-1.6.2 | 1.12-1.21 |
| 1.5 | 1.12-1.21 |
| 1.4 | 1.10-1.21 |
[1]: https://github.com/vmware-tanzu/velero/workflows/Main%20CI/badge.svg
[2]: https://github.com/vmware-tanzu/velero/actions?query=workflow%3A"Main+CI"

View File

@@ -1 +1,42 @@
# Please go to the [Velero Wiki](https://github.com/vmware-tanzu/velero/wiki/) to see our latest roadmap, archived roadmaps and roadmap guidance.
## Velero Roadmap
### About this document
This document provides a link to the [Velero Project boards](https://github.com/vmware-tanzu/velero/projects) that serve as the up-to-date description of items in the release pipeline. The release boards have separate swim lanes based on prioritization. Most items are gathered from the community or include a feedback loop with the community. This should serve as a reference point for Velero users and contributors to understand where the project is heading, and help determine if a contribution could conflict with a longer-term plan.
### How to help?
Discussion on the roadmap can take place in threads under [Issues](https://github.com/vmware-tanzu/velero/issues) or in [community meetings](https://velero.io/community/). Please open and comment on an issue if you want to provide suggestions, use cases, and feedback to an item in the roadmap. Please review the roadmap to avoid potential duplicated effort.
### How to add an item to the roadmap?
One of the most important aspects in any open source community is the concept of proposals. Large changes to the codebase and / or new features should be preceded by a [proposal](https://github.com/vmware-tanzu/velero/blob/main/GOVERNANCE.md#proposal-process) in our repo.
For smaller enhancements, you can open an issue to track that initiative or feature request.
We work with and rely on community feedback to focus our efforts to improve Velero and maintain a healthy roadmap.
### Current Roadmap
The following table includes the current roadmap for Velero. If you have any questions or would like to contribute to Velero, please attend a [community meeting](https://velero.io/community/) to discuss with our team. If you don't know where to start, we are always looking for contributors that will help us reduce technical, automation, and documentation debt.
Please take the timelines & dates as proposals and goals. Priorities and requirements change based on community feedback, roadblocks encountered, community contributions, etc. If you depend on a specific item, we encourage you to attend community meetings to get updated status information, or help us deliver that feature by contributing to Velero.
`Last Updated: October 2021`
#### 1.8.0 Roadmap (to be delivered January/February 2022)
|Issue|Description|Timeline|Notes|
|---|---|---|---|
|[4108](https://github.com/vmware-tanzu/velero/issues/4108), [4109](https://github.com/vmware-tanzu/velero/issues/4109)|Solution for CSI - Azure and AWS|2022 H1|Currently, Velero plugins for AWS and Azure cannot back up persistent volumes that were provisioned using the CSI driver. This will fix that.|
|[3229](https://github.com/vmware-tanzu/velero/issues/3229),[4112](https://github.com/vmware-tanzu/velero/issues/4112)|Moving data mover functionality from the Velero Plugin for vSphere into Velero proper|2022 H1|This work is a precursor to decoupling the Astrolabe snapshotting infrastructure.|
|[3533](https://github.com/vmware-tanzu/velero/issues/3533)|Upload Progress Monitoring|2022 H1|Finishing up the work done in the 1.7 timeframe. The data mover work depends on this.|
|[1975](https://github.com/vmware-tanzu/velero/issues/1975)|Test dual stack mode|2022 H1|We already tested IPv6, but we want to confirm that dual stack mode works as well.|
|[2082](https://github.com/vmware-tanzu/velero/issues/2082)|Delete Backup CRs on removing target location. |2022 H1||
|[3516](https://github.com/vmware-tanzu/velero/issues/3516)|Restore issue with MutatingWebhookConfiguration v1beta1 API version|2022 H1||
|[2308](https://github.com/vmware-tanzu/velero/issues/2308)|Restoring nodePort service that has nodePort preservation always fails if service already exists in the namespace|2022 H1||
|[4115](https://github.com/vmware-tanzu/velero/issues/4115)|Support for multiple set of credentials for VolumeSnapshotLocations|2022 H1||
|[1980](https://github.com/vmware-tanzu/velero/issues/1980)|Velero triggers backup immediately for scheduled backups|2022 H1||
|[4067](https://github.com/vmware-tanzu/velero/issues/4067)|Pre and post backup and restore hooks|2022 H1||
|[3742](https://github.com/vmware-tanzu/velero/issues/3742)|Carvel packaging for Velero for vSphere|2022 H1|AWS and Azure have been completed already.|
|[3285](https://github.com/vmware-tanzu/velero/issues/3285)|Design doc for Velero plugin versioning|2022 H1||
|[4231](https://github.com/vmware-tanzu/velero/issues/4231)|Technical health (prioritizing giving developers confidence and saving developers time)|2022 H1|More automated tests (especially the pre-release manual tests) and more automation of the running of tests.|
|[4110](https://github.com/vmware-tanzu/velero/issues/4110)|Solution for CSI - GCP|2022 H1|Currently, the Velero plugin for GCP cannot back up persistent volumes that were provisioned using the CSI driver. This will fix that.|
|[3742](https://github.com/vmware-tanzu/velero/issues/3742)|Carvel packaging for Velero for restic|2022 H1|AWS and Azure have been completed already.|
|[3454](https://github.com/vmware-tanzu/velero/issues/3454),[4134](https://github.com/vmware-tanzu/velero/issues/4134),[4135](https://github.com/vmware-tanzu/velero/issues/4135)|Kubebuilder tech debt|2022 H1||
|[4111](https://github.com/vmware-tanzu/velero/issues/4111)|Ignore items returned by ItemSnapshotter.AlsoHandles during backup|2022 H1|This will enable backup of complex objects, because we can then tell Velero to ignore things that were already backed up when Velero was previously called recursively.|
Other work may make it into the 1.8 release, but this is the work that will be prioritized first.

View File

@@ -103,7 +103,7 @@ local_resource(
local_resource(
"restic_binary",
cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=linux GOARCH=amd64 RESTIC_VERSION=0.13.1 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=linux GOARCH=amd64 RESTIC_VERSION=0.12.0 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
)
# Note: we need a distro with a bash shell to exec into the Velero container

View File

@@ -15,8 +15,8 @@ https://velero.io/docs/v1.8/upgrade-to-1.8/
### Highlights
#### Velero plugins now support handling volumes created by the CSI drivers of cloud providers
Versions 1.4 of the Velero plugins for AWS, Azure and GCP now support snapshotting and restoring the persistent volumes provisioned by the CSI driver via the APIs of the cloud providers. With this enhancement, users can back up and restore the persistent volumes on these cloud providers without using the Velero CSI plugin. The CSI plugin will remain beta and the feature flag `EnableCSI` will be disabled by default.
#### The plugins to support handling volumes created by the CSI drivers of cloud providers
The new versions of the plugins for AWS, Azure and GCP will be released with support for snapshotting and restoring the persistent volumes provisioned by the CSI driver via the APIs of the cloud providers. With this enhancement, users can back up and restore the persistent volumes on these cloud providers without using the CSI plugin, which will remain beta, and the feature flag `EnableCSI` will be disabled by default.
For the versions of the plugins and the CSI drivers they respectively support, please see the table:
@@ -26,30 +26,13 @@ For the version of the plugins and the CSI drivers they support respectively ple
| velero-plugin-for-microsoft-azure | v1.4.0 | disk.csi.azure.com |
| velero-plugin-for-gcp | v1.4.0 | pd.csi.storage.gke.io |
#### IPv6 dual stack support
We've verified the functionality of Velero on IPv6 dual stack by successfully running the E2E tests in an IPv6 dual stack environment.
#### Refactor the controllers using Kubebuilder v3
In this release we continued our code modernization work, rewriting some controllers using Kubebuilder v3. This work is ongoing and we will continue to make progress in future releases.
#### Enhancements to E2E test cases
More test cases have been added to the E2E test suite to improve the release health.
#### Respect the cron setting of scheduled backup
The creation time is now taken into account to calculate the next run for scheduled backup.
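Velero schedules are defined with standard cron expressions, and this fix means the next run is computed relative to the schedule's creation time rather than firing immediately. Below is a minimal Go sketch of the idea using the robfig/cron library; it is an illustration only, not Velero's actual scheduling code.
```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Parse a standard 5-field cron expression (daily at 03:00).
	sched, err := cron.ParseStandard("0 3 * * *")
	if err != nil {
		panic(err)
	}
	// Using the schedule's creation time as the baseline means the first
	// backup fires at the next cron boundary, not immediately on creation.
	created := time.Date(2022, 1, 30, 14, 0, 0, 0, time.UTC)
	fmt.Println("next run:", sched.Next(created)) // 2022-01-31 03:00:00 +0000 UTC
}
```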
#### Deleting BSLs also cleans up related resources
When a Backup Storage Location (BSL) is deleted, backup and Restic repository resources will also be deleted.
#### Breaking changes
Starting in v1.8, Velero will only support Kubernetes v1 CRD meaning that Velero v1.8+ will only run on Kubernetes v1.16+. Before upgrading, make sure you are running a supported Kubernetes version. For more information, see our [compatibility matrix](https://github.com/vmware-tanzu/velero#velero-compatibility-matrix).
#### Upload Progress Monitoring and Item Snapshotter
Item Snapshotter plugin API was merged. This will support both Upload Progress
monitoring and the planned Data Mover. Upload Progress monitoring PRs are
in progress for 1.9.
#### Breaking change
Starting in v1.8, Velero will only support v1 CRDs; therefore, it will only run on Kubernetes v1.16+
### All changes
* E2E SSR test add retry mechanism and logs (#4594, @mqiu)
* Update doc for v1.8 (#4517, @reasonerjt)
* E2E test on ssr object with controller namespace mix-ups (#4521, @mqiu)
* Check whether the volume is provisioned by CSI driver or not by the annotation as well (#4513, @ywk253100)
* Initialize the labels field of `velero backup-location create` option to avoid #4484 (#4491, @ywk253100)
@@ -108,3 +91,4 @@ Also added DownloadTargetKindBackupItemSnapshots for retrieving the signed URL t
Part of the Upload Progress enhancement (#3533) (#4077, @dsmithuchida)
* Add upgrade test in E2E test (#4058, @danfengliu)
* Handle namespace mapping for PVs without snapshots on restore (#3708, @sseago)

View File

@@ -1,131 +0,0 @@
## v1.9.1
### 2022-08-03
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.9.1
### Container Image
`velero/velero:v1.9.1`
### Documentation
https://velero.io/docs/v1.9/
### Upgrading
https://velero.io/docs/v1.9/upgrade-to-1.9/
### All changes
* Fix bsl validation bug: the BSL is validated continually and doesn't respect the validation period configured (#5112, @ywk253100)
* Modify BackupStoreGetter to avoid BSL spec changes (#5134, @sseago)
* Delay CA file deletion in PVB controller. (#5150, @jxun)
* Skip registering "crd-remap-version" plugin when feature flag "EnableAPIGroupVersions" is set (#5173, @reasonerjt)
* Fix restic backups to multiple backup storage locations bug (#5175, @qiuming-best)
* Make CSI snapshot creation timeout configurable. (#5189, @jxun)
* Add annotation "pv.kubernetes.io/migrated-to" for CSI checking. (#5186, @jxun)
* Bump up base image and package version to fix CVEs. (#5202, @ywk253100)
## v1.9.0
### 2022-06-13
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.9.0
### Container Image
`velero/velero:v1.9.0`
### Documentation
https://velero.io/docs/v1.9/
### Upgrading
https://velero.io/docs/v1.9/upgrade-to-1.9/
### Highlights
#### Improvement to the CSI plugin
- Bump up to the CSI volume snapshot v1 API
- No VolumeSnapshot will be left in the source namespace of the workload
- Report metrics for CSI snapshots
For more improvements, please refer to [CSI plugin improvement](https://github.com/vmware-tanzu/velero/issues?q=is%3Aissue+label%3A%22CSI+plugin+-+GA+-+phase1%22+is%3Aclosed)
With these improvements, we'll provide official support for CSI snapshots on AKS/EKS clusters (with CSI plugin v0.3.0).
#### Refactor the controllers using Kubebuilder v3
In this release we continued our code modernization work, rewriting some controllers using Kubebuilder v3. This work is ongoing and we will continue to make progress in future releases.
#### Optionally restore status on selected resources
Options are added to the CLI and Restore spec to control the group of resources whose status will be restored.
#### ExistingResourcePolicy in the restore API
Users can choose to overwrite or patch the existing resources during restore by setting this policy.
#### Upgrade integrated Restic version and add skip TLS validation in Restic command
Upgrade the integrated Restic version, which resolves some of the CVEs, and support skipping TLS validation in Restic backup/restore.
#### Breaking changes
With bumping up the API to v1 in CSI plugin, the v0.3.0 CSI plugin will only work for Kubernetes v1.20+
### All changes
* restic: add full support for setting SecurityContext for restore init container from configMap. (#4084, @MatthieuFin)
* Add metrics backup_items_total and backup_items_errors (#4296, @tobiasgiese)
* Convert PodVolumebackup controller to the Kubebuilder framework (#4436, @fgold)
* Skip not mounted volumes when backing up (#4497, @dkeven)
* Update doc for v1.8 (#4517, @reasonerjt)
* Fix bug to make the restic prune frequency configurable (#4518, @ywk253100)
* Add E2E test of backups sync from BSL (#4545, @mqiu)
* Fix: OrderedResources in Schedules (#4550, @dbrekau)
* Skip volumes of non-running pods when backing up (#4584, @bynare)
* E2E SSR test add retry mechanism and logs (#4591, @mqiu)
* Add pushing image to GCR in github workflow to facilitate some environments that have rate limitation to docker hub, e.g. vSphere. (#4623, @jxun)
* Add existingResourcePolicy to Restore API (#4628, @shubham-pampattiwar)
* Fix E2E backup namespaces test (#4634, @qiuming-best)
* Update image used by E2E test to gcr.io (#4639, @jxun)
* Add multiple label selector support to Velero Backup and Restore APIs (#4650, @shubham-pampattiwar)
* Convert Pod Volume Restore resource/controller to the Kubebuilder framework (#4655, @ywk253100)
* Update --use-owner-references-in-backup description in velero command line. (#4660, @jxun)
* Avoid overwritten hook's exec.container parameter when running pod command executor. (#4661, @jxun)
* Support regional pv for GKE (#4680, @jxun)
* Bypass the remap CRD version plugin when v1beta1 CRD is not supported (#4686, @reasonerjt)
* Add GINKGO_SKIP to support skip specific case in e2e test. (#4692, @jxun)
* Add --pod-labels flag to velero install (#4694, @j4m3s-s)
* Enable coverage in test.sh and upload to codecov (#4704, @reasonerjt)
* Mark the BSL as "Unavailable" when gets any error and add a new field "Message" to the status to record the error message (#4719, @ywk253100)
* Support multiple skip option for E2E test (#4725, @jxun)
* Add PriorityClass to the AdditionalItems of Backup's PodAction and Restore's PodAction plugin to backup and restore PriorityClass if it is used by a Pod. (#4740, @phuongatemc)
* Insert all restore errors and warnings into restore log. (#4743, @sseago)
* Refactor schedule controller with kubebuilder (#4748, @ywk253100)
* Garbage collector now adds labels to backups that failed to delete for BSLNotFound, BSLCannotGet, BSLReadOnly reasons. (#4757, @kaovilai)
* Skip podvolumerestore creation when restore excludes pv/pvc (#4769, @half-life666)
* Add parameter for e2e test to support modify kibishii install path. (#4778, @jxun)
* Ensure the restore hook applied to new namespace based on the mapping (#4779, @reasonerjt)
* Add ability to restore status on selected resources (#4785, @RafaeLeal)
* Do not take snapshot for PV to avoid duplicated snapshotting, when CSI feature is enabled. (#4797, @jxun)
* Bump up to v1 API for CSI snapshot (#4800, @reasonerjt)
* fix: delete empty backups (#4817, @yuvalman)
* Add CSI VolumeSnapshot related metrics. (#4818, @jxun)
* Fix default-backup-ttl not work (#4831, @qiuming-best)
* Make the vsc created by backup sync controller deletable (#4832, @reasonerjt)
* Make in-progress backup/restore as failed when doing the reconcile to avoid hanging in in-progress status (#4833, @ywk253100)
* Use controller-gen to generate the deep copy methods for objects (#4838, @ywk253100)
* Update integrated Restic version and add insecureSkipTLSVerify for Restic CLI. (#4839, @jxun)
* Modify CSI VolumeSnapshot metric related code. (#4854, @jxun)
* Refactor backup deletion controller based on kubebuilder (#4855, @reasonerjt)
* Remove VolumeSnapshots created during backup when CSI feature is enabled. (#4858, @jxun)
* Convert Restic Repository resource/controller to the Kubebuilder framework (#4859, @qiuming-best)
* Add ClusterClasses to the restore priority list (#4866, @reasonerjt)
* Cleanup the .velero folder after restic done (#4872, @big-appled)
* Delete orphan CSI snapshots in backup sync controller (#4887, @reasonerjt)
* Make waiting VolumeSnapshot to ready process parallel. (#4889, @jxun)
* continue rather than return for non-matching restore action label (#4890, @sseago)
* Make in-progress PVB/PVR as failed when restic controller restarts to avoid hanging backup/restore (#4893, @ywk253100)
* Refactor BSL controller with periodical enqueue source (#4894, @jxun)
* Make garbage collection for expired backups configurable (#4897, @ywk253100)
* Bump up the version of distroless to base-debian11 (#4898, @ywk253100)
* Add schedule ordered resources E2E test (#4913, @qiuming-best)
* Make velero completion zsh command output can be used by `source` command. (#4914, @jxun)
* Enhance the map flag to support parsing input value contains entry delimiters (#4920, @ywk253100)
* Fix E2E test [Backups][Deletion][Restic] on GCP. (#4968, @jxun)
* Disable status as sub resource in CRDs (#4972, @ywk253100)
* Add more information for failing to get path or snapshot in restic backup and restore. (#4988, @jxun)
* When spec.RestoreStatus is empty, don't restore status (#5015, @sseago)

View File

@@ -38,12 +38,6 @@ func main() {
case <-ticker.C:
if done() {
fmt.Println("All restic restores are done")
err := removeFolder()
if err != nil {
fmt.Println(err)
} else {
fmt.Println("Done cleanup .velero folder")
}
return
}
}
@@ -81,28 +75,3 @@ func done() bool {
return true
}
// remove .velero folder
func removeFolder() error {
children, err := ioutil.ReadDir("/restores")
if err != nil {
return err
}
for _, child := range children {
if !child.IsDir() {
fmt.Printf("%s is not a directory, skipping.\n", child.Name())
continue
}
donePath := filepath.Join("/restores", child.Name(), ".velero")
err = os.RemoveAll(donePath)
if err != nil {
return err
}
fmt.Printf("Deleted %s", donePath)
}
return nil
}

View File

@@ -37,11 +37,6 @@ spec:
spec:
description: BackupSpec defines the specification for a Velero backup.
properties:
csiSnapshotTimeout:
description: CSISnapshotTimeout specifies the time used to wait for
CSI VolumeSnapshot status turns to ReadyToUse during creation, before
returning error as timeout. The default value is 10 minutes.
type: string
defaultVolumesToRestic:
description: DefaultVolumesToRestic specifies whether restic should
be used to take a backup of all pod volumes by default.
@@ -319,61 +314,6 @@ spec:
type: string
type: object
type: object
orLabelSelectors:
description: OrLabelSelectors is list of metav1.LabelSelector to filter
with when adding individual objects to the backup. If multiple provided
they will be joined by the OR operator. LabelSelector as well as
OrLabelSelectors cannot co-exist in backup request, only one of
them can be used.
items:
description: A label selector is a label query over a set of resources.
The result of matchLabels and matchExpressions are ANDed. An empty
label selector matches all objects. A null label selector matches
no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the
key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
nullable: true
type: array
orderedResources:
additionalProperties:
type: string
@@ -414,14 +354,6 @@ spec:
format: date-time
nullable: true
type: string
csiVolumeSnapshotsAttempted:
description: CSIVolumeSnapshotsAttempted is the total number of attempted
CSI VolumeSnapshots for this backup.
type: integer
csiVolumeSnapshotsCompleted:
description: CSIVolumeSnapshotsCompleted is the total number of successfully
completed CSI VolumeSnapshots for this backup.
type: integer
errors:
description: Errors is a count of all error messages that were generated
during execution of the backup. The actual errors are in the backup's
@@ -432,10 +364,6 @@ spec:
format: date-time
nullable: true
type: string
failureReason:
description: FailureReason is an error that caused the entire backup
to fail.
type: string
formatVersion:
description: FormatVersion is the backup format version, including
major, minor, and patch version.

View File

@@ -158,10 +158,6 @@ spec:
format: date-time
nullable: true
type: string
message:
description: Message is a message about the backup storage location's
status.
type: string
phase:
description: Phase is the current state of the BackupStorageLocation.
enum:
@@ -172,7 +168,8 @@ spec:
type: object
served: true
storage: true
subresources: {}
subresources:
status: {}
status:
acceptedNames:
kind: ""

View File

@@ -16,16 +16,7 @@ spec:
singular: deletebackuprequest
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: The name of the backup to be deleted
jsonPath: .spec.backupName
name: BackupName
type: string
- description: The status of the deletion request
jsonPath: .status.phase
name: Status
type: string
name: v1
- name: v1
schema:
openAPIV3Schema:
description: DeleteBackupRequest is a request to delete one or more backups.
@@ -72,7 +63,6 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -85,6 +85,8 @@ spec:
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""

View File

@@ -16,40 +16,7 @@ spec:
singular: podvolumebackup
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Pod Volume Backup status such as New/InProgress
jsonPath: .status.phase
name: Status
type: string
- description: Time when this backup was started
jsonPath: .status.startTimestamp
name: Created
type: date
- description: Namespace of the pod containing the volume to be backed up
jsonPath: .spec.pod.namespace
name: Namespace
type: string
- description: Name of the pod containing the volume to be backed up
jsonPath: .spec.pod.name
name: Pod
type: string
- description: Name of the volume to be backed up
jsonPath: .spec.volume
name: Volume
type: string
- description: Restic repository identifier for this backup
jsonPath: .spec.repoIdentifier
name: Restic Repo
type: string
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
name: Storage Location
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
- name: v1
schema:
openAPIV3Schema:
properties:
@@ -186,7 +153,6 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -16,37 +16,7 @@ spec:
singular: podvolumerestore
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Namespace of the pod containing the volume to be restored
jsonPath: .spec.pod.namespace
name: Namespace
type: string
- description: Name of the pod containing the volume to be restored
jsonPath: .spec.pod.name
name: Pod
type: string
- description: Name of the volume to be restored
jsonPath: .spec.volume
name: Volume
type: string
- description: Pod Volume Restore status such as New/InProgress
jsonPath: .status.phase
name: Status
type: string
- description: Total number of bytes to restore for the volume
format: int64
jsonPath: .status.progress.totalBytes
name: TotalBytes
type: integer
- description: Number of bytes restored so far
format: int64
jsonPath: .status.progress.bytesDone
name: BytesDone
type: integer
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
- name: v1
schema:
openAPIV3Schema:
properties:
@@ -166,7 +136,6 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -16,11 +16,7 @@ spec:
singular: resticrepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
- name: v1
schema:
openAPIV3Schema:
properties:
@@ -85,7 +81,6 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -53,11 +53,6 @@ spec:
type: string
nullable: true
type: array
existingResourcePolicy:
description: ExistingResourcePolicy specifies the restore behaviour
for the kubernetes resource to be restored
nullable: true
type: string
hooks:
description: Hooks represent custom behaviors that should be executed
during or post restore.
@@ -1708,61 +1703,6 @@ spec:
included in the map will be restored into namespaces of the same
name.
type: object
orLabelSelectors:
description: OrLabelSelectors is list of metav1.LabelSelector to filter
with when restoring individual objects from the backup. If multiple
provided they will be joined by the OR operator. LabelSelector as
well as OrLabelSelectors cannot co-exist in restore request, only
one of them can be used
items:
description: A label selector is a label query over a set of resources.
The result of matchLabels and matchExpressions are ANDed. An empty
label selector matches all objects. A null label selector matches
no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the
key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
nullable: true
type: array
preserveNodePorts:
description: PreserveNodePorts specifies whether to restore old nodePorts
from backup.
@@ -1773,26 +1713,6 @@ spec:
PVs from snapshot (via the cloudprovider).
nullable: true
type: boolean
restoreStatus:
description: RestoreStatus specifies which resources we should restore
the status field. If nil, no objects are included. Optional.
nullable: true
properties:
excludedResources:
description: ExcludedResources specifies the resources whose
status will not be restored.
items:
type: string
nullable: true
type: array
includedResources:
description: IncludedResources specifies the resources whose
status will be restored. If empty, it applies to all resources.
items:
type: string
nullable: true
type: array
type: object
scheduleName:
description: ScheduleName is the unique name of the Velero schedule
to restore from. If specified, and BackupName is empty, Velero will

View File

@@ -16,23 +16,7 @@ spec:
singular: schedule
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Status of the schedule
jsonPath: .status.phase
name: Status
type: string
- description: A Cron expression defining when to run the Backup
jsonPath: .spec.schedule
name: Schedule
type: string
- description: The last time a Backup was run for this schedule
jsonPath: .status.lastBackup
name: LastBackup
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
- name: v1
schema:
openAPIV3Schema:
description: Schedule is a Velero resource that represents a pre-scheduled
@@ -61,11 +45,6 @@ spec:
description: Template is the definition of the Backup to be run on
the provided schedule
properties:
csiSnapshotTimeout:
description: CSISnapshotTimeout specifies the time used to wait
for CSI VolumeSnapshot status turns to ReadyToUse during creation,
before returning error as timeout. The default value is 10 minutes.
type: string
defaultVolumesToRestic:
description: DefaultVolumesToRestic specifies whether restic should
be used to take a backup of all pod volumes by default.
@@ -348,61 +327,6 @@ spec:
type: string
type: object
type: object
orLabelSelectors:
description: OrLabelSelectors is list of metav1.LabelSelector
to filter with when adding individual objects to the backup.
If multiple provided they will be joined by the OR operator.
LabelSelector as well as OrLabelSelectors cannot co-exist in
backup request, only one of them can be used.
items:
description: A label selector is a label query over a set of
resources. The result of matchLabels and matchExpressions
are ANDed. An empty label selector matches all objects. A
null label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
nullable: true
type: array
orderedResources:
additionalProperties:
type: string
@@ -469,7 +393,6 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -77,6 +77,8 @@ spec:
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""

File diff suppressed because one or more lines are too long

View File

@@ -4,33 +4,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: velero-perms
name: manager-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- velero.io
resources:
- backups
verbs:
- create
- delete
- apiGroups:
- velero.io
resources:
@@ -51,26 +26,6 @@ rules:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- deletebackuprequests
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- deletebackuprequests/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
@@ -91,86 +46,6 @@ rules:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- podvolumebackups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- podvolumebackups/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- podvolumerestores
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- podvolumerestores/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- resticrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- resticrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- schedules
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- schedules/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:

View File

@@ -1,262 +0,0 @@
# Add support for `ExistingResourcePolicy` to restore API
## Abstract
Velero currently does not support any restore policy for Kubernetes resources that are already present in-cluster. Velero skips restoring a resource if it already exists in the namespace/cluster, irrespective of whether the resource in the backup is the same as or different from the one present in the cluster. It is desired that Velero give users the option to decide whether or not the resource in the backup should overwrite the one present in the cluster.
## Background
As of today, Velero will skip over the restoration of resources that already exist in the cluster. The current workflow followed by Velero is (using a backed-up `service` as an example):
- Velero attempts to restore the `service`
- Fetches the `service` from the cluster
- If the `service` exists then:
- Checks whether the `service` instance in the cluster is equal to the `service` instance present in backup
- If not equal then skips the restore of the `service` and adds a restore warning (except for [ServiceAccount objects](https://github.com/vmware-tanzu/velero/blob/574baeb3c920f97b47985ec3957debdc70bcd5f8/pkg/restore/restore.go#L1246))
- If equal then skips the restore of the `service` and mentions that the restore of resource `service` is skipped in logs
It is desired to add the functionality to specify whether or not to overwrite the instance of resource `service` in cluster with the one present in backup during the restore process.
Related issue: https://github.com/vmware-tanzu/velero/issues/4066
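To make the current behavior concrete, the following is a minimal, self-contained Go sketch of the skip-if-exists flow described above. It is an illustration only, not Velero's actual restore code; the `object` type and the helpers are hypothetical stand-ins for Velero's unstructured objects and client calls.
```go
package main

import "fmt"

type object struct{ Name, Spec string }

// objectsEqual stands in for Velero's comparison of the backed-up
// resource against the in-cluster one (hypothetical helper).
func objectsEqual(a, b object) bool { return a.Spec == b.Spec }

// restoreResource sketches the current flow: fetch the resource from the
// cluster; if it exists and differs, add a warning and skip (ServiceAccount
// objects are the exception and get merged); if it exists and is equal,
// log and skip; otherwise create it.
func restoreResource(backed object, cluster map[string]object) (warnings []string) {
	current, found := cluster[backed.Name]
	if !found {
		cluster[backed.Name] = backed // resource absent: restore it
		return nil
	}
	if !objectsEqual(current, backed) {
		return []string{fmt.Sprintf("skipping %s: already exists and differs from backup", backed.Name)}
	}
	fmt.Printf("skipping restore of %s: already exists and is unchanged\n", backed.Name)
	return nil
}

func main() {
	cluster := map[string]object{"svc": {Name: "svc", Spec: "v1"}}
	fmt.Println(restoreResource(object{Name: "svc", Spec: "v2"}, cluster)) // warning: differs
}
```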
## Goals
- Add support for `ExistingResourcePolicy` to restore API for Kubernetes resources.
## Non Goals
- Change existing restore workflow for `ServiceAccount` objects
- Add support for `ExistingResourcePolicy` as `recreate` for Kubernetes resources. (Future scope feature)
## Unrelated Proposals (Completely different functionalities than the one proposed in the design)
- Add support for `ExistingResourcePolicy` to restore API for Non-Kubernetes resources.
- Add support for `ExistingResourcePolicy` to restore API for `PersistentVolume` data.
### Use-cases/Scenarios
### A. Production Cluster - Backup Cluster:
Let's say you have a Backup Cluster which is identical to the Production Cluster. After some operations/usage/time, the Production Cluster has changed: there might be new deployments, and some secrets might have been updated. This means that the Backup Cluster is no longer identical to the Production Cluster. In order to keep the Backup Cluster identical to the Production Cluster with respect to Kubernetes resources (except PV data), we would like to use Velero to schedule new backups, which would in turn keep the Backup Cluster up to date via Velero restores.
Reference: https://github.com/vmware-tanzu/velero/issues/4066#issuecomment-954320686
### B. Help identify resource delta:
Here, delta resources are resources that were restored from a previous backup but are no longer present in the latest backup. Let's follow a sequence of steps to understand this scenario:
- Consider there are 2 clusters, Cluster A, which has 3 resources - P1, P2 and P3.
- Create a Backup1 from Cluster A which has P1, P2 and P3.
- Perform restore on a new Cluster B using Backup1.
- Now, let's say that in Cluster A resource P1 gets deleted and resource P2 gets updated.
- Create a new Backup2 with the new state of Cluster A, keep in mind Backup1 has P1, P2 and P3 while Backup2 has P2' and P3.
- So the Delta here is (|Cluster B - Backup2|), Delete P1 and Update P2.
- During Restore time we would want the Restore to help us identify this resource delta.
Reference: https://github.com/vmware-tanzu/velero/pull/4613#issuecomment-1027260446
## High-Level Design
### Approach 1: Add a new spec field `existingResourcePolicy` to the Restore API
In this approach we do *not* change existing Velero behavior: if the resource to restore is equal to the one backed up, nothing is done, following current Velero behavior. For resources that already exist in the cluster but are not equal to the resource in the backup (other than ServiceAccounts), we add a new optional spec field `existingResourcePolicy` which can have the following values:
1. `none`: This is the existing behavior: if Velero encounters a resource that already exists in the cluster, we simply skip restoration.
2. `update`: This option would provide the following behavior (see the sketch after the examples below).
- Unchanged resources: Velero would update the backup/restore labels on the unchanged resources; if the labels patch fails, Velero adds a restore error.
- Changed resources: Velero will first try to patch the changed resource. Now, if the patch:
- succeeds: the in-cluster resource gets updated with the labels as well as the resource diff
- fails: Velero adds a restore warning and tries to just update the backup/restore labels on the resource; if the labels patch also fails, then we add a restore error.
3. `recreate`: If the resource already exists, then Velero will delete it and recreate the resource.
*Note:* The `recreate` option is a non-goal for this enhancement proposal, but it is considered as a future scope.
Another thing to highlight is that Velero will not delete any resources under any of the policy options proposed in this design, but it will patch the resources under the `update` policy option.
Example:
A. The following Restore will execute the `existingResourcePolicy` restore type `none` for the `services` and `deployments` present in the `velero-protection` namespace.
```
Kind: Restore
includeNamespaces: velero-protection
includeResources:
- services
- deployments
existingResourcePolicy: none
```
B. The following Restore will execute the `existingResourcePolicy` restore type `update` for the `secrets` and `daemonsets` present in the `gdpr-application` namespace.
```
Kind: Restore
includeNamespaces: gdpr-application
includeResources:
- secrets
- daemonsets
existingResourcePolicy: update
```
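To make the proposed `update` behavior concrete, here is a minimal Go sketch of the patch-then-fall-back flow described in option 2 above. It is a sketch under stated assumptions, not the proposed implementation; `patchResource` and `patchLabels` are hypothetical stand-ins for the actual patch calls.
```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("patch rejected")

// Hypothetical patch primitives standing in for the real client calls.
func patchResource(name string) error { return errConflict } // full resource diff patch
func patchLabels(name string) error   { return nil }         // backup/restore labels only

// applyUpdatePolicy sketches the `update` flow: try to patch the changed
// resource; on failure, record a restore warning and fall back to patching
// only the backup/restore labels; if that also fails, record a restore error.
func applyUpdatePolicy(name string) (warnings, errs []string) {
	if err := patchResource(name); err == nil {
		return nil, nil // in-cluster resource updated with labels and diff
	}
	warnings = append(warnings, fmt.Sprintf("could not patch %s, updating labels only", name))
	if err := patchLabels(name); err != nil {
		errs = append(errs, fmt.Sprintf("could not update labels on %s: %v", name, err))
	}
	return warnings, errs
}

func main() {
	w, e := applyUpdatePolicy("my-service")
	fmt.Println("warnings:", w, "errors:", e)
}
```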
### Approach 2: Add a new spec field `existingResourcePolicyConfig` to the Restore API
In this approach we give the user the ability to specify which resources are to be included for a particular kind of force-update behaviour; essentially a more granular approach wherein the user is able to specify a resource:behaviour mapping. It would look like:
`existingResourcePolicyConfig`:
- `patch:`
- `includedResources:` [ ]string
- `recreate:`
- `includedResources:` [ ]string
*Note:*
- There is no `none` behaviour in this approach as that would conform to the current/default Velero restore behaviour.
- The `recreate` option is a non-goal for this enhancement proposal, but it is considered as a future scope.
Example:
A. The following Restore will execute the restore type `patch` and apply the `existingResourcePolicyConfig` for `secrets` and `daemonsets` present in the `inventory-app` namespace.
```
Kind: Restore
includeNamespaces: inventory-app
existingResourcePolicyConfig:
patch:
includedResources:
- secrets
- daemonsets
```
### Approach 3: Combination of Approach 1 and Approach 2
Now, this approach is somewhat a combination of the aforementioned approaches. Here we propose the addition of two spec fields to the Restore API - `existingResourceDefaultPolicy` and `existingResourcePolicyOverrides`. As the names suggest, the idea is that `existingResourceDefaultPolicy` would describe the default Velero behaviour for this restore and `existingResourcePolicyOverrides` would override the default policy explicitly for some resources.
Example:
A. The following Restore will execute the restore type `patch` as the `existingResourceDefaultPolicy` but will override the default policy for `secrets` using the `existingResourcePolicyOverrides` spec as `none`.
```
Kind: Restore
includeNamespaces: inventory-app
existingResourceDefaultPolicy: patch
existingResourcePolicyOverrides:
none:
includedResources:
- secrets
```
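For illustration, here is a hedged sketch of how the effective policy could be resolved under Approach 3; the simplified types and the function name are assumptions for this document only:
```go
package restore

// PolicyType mirrors the proposed policy values ("none", "patch", "recreate").
type PolicyType string

// resolvePolicy sketches Approach 3's resolution: an explicit override for
// the resource wins; otherwise the restore-wide default policy applies.
func resolvePolicy(defaultPolicy PolicyType, overrides map[PolicyType][]string, resource string) PolicyType {
	for policy, resources := range overrides {
		for _, r := range resources {
			if r == resource {
				return policy
			}
		}
	}
	return defaultPolicy
}
```
With the example above, resolving `secrets` against the overrides returns `none`, while any other resource falls back to the default `patch`.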
## Detailed Design
### Approach 1: Add a new spec field `existingResourcePolicy` to the Restore API
The `existingResourcePolicy` spec field will be a `PolicyType` field.
Restore API:
```
type RestoreSpec struct {
.
.
.
// ExistingResourcePolicy specifies the restore behaviour for the kubernetes resource to be restored
// +optional
ExistingResourcePolicy PolicyType
}
```
PolicyType:
```
type PolicyType string
const PolicyTypeNone PolicyType = "none"
const PolicyTypePatch PolicyType = "update"
```
### Approach 2: Add a new spec field `existingResourcePolicyConfig` to the Restore API
The `existingResourcePolicyConfig` will be a spec of type `PolicyConfiguration` which gets added to the Restore API.
Restore API:
```
type RestoreSpec struct {
.
.
.
// ExistingResourcePolicyConfig specifies the restore behaviour for a particular/list of kubernetes resource(s) to be restored
// +optional
ExistingResourcePolicyConfig []PolicyConfiguration
}
```
PolicyConfiguration:
```
type PolicyConfiguration struct {
PolicyTypeMapping map[PolicyType]ResourceList
}
```
PolicyType:
```
type PolicyType string
const PolicyTypePatch PolicyType = "patch"
const PolicyTypeRecreate PolicyType = "recreate"
```
ResourceList:
```
type ResourceList struct {
IncludedResources []string
}
```
### Approach 3: Combination of Approach 1 and Approach 2
Restore API:
```
type RestoreSpec struct {
.
.
.
// ExistingResourceDefaultPolicy specifies the default restore behaviour for the Kubernetes resource to be restored
// +optional
ExistingResourceDefaultPolicy PolicyType
// ExistingResourcePolicyOverrides specifies the restore behaviour for a particular/list of Kubernetes resource(s) to be restored
// +optional
ExistingResourcePolicyOverrides []PolicyConfiguration
}
```
PolicyType:
```
type PolicyType string
const PolicyTypeNone PolicyType = "none"
const PolicyTypePatch PolicyType = "patch"
const PolicyTypeRecreate PolicyType = "recreate"
```
PolicyConfiguration:
```
type PolicyConfiguration struct {
PolicyTypeMapping map[PolicyType]ResourceList
}
```
ResourceList:
```
type ResourceList struct {
IncludedResources []string
}
```
The restore workflow changes will be done [here](https://github.com/vmware-tanzu/velero/blob/b40bbda2d62af2f35d1406b9af4d387d4b396839/pkg/restore/restore.go#L1245)
### CLI changes for Approach 1
We would introduce a new CLI flag called `existing-resource-policy` of string type. This flag would be used to accept the
policy from the user. The Velero restore command would look like this:
```
velero create restore <restore_name> --existing-resource-policy=update
```
Help message: `Restore Policy to be used during the restore workflow, can be - none, update`
The CLI changes will go in `pkg/cmd/cli/restore/create.go`.
We would also add a validation which checks for invalid policy values provided to this flag.
The restore describer will also be updated to reflect the policy, in `pkg/cmd/util/output/restore_describer.go`.
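A minimal sketch of the proposed flag validation, assuming the policy arrives as a plain string from the CLI; the helper name is illustrative:
```go
package restore

import "fmt"

// validateExistingResourcePolicy is a hypothetical helper that rejects
// invalid values passed via --existing-resource-policy; an empty value
// means the flag was not set and the default behavior applies.
func validateExistingResourcePolicy(policy string) error {
	switch policy {
	case "", "none", "update":
		return nil
	default:
		return fmt.Errorf("invalid existing-resource-policy %q: valid values are none, update", policy)
	}
}
```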
### Implementation Decision
We have decided to go ahead with the implementation of Approach 1 as:
- It is easier to implement
- It is easier to scale and leaves room for improvement, keeping the door open to expanding to Approach 3
- It also provides an option to preserve the existing Velero restore workflow

View File

@@ -1,138 +0,0 @@
# Ensure support for backing up resources based on multiple labels
## Abstract
As of today, Velero supports filtering of resources based on a single label selector per backup. It is desired that Velero
support backing up resources based on multiple labels (OR logic).
**Note:** This solution is required because Kubernetes label selectors only allow AND logic of labels.
## Background
Currently, Velero's Backup/Restore API has a spec field `LabelSelector` which helps in filtering resources based on
a **single** label value per backup/restore request. For instance, if the user specifies the `Backup.Spec.LabelSelector` as
`data-protection-app: true`, Velero will grab all the resources that possess this label and perform the backup
operation on them. The `LabelSelector` field does not accept more than one label, and thus if the user wants to
back up resources having any one label from a set of labels (label1 OR label2 OR label3), the user needs to
create multiple backups, one per label rule. It would be really useful if the Velero Backup API could respect a set of
labels (OR rule) for a single backup request.
Related Issue: https://github.com/vmware-tanzu/velero/issues/1508
## Goals
- Enable support for backing up resources based on multiple labels (OR Logic) in a single backup config.
- Enable support for restoring resources based on multiple labels (OR Logic) in a single restore config.
## Use Case/Scenario
Let's say that, as a Velero user, you want to take a backup of secrets, but these secrets do not all share one consistent
label. We want to back up secrets having any one of the labels `app=gdpr`, `app=wpa` and `app=ccpa`. Today we would
have to create 3 instances of backup, one for each label rule. This can become cumbersome at scale.
## High-Level Design
### Addition of `OrLabelSelectors` spec to Velero Backup/Restore API
For Velero to back up resources if they carry any one label from a set of labels, we would like to add a new spec
field `OrLabelSelectors` which would enable users to specify them. The Velero backup would look something like:
```
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: backup-101
  namespace: openshift-adp
spec:
  includedNamespaces:
  - test
  storageLocation: velero-sample-1
  ttl: 720h0m0s
  orLabelSelectors:
  - matchLabels:
      app: gdpr
  - matchLabels:
      app: wpa
  - matchLabels:
      app: ccpa
```
**Note:** This approach will **not** be changing any current behavior related to Backup API spec `LabelSelector`. Rather we
propose that the label in `LabelSelector` spec and labels in `OrLabelSelectors` should be treated as different Velero functionalities.
Both these fields will be treated as separate Velero Backup API specs. If `LabelSelector` (singular) is present, then just match that label.
And if `OrLabelSelectors` is present, then match any label in the set specified by the user. For the backup case, if both `LabelSelector` and `OrLabelSelectors`
are specified (we do not anticipate this as a real-world use case), then `OrLabelSelectors` will take precedence; `LabelSelector` will
be used to filter only when `OrLabelSelectors` is not specified by the user. This keeps the two specs' behaviour independent and avoids confusing users.
This way we preserve the existing Velero behaviour and implement the new functionality in a much cleaner way.
For instance, let's take a look at the following cases:
1. Only `LabelSelector` specified: Velero will create a backup with resources matching label `app=gdpr`
```
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: backup-101
  namespace: openshift-adp
spec:
  includedNamespaces:
  - test
  storageLocation: velero-sample-1
  ttl: 720h0m0s
  labelSelector:
    matchLabels:
      app: gdpr
```
2. Only `OrLabelSelectors` specified: Velero will create a backup with resources matching any label from set `{app=gdpr, app=wpa, app=ccpa}`
```
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: backup-101
  namespace: openshift-adp
spec:
  includedNamespaces:
  - test
  storageLocation: velero-sample-1
  ttl: 720h0m0s
  orLabelSelectors:
  - matchLabels:
      app: gdpr
  - matchLabels:
      app: wpa
  - matchLabels:
      app: ccpa
```
Similar implementation will be done for the Restore API as well.
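As a hedged sketch of the precedence rule described above (illustrative only, not the final filter code):
```go
package backup

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// chooseSelectors sketches the precedence rule: OrLabelSelectors, when
// present, wins over the singular LabelSelector.
func chooseSelectors(single *metav1.LabelSelector, or []*metav1.LabelSelector) []*metav1.LabelSelector {
	if len(or) > 0 {
		return or // OrLabelSelectors takes precedence when both are set
	}
	if single != nil {
		return []*metav1.LabelSelector{single}
	}
	return nil // neither specified: include all objects
}
```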
## Detailed Design
With the introduction of `OrLabelSelectors`, the BackupSpec and RestoreSpec will look like:
BackupSpec:
```
type BackupSpec struct {
[...]
// OrLabelSelectors is a set of []metav1.LabelSelector to filter with
// when adding individual objects to the backup. Resources matching any one
// label from the set of labels will be added to the backup. If empty
// or nil, all objects are included. Optional.
// +optional
OrLabelSelectors []*metav1.LabelSelector
[...]
}
```
RestoreSpec:
```
type RestoreSpec struct {
[...]
// OrLabelSelectors is a set of []metav1.LabelSelector to filter with
// when restoring objects from the backup. Resources matching any one
// label from the set of labels will be restored from the backup. If empty
// or nil, all objects are included from the backup. Optional.
// +optional
OrLabelSelectors []*metav1.LabelSelector
[...]
}
```
The logic to collect resources to be backed up for a particular backup will be updated in the `backup/item_collector.go`
around [here](https://github.com/vmware-tanzu/velero/blob/574baeb3c920f97b47985ec3957debdc70bcd5f8/pkg/backup/item_collector.go#L294).
And for filtering the resources to be restored, the changes will go [here](https://github.com/vmware-tanzu/velero/blob/d1063bda7e513150fd9ae09c3c3c8b1115cb1965/pkg/restore/restore.go#L1769)
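A hedged sketch of the OR semantics during item collection, using the standard `apimachinery` label helpers; the surrounding wiring is an assumption:
```go
package backup

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// matchesAnyOrLabelSelector sketches the OR logic: an item is kept if it
// matches at least one selector in OrLabelSelectors.
func matchesAnyOrLabelSelector(itemLabels map[string]string, orSelectors []*metav1.LabelSelector) (bool, error) {
	for _, ls := range orSelectors {
		selector, err := metav1.LabelSelectorAsSelector(ls)
		if err != nil {
			return false, err
		}
		if selector.Matches(labels.Set(itemLabels)) {
			return true, nil // one match is enough
		}
	}
	return false, nil
}
```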
**Note:**
- This feature will not be exposed via Velero CLI.

View File

@@ -1,735 +0,0 @@
# Pre-Backup, Post-Backup, Pre-Restore, and Post-Restore Action Plugin Hooks
## Abstract
Velero should provide a way to trigger actions before and after each backup and restore.
**Important**: These proposed plugin hooks are fundamentally different from the existing plugin hooks, BackupItemAction and RestoreItemAction, which are triggered per resource item during backup and restore, respectively.
The proposed plugin hooks are to be executed only once: pre-backup (before the backup starts), post-backup (after the backup is completed and uploaded to object storage, including volume snapshots), pre-restore (before the restore starts) and post-restore (after the restore is completed, including volume restores).
### PreBackup and PostBackup Actions
For backup, the sequence of events in Velero is the following (the sequence depicted predates the upcoming changes for [upload progress #3533](https://github.com/vmware-tanzu/velero/issues/3533)):
```
New Backup Request
|--> Validation of the request
|--> Set Backup Phase "In Progress"
| --> Start Backup
| --> Discover all Plugins
|--> Check if Backup Exists
|--> Backup all K8s Resource Items
|--> Perform all Volume Snapshots
|--> Final Backup Phase is determined
|--> Persist Backup and Logs on Object Storage
```
We propose the pre-backup and post-backup plugin hooks to be executed in this sequence:
```
New Backup Request
|--> Validation of the request
|--> Set Backup Phase "In Progress"
| --> Start Backup
| --> Discover all Plugins
|--> Check if Backup Exists
|--> **PreBackupActions** are executed, logging actions to the existing backup log file
|--> Backup all K8s Resource Items
|--> Perform all Volume Snapshots
|--> Final Backup Phase is determined
|--> Persist Backup and logs on Object Storage
|--> **PostBackupActions** are executed, logging to its own file
```
These plugin hooks will be invoked:
- PreBackupAction: plugin actions are executed after the backup object is created and validated but before the backup is processed, more precisely _before_ the function [c.backupper.Backup](https://github.com/vmware-tanzu/velero/blob/74476db9d791fa91bba0147eac8ec189820adb3d/pkg/controller/backup_controller.go#L590). If the PreBackupActions return an error, the backup object is not processed and the Backup phase will be set to `FailedPreBackupActions`.
- PostBackupAction: plugin actions are executed after the backup is finished and persisted, more precisely _after_ function [c.runBackup](https://github.com/vmware-tanzu/velero/blob/74476db9d791fa91bba0147eac8ec189820adb3d/pkg/controller/backup_controller.go#L274).
The proposed plugin hooks will execute actions that will have statuses on their own:
`Backup.Status.PreBackupActionsStatuses` and `Backup.Status.PostBackupActionsStatuses` which will be an array of a proposed struct `ActionStatus` with PluginName, StartTimestamp, CompletionTimestamp and Phase.
### PreRestore and PostRestore Actions
For restore, the sequence of events in Velero is the following (the sequence depicted predates the upcoming changes for [upload progress #3533](https://github.com/vmware-tanzu/velero/issues/3533)):
```
New Restore Request
|--> Validation of the request
|--> Checks if restore is from a backup or a schedule
|--> Fetches backup
|--> Set Restore Phase "In Progress"
|--> Start Restore
|--> Discover all Plugins
|--> Download backup file to temp
|--> Fetch list of volume snapshots
|--> Restore K8s items, including PVs
|--> Final Restore Phase is determined
|--> Persist Restore logs on Object Storage
```
We propose the pre-restore and post-restore plugin hooks to be executed in this sequence:
```
New Restore Request
|--> Validation of the request
|--> Checks if restore is from a backup or a schedule
|--> Fetches backup
|--> Set Restore Phase "In Progress"
|--> Start Restore
|--> Discover all Plugins
|--> Download backup file to temp
|--> Fetch list of volume snapshots
|--> **PreRestoreActions** are executed, logging actions to the existing restore log file
|--> Restore K8s items, including PVs
|--> Final Restore Phase is determined
|--> Persist Restore logs on Object Storage
|--> **PostRestoreActions** are executed, logging to its own file
```
These plugin hooks will be invoked:
- PreRestoreAction: plugin actions are executed after the restore object is created and validated and before the backup object is fetched, more precisely in the function `runValidatedRestore` _after_ the function [info.backupStore.GetBackupVolumeSnapshots](https://github.com/vmware-tanzu/velero/blob/7c75cd6cf854064c9a454e53ba22cc5881d3f1f0/pkg/controller/restore_controller.go#L460). If the PreRestoreActions return an error, the restore object is not processed and the Restore phase will be set to `FailedPreRestoreActions`.
- PostRestoreAction: plugin actions are executed after the restore finishes processing all items, volume snapshots are restored, and logs are persisted, more precisely in the function `processRestore` _after_ setting [`restore.Status.CompletionTimestamp`](https://github.com/vmware-tanzu/velero/blob/7c75cd6cf854064c9a454e53ba22cc5881d3f1f0/pkg/controller/restore_controller.go#L273).
The proposed plugin hooks will execute actions that will have statuses on their own:
`Restore.Status.PreRestoreActionsStatuses` and `Restore.Status.PostRestoreActionsStatuses` which will be an array of a proposed struct `ActionStatus` with PluginName, StartTimestamp, CompletionTimestamp and Phase.
## Background
Increasingly, Velero is employed for workload migrations across different Kubernetes clusters.
Using Velero for migrations requires an atomic operation involving a Velero backup on a source cluster followed by a Velero restore on a destination cluster.
It is common during these migrations to perform many actions inside and outside Kubernetes clusters.
**Attention**: these actions are not per resource item, but they are actions to be executed _once_ before and/or after the migration itself (remember, migration in this context is Velero Backup + Velero Restore).
One important use case driving this proposal is migrating stateful workloads at scale across different clusters/storage backends.
Today, Velero's Restic integration is the answer for such use cases, but there are some limitations:
- Quiesce/unquiesce workloads: Pod hooks are useful for quiescing/unquiescing workloads, but platform engineers often do not have the luxury/visibility/time/knowledge to go through each pod in order to add specific commands to quiesce/unquiesce workloads.
- Orphan PVC/PV pairs: PVCs/PVs that do not have associated running pods are not backed up and consequently, are not migrated.
Aiming to address these two limitations, and separate from this proposal, we would like to write a Velero plugin that takes advantage of the proposed Pre-Backup plugin hook. This plugin will be executed _once_ (not per resource item) prior to backup. It will scale down the applications by setting `.spec.replicas=0` on all deployments, statefulsets, daemonsets, replicasets, etc., and will start a small-footprint staging pod that mounts all PVC/PV pairs. Similarly, we would like to write another plugin that will utilize the proposed Post-Restore plugin hook. This plugin will unquiesce migrated applications by killing the staging pod and reinstating the original `.spec.replicas` values after the Velero restore is completed.
Other examples of plugins that can use the proposed plugin hooks are:
- PostBackupAction: trigger a Velero Restore after a successful Velero backup (and complete the migration operation).
- PreRestoreAction: pre-expand the cluster's capacity via Cluster API to avoid starvation of cluster resources before the restore.
- PostRestoreAction: call actions to be performed outside Kubernetes clusters, such as configure a global load balancer (GLB) that enables the new cluster.
The post-backup actions will be executed after the backup is uploaded (persisted) to object storage. The logs of the post-backup actions will be uploaded to object storage once the actions are completed.
The post-restore actions will be executed after the restore is persisted to object storage. The logs of the post-restore actions will be uploaded to object storage once the actions are completed.
This design seeks to provide missing extension points. This proposal's scope is to only add the new plugin hooks, not the plugins themselves.
## Goals
- Provide PreBackupAction, PostBackupAction, PreRestoreAction, and PostRestoreAction APIs for plugins to implement.
- Update Velero backup and restore creation logic to invoke registered PreBackupAction and PreRestoreAction plugins before processing the backup and restore, respectively.
- Update Velero backup and restore completion logic to invoke registered PostBackupAction and PostRestoreAction plugins after the corresponding objects are persisted to object storage.
- Create one `ActionStatus` struct to keep track of the execution of the plugin hooks. This struct has PluginName, StartTimestamp, CompletionTimestamp and Phase.
- Add sub-statuses for the plugins on the Backup object: `Backup.Status.PreBackupActionsStatuses` and `Backup.Status.PostBackupActionsStatuses`. They will be flagged as optional and nullable, and will be populated only when plugins are registered for the PreBackup and PostBackup hooks, respectively.
- Add sub-statuses for the plugins on the Restore object: `Restore.Status.PreRestoreActionsStatuses` and `Restore.Status.PostRestoreActionsStatuses`. They will be flagged as optional and nullable, and will be populated only when plugins are registered for the PreRestore and PostRestore hooks, respectively.
## Non-Goals
- Specific implementations of the PreBackupAction, PostBackupAction, PreRestoreAction and PostRestoreAction API beyond test cases.
- For migration-specific actions (Velero Backup + Velero Restore), add disk synchronization during the validation of the Restore (making sure the newly created backup will show up during restore).
## High-Level Design
The Velero backup controller package will be modified for `PreBackupAction` and `PostBackupAction`.
The PreBackupAction plugin API will resemble the BackupItemAction plugin hook design, but with the fundamental difference that it will receive only as input the Velero `Backup` object created.
It will not receive any resource list items because the backup is not yet running at that stage.
In addition, the `PreBackupAction` interface will only have an `Execute()` method since the plugin will be executed once per Backup creation, not per item.
The Velero backup controller will be modified so that if there are any PreBackupAction plugins registered, they will be executed after the backup object is created and validated but before the backup is processed.
The PostBackupAction plugin API will resemble the BackupItemAction plugin design, but with the fundamental difference that it will receive only as input the Velero `Backup` object without any resource list items.
By this stage, the backup has already been executed, with items backed up and volume snapshots processed and persisted.
The `PostBackupAction` interface will only have an `Execute()` method since the plugin will be executed only once per Backup, not per item.
If there are any PostBackupAction plugins registered, they will be executed after the backup is finished and persisted, more precisely _after_ function [c.runBackup](https://github.com/vmware-tanzu/velero/blob/74476db9d791fa91bba0147eac8ec189820adb3d/pkg/controller/backup_controller.go#L274).
The Velero restore controller package will be modified for `PreRestoreAction` and `PostRestoreAction`.
The PreRestoreAction plugin API will resemble the RestoreItemAction plugin design, but with the fundamental difference that it will receive only as input the Velero `Restore` object created.
It will not receive any resource list items because the restore is not yet running at that stage.
In addition, the `PreRestoreAction` interface will only have an `Execute()` method since the plugin will be executed only once per Restore creation, not per item.
The Velero restore controller will be modified so that if there are any PreRestoreAction plugins registered, they will be executed after the restore object is created and validated and before the backup object is fetched, more precisely in the function `runValidatedRestore` _after_ the function [info.backupStore.GetBackupVolumeSnapshots](https://github.com/vmware-tanzu/velero/blob/7c75cd6cf854064c9a454e53ba22cc5881d3f1f0/pkg/controller/restore_controller.go#L460). If the PreRestoreActions return an error, the restore object is not processed and the Restore phase will be set to `FailedPreRestoreActions`.
The PostRestoreAction plugin API will resemble the RestoreItemAction plugin design, but with the fundamental difference that it will receive only as input the Velero `Restore` object without any resource list items.
At this stage, the restore has already been executed.
The `PostRestoreAction` interface will only have an `Execute()` method since the plugin will be executed only once per Restore, not per item.
If any PostRestoreAction plugins are registered, they will be executed after the restore finishes processing all items, volume snapshots are restored, and logs are persisted, more precisely in the function `processRestore` _after_ setting [`restore.Status.CompletionTimestamp`](https://github.com/vmware-tanzu/velero/blob/7c75cd6cf854064c9a454e53ba22cc5881d3f1f0/pkg/controller/restore_controller.go#L273).
## Detailed Design
### New Status struct
To keep the status of the plugins, we propose the following struct:
```go
type ActionStatus struct {
// PluginName is the name of the registered plugin
// retrieved by the PluginManager as id.Name
// +optional
// +nullable
PluginName string `json:"pluginName,omitempty"`
// StartTimestamp records the time the plugin started.
// +optional
// +nullable
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
// CompletionTimestamp records the time the plugin was completed.
// +optional
// +nullable
CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"`
// Phase is the current state of the Action.
// +optional
// +nullable
Phase ActionPhase `json:"phase,omitempty"`
}
// ActionPhase is a string representation of the lifecycle phase of an action being executed by a plugin
// of a Velero backup.
// +kubebuilder:validation:Enum=InProgress;Completed;Failed
type ActionPhase string
const (
// ActionPhaseInProgress means the action is being executed
ActionPhaseInProgress ActionPhase = "InProgress"
// ActionPhaseCompleted means the action finished successfully
ActionPhaseCompleted ActionPhase = "Completed"
// ActionPhaseFailed means the action failed
ActionPhaseFailed ActionPhase = "Failed"
)
```
### Backup Status of the Plugins
The `Backup` Status section will have the follow:
```go
type BackupStatus struct {
(...)
// PreBackupActionsStatuses contains information about the pre-backup plugins' execution.
// Note that this information will only be populated if there are PreBackupAction plugins
// registered
// +optional
// +nullable
PreBackupActionsStatuses *[]ActionStatus `json:"preBackupActionsStatuses,omitempty"`
// PostBackupActionsStatuses contains information about the post-backup plugins' execution.
// Note that this information will only be populated if there are PostBackupAction plugins
// registered
// +optional
// +nullable
PostBackupActionsStatuses *[]ActionStatus `json:"postBackupActionsStatuses,omitempty"`
}
```
### Restore Status of the Plugins
The `Restore` Status section will have the follow:
```go
type RestoreStatus struct {
(...)
// PreRestoreActionsStatuses contains information about the pre-restore plugins' execution.
// Note that this information will only be populated if there are PreRestoreAction plugins
// registered
// +optional
// +nullable
PreRestoreActionsStatuses *[]ActionStatus `json:"preRestoreActionsStatuses,omitempty"`
// PostRestoreActionsStatuses contains information about the post-restore plugins' execution.
// Note that this information will only be populated if there are PostRestoreAction plugins
// registered
// +optional
// +nullable
PostRestoreActionsStatuses *[]ActionStatus `json:"postRestoreActionsStatuses,omitempty"`
}
```
### New Backup and Restore Phases
#### New Backup Phase: FailedPreBackupActions
In case the PreBackupActionsStatuses has at least one `ActionPhase` = `Failed`, it means at least one of the plugins returned an error and, consequently, the backup will not move forward. The final status of the Backup object will be set to `FailedPreBackupActions`:
```go
// BackupPhase is a string representation of the lifecycle phase
// of a Velero backup.
// +kubebuilder:validation:Enum=New;FailedValidation;FailedPreBackupActions;InProgress;Uploading;UploadingPartialFailure;Completed;PartiallyFailed;Failed;Deleting
type BackupPhase string
const (
(...)
// BackupPhaseFailedPreBackupActions means one or more of the PreBackupActions have failed
// and therefore the backup will not run.
BackupPhaseFailedPreBackupActions BackupPhase = "FailedPreBackupActions"
(...)
)
```
#### New Restore Phase FailedPreRestoreActions
In case the PreRestoreActionsStatuses has at least one `ActionPhase` = `Failed`, it means at least one of the plugins returned an error and, consequently, the restore will not move forward. The final status of the Restore object will be set to `FailedPreRestoreActions`:
```go
// RestorePhase is a string representation of the lifecycle phase
// of a Velero restore
// +kubebuilder:validation:Enum=New;FailedValidation;FailedPreRestoreActions;InProgress;Completed;PartiallyFailed;Failed
type RestorePhase string
const (
(...)
// RestorePhaseFailedPreRestoreActions means one or more of the PreRestoreActions have failed
// and therefore the restore will not run.
RestorePhaseFailedPreRestoreActions RestorePhase = "FailedPreRestoreActions"
(...)
)
```
### New Interface types
#### PreBackupAction
The `PreBackupAction` interface is as follows:
```go
// PreBackupAction provides a hook into the backup process before it begins.
type PreBackupAction interface {
// Execute the PreBackupAction plugin providing it access to the Backup that
// is being executed
Execute(backup *api.Backup) error
}
```
`PreBackupAction` will be defined in `pkg/plugin/velero/pre_backup_action.go`.
#### PostBackupAction
The `PostBackupAction` interface is as follows:
```go
// PostBackupAction provides a hook into the backup process after it completes.
type PostBackupAction interface {
// Execute the PostBackupAction plugin providing it access to the Backup that
// has been completed
Execute(backup *api.Backup) error
}
```
`PostBackupAction` will be defined in `pkg/plugin/velero/post_backup_action.go`.
#### PreRestoreAction
The `PreRestoreAction` interface is as follows:
```go
// PreRestoreAction provides a hook into the restore process before it begins.
type PreRestoreAction interface {
// Execute the PreRestoreAction plugin providing it access to the Restore that
// is being executed
Execute(restore *api.Restore) error
}
```
`PreRestoreAction` will be defined in `pkg/plugin/velero/pre_restore_action.go`.
#### PostRestoreAction
The `PostRestoreAction` interface is as follows:
```go
// PostRestoreAction provides a hook into the restore process after it completes.
type PostRestoreAction interface {
// Execute the PostRestoreAction plugin providing it access to the Restore that
// has been completed
Execute(restore *api.Restore) error
}
```
`PostRestoreAction` will be defined in `pkg/plugin/velero/post_restore_action.go`.
### New BackupStore Interface Methods
For the persistence of the logs originated from the PostBackup and PostRestore plugins, create two additional methods on `BackupStore` interface:
```go
type BackupStore interface {
(...)
PutPostBackupLog(backup string, log io.Reader) error
PutPostRestoreLog(backup, restore string, log io.Reader) error
(...)
}
```
The implementation of these two new methods will go hand-in-hand with the changes from the uploading-phases rebase.
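As a purely illustrative sketch, the new methods might persist the logs under the existing per-backup prefix layout; the exact keys below are assumptions, not decided by this proposal:
```go
package persistence

import "fmt"

// Hypothetical object-storage keys for the new logs; the layout mirrors the
// existing "backups/<backup>/..." convention but is an assumption.
func postBackupLogKey(backup string) string {
	return fmt.Sprintf("backups/%s/%s-post-backup-logs.gz", backup, backup)
}

func postRestoreLogKey(backup, restore string) string {
	return fmt.Sprintf("backups/%s/restore-%s-post-restore-logs.gz", backup, restore)
}
```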
### Generate Protobuf Definitions and Client/Servers
In `pkg/plugin/proto`, add the following:
1. Protobuf definitions will be necessary for PreBackupAction in `pkg/plugin/proto/PreBackupAction.proto`.
```protobuf
message PreBackupActionExecuteRequest {
...
}
service PreBackupAction {
rpc Execute(PreBackupActionExecuteRequest) returns (Empty)
}
```
Once these are written, then a client and server implementation can be written in `pkg/plugin/framework/pre_backup_action_client.go` and `pkg/plugin/framework/pre_backup_action_server.go`, respectively.
2. Protobuf definitions will be necessary for PostBackupAction in `pkg/plugin/proto/PostBackupAction.proto`.
```protobuf
message PostBackupActionExecuteRequest {
...
}
service PostBackupAction {
rpc Execute(PostBackupActionExecuteRequest) returns (Empty)
}
```
Once these are written, then a client and server implementation can be written in `pkg/plugin/framework/post_backup_action_client.go` and `pkg/plugin/framework/post_backup_action_server.go`, respectively.
3. Protobuf definitions will be necessary for PreRestoreAction in `pkg/plugin/proto/PreRestoreAction.proto`.
```protobuf
message PreRestoreActionExecuteRequest {
...
}
service PreRestoreAction {
rpc Execute(PreRestoreActionExecuteRequest) returns (Empty)
}
```
Once these are written, then a client and server implementation can be written in `pkg/plugin/framework/pre_restore_action_client.go` and `pkg/plugin/framework/pre_restore_action_server.go`, respectively.
4. Protobuf definitions will be necessary for PostRestoreAction in `pkg/plugin/proto/PostRestoreAction.proto`.
```protobuf
message PostRestoreActionExecuteRequest {
...
}
service PostRestoreAction {
rpc Execute(PostRestoreActionExecuteRequest) returns (Empty)
}
```
Once these are written, then a client and server implementation can be written in `pkg/plugin/framework/post_restore_action_client.go` and `pkg/plugin/framework/post_restore_action_server.go`, respectively.
### Restartable Plugins
Similar to the `RestoreItemAction` and `BackupItemAction` plugins, restartable processes will need to be implemented (with the difference that there is no `AppliedTo()` method).
In `pkg/plugin/clientmgmt/`, add
1. `restartable_pre_backup_action.go`, creating the following unexported type:
```go
type restartablePreBackupAction struct {
key kindAndName
sharedPluginProcess RestartableProcess
}
func newRestartablePreBackupAction(name string, sharedPluginProcess RestartableProcess) *restartablePreBackupAction {
// ...
}
func (r *restartablePreBackupAction) getPreBackupAction() (velero.PreBackupAction, error) {
// ...
}
func (r *restartablePreBackupAction) getDelegate() (velero.PreBackupAction, error) {
// ...
}
// Execute restarts the plugin's process if needed, then delegates the call.
func (r *restartablePreBackupAction) Execute(input *velero.PreBackupActionInput) (error) {
// ...
}
```
2. `restartable_post_backup_action.go`, creating the following unexported type:
```go
type restartablePostBackupAction struct {
key kindAndName
sharedPluginProcess RestartableProcess
}
func newRestartablePostBackupAction(name string, sharedPluginProcess RestartableProcess) *restartablePostBackupAction {
// ...
}
func (r *restartablePostBackupAction) getPostBackupAction() (velero.PostBackupAction, error) {
// ...
}
func (r *restartablePostBackupAction) getDelegate() (velero.PostBackupAction, error) {
// ...
}
// Execute restarts the plugin's process if needed, then delegates the call.
func (r *restartablePostBackupAction) Execute(input *velero.PostBackupActionInput) (error) {
// ...
}
```
3. `restartable_pre_restore_action.go`, creating the following unexported type:
```go
type restartablePreRestoreAction struct {
key kindAndName
sharedPluginProcess RestartableProcess
}
func newRestartablePreRestoreAction(name string, sharedPluginProcess RestartableProcess) *restartablePreRestoreAction {
// ...
}
func (r *restartablePreRestoreAction) getPreRestoreAction() (velero.PreRestoreAction, error) {
// ...
}
func (r *restartablePreRestoreAction) getDelegate() (velero.PreRestoreAction, error) {
// ...
}
// Execute restarts the plugin's process if needed, then delegates the call.
func (r *restartablePreRestoreAction) Execute(input *velero.PreRestoreActionInput) (error) {
// ...
}
```
4. `restartable_post_restore_action.go`, creating the following unexported type:
```go
type restartablePostRestoreAction struct {
key kindAndName
sharedPluginProcess RestartableProcess
}
func newRestartablePostRestoreAction(name string, sharedPluginProcess RestartableProcess) *restartablePostRestoreAction {
// ...
}
func (r *restartablePostRestoreAction) getPostRestoreAction() (velero.PostRestoreAction, error) {
// ...
}
func (r *restartablePostRestoreAction) getDelegate() (velero.PostRestoreAction, error) {
// ...
}
// Execute restarts the plugin's process if needed, then delegates the call.
func (r *restartablePostRestoreAction) Execute(input *velero.PostRestoreActionInput) (error) {
// ...
}
```
### Plugin Manager Changes
Add the following methods to the `Manager` interface in `pkg/plugin/clientmgmt/manager.go`:
```go
type Manager interface {
...
// GetPreBackupAction returns a PreBackupAction plugin for name.
GetPreBackupAction(name string) (PreBackupAction, error)
// GetPreBackupActions returns all PreBackupAction plugins.
GetPreBackupActions() ([]PreBackupAction, error)
// GetPostBackupAction returns a PostBackupAction plugin for name.
GetPostBackupAction(name string) (PostBackupAction, error)
// GetPostBackupActions returns all PostBackupAction plugins.
GetPostBackupActions() ([]PostBackupAction, error)
// GetPreRestoreAction returns a PreRestoreAction plugin for name.
GetPreRestoreAction(name string) (PreRestoreAction, error)
// GetPreRestoreActions returns all PreRestoreAction plugins.
GetPreRestoreActions() ([]PreRestoreAction, error)
// GetPostRestoreAction returns a PostRestoreAction plugin for name.
GetPostRestoreAction(name string) (PostRestoreAction, error)
// GetPostRestoreActions returns all PostRestoreAction plugins.
GetPostRestoreActions() ([]PostRestoreAction, error)
}
```
`GetPreBackupAction` and `GetPreBackupActions` will invoke the `restartablePreBackupAction` implementations.
`GetPostBackupAction` and `GetPostBackupActions` will invoke the `restartablePostBackupAction` implementations.
`GetPreRestoreAction` and `GetPreRestoreActions` will invoke the `restartablePreRestoreAction` implementations.
`GetPostRestoreAction` and `GetPostRestoreActions` will invoke the `restartablePostRestoreAction` implementations.
### How to invoke the Plugins
#### Getting Pre/Post Backup Actions
Getting Actions on `backup_controller.go` in `runBackup`:
```go
backupLog.Info("Getting PreBackup actions")
preBackupActions, err := pluginManager.GetPreBackupActions()
if err != nil {
return err
}
backupLog.Info("Getting PostBackup actions")
postBackupActions, err := pluginManager.GetPostBackupActions()
if err != nil {
return err
}
```
#### Pre Backup Actions Plugins
Calling the Pre Backup actions:
```go
for _, preBackupAction := range preBackupActions {
err := preBackupAction.Execute(backup.Backup)
if err != nil {
backup.Backup.Status.Phase = velerov1api.BackupPhaseFailedPreBackupActions
return err
}
}
```
#### Post Backup Actions Plugins
Calling the Post Backup actions:
```go
for _, postBackupAction := range postBackupActions {
err := postBackupAction.Execute(backup.Backup)
if err != nil {
postBackupLog.Error(err)
}
}
```
#### Getting Pre/Post Restore Actions
Getting Actions on `restore_controller.go` in `runValidatedRestore`:
```go
restoreLog.Info("Getting PreRestore actions")
preRestoreActions, err := pluginManager.GetPreRestoreActions()
if err != nil {
return errors.Wrap(err, "error getting pre-restore actions")
}
restoreLog.Info("Getting PostRestore actions")
postRestoreActions, err := pluginManager.GetPostRestoreActions()
if err != nil {
return errors.Wrap(err, "error getting post-restore actions")
}
```
#### Pre Restore Actions Plugins
Calling the Pre Restore actions:
```go
for _, preRestoreAction := range preRestoreActions {
err := preRestoreAction.Execute(restoreReq.Restore)
if err != nil {
restoreReq.Restore.Status.Phase = velerov1api.RestorePhaseFailedPreRestoreActions
return errors.Wrap(err, "error executing pre-restore action")
}
}
```
#### Post Restore Actions Plugins
Calling the Post Restore actions:
```go
for _, postRestoreAction := range postRestoreActions {
err := postRestoreAction.Execute(restoreReq.Restore)
if err != nil {
postRestoreLog.Error(err.Error())
}
}
```
### Giving the User the Option to Skip the Execution of the Plugins
Velero plugins are loaded as init containers. Adding or removing plugins triggers a restart of the Velero controller.
Moreover, if a plugin fails to load for any reason (for example, a Docker Hub image rate limit), Velero does not start at all.
In other words, the constant loading/unloading of plugins can disrupt the Velero controller, so it cannot be the only method to selectively run the actions from these plugins.
As part of this proposal, we want to give the Velero user the ability to skip the execution of the plugins via annotations on the Velero Backup and Restore objects.
If one of these annotations exists, the given plugin, referenced below as `plugin-name`, will be skipped.
Backup Object Annotations:
```
<plugin-name>/prebackup=skip
<plugin-name>/postbackup=skip
```
Restore Object Annotations:
```
<plugin-name>/prerestore=skip
<plugin-name>/postrestore=skip
```
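A hedged sketch of how the controllers might honor these annotations before invoking each plugin; the helper and the way the plugin name is derived are assumptions for illustration:
```go
package controller

// shouldSkipAction is a hypothetical check for the skip annotations, e.g. an
// annotation "<plugin-name>/prebackup: skip" on the Backup or Restore object.
func shouldSkipAction(annotations map[string]string, pluginName, hook string) bool {
	return annotations[pluginName+"/"+hook] == "skip"
}
```
Each action loop would then skip a plugin whose annotation is present, e.g. `shouldSkipAction(backup.Annotations, name, "prebackup")`.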
## Alternatives Considered
An alternative to these plugin hooks is to implement all the pre/post backup/restore logic _outside_ Velero.
In this case, one would need to write an external controller that works similarly to what [Konveyor Crane](https://github.com/konveyor/mig-controller/blob/master/pkg/controller/migmigration/quiesce.go) does today when quiescing applications.
We find this a viable way, but we think that Velero users can benefit from Velero having greater embedded capabilities, which will allow users to write or load plugin extensions without relying on external components.
## Security Considerations
The plugins will only be invoked if loaded at the user's discretion.
It is recommended to check plugins for security vulnerabilities before executing them.
## Compatibility
In terms of backward compatibility, this design should stay compatible with most Velero installations that are upgrading.
If plugins are not present, then the backup/restore process should proceed the same way it worked before their inclusion.
## Implementation
The implementation dependencies are roughly in the order as they are described in the [Detailed Design](#detailed-design) section.
## Open Issues

View File

@@ -1,289 +0,0 @@
# Plugin Versioning
## Abstract
This proposal outlines an approach to support versioning of Velero's plugin APIs to enable changes to those APIs.
It will allow for backwards compatible changes to be made, such as the addition of new plugin methods, but also backwards incompatible changes such as method removal or method signature changes.
## Background
When changes are made to Velero's plugin APIs, there is no mechanism for the Velero server to communicate the version of the API that is supported, or for plugins to communicate what version they implement.
This means that any modification to a plugin API is a backwards incompatible change as it requires all plugins which implement the API to update and implement the new method.
There are several components involved to use plugins within Velero.
From the perspective of the core Velero codebase, all plugin kinds (e.g. `ObjectStore`, `BackupItemAction`) are defined by a single API interface and all interactions with plugins are managed by a plugin manager which provides an implementation of the plugin API interface for Velero to use.
Velero communicates with plugins via gRPC.
The core Velero project provides a framework (using the [go-plugin project](https://github.com/hashicorp/go-plugin)) for plugin authors to use to implement their plugins which manages the creation of gRPC servers and clients.
Velero plugins import the Velero plugin library in order to use this framework.
When a change is made to a plugin API, it needs to be made to the Go interface used by the Velero codebase, and also to the rpc service definition which is compiled to form part of the framework.
As each plugin kind is defined by a single interface, when a plugin imports the latest version of the Velero framework, it will need to implement the new APIs in order to build and run successfully.
If a plugin does not use the latest version of the framework, and is used with a newer version of Velero that expects the plugin to implement those methods, this will result in a runtime error as the plugin is incompatible.
With this proposal, we aim to break this coupling and introduce plugin API versions.
## Scenarios to Support
The following describes interactions between Velero and its plugins that will be supported with the implementation of this proposal.
For the purposes of this list, we will refer to existing Velero and plugin versions as `v1` and all following versions as version `n`.
Velero client communicating with plugins or plugin client calling other plugins:
- Version `n` client will be able to communicate with Version `n` plugin
- Version `n` client will be able to communicate with all previous versions of the plugin (Version `n-1` back to `v1`)
Velero plugins importing Velero framework:
- `v1` plugin built against Version `n` Velero framework
- A plugin may choose to only implement a `v1` API, but it must be able to be built using Version `n` of the Velero framework
## Goals
- Allow plugin APIs to change without requiring all plugins to implement the latest changes (even if they upgrade the version of Velero that is imported)
- Allow plugins to choose which plugin versions they support and enable them to support multiple versions
- Support breaking changes in the plugin APIs such as method removal or method signature changes
- Establish a design process for modifying plugin APIs such as method addition and removal and signature changes
- Establish a process for newer Velero clients to use older versions of a plugin API through adaptation
## Non Goals
- Change how plugins are managed or added
- Allow older plugin clients to communicate with new versions of plugins
## High-Level Design
With each change to a plugin API, a new version of the plugin interface and the proto service definition will be created which describes the new plugin API.
The plugin framework will be adapted to allow these new plugin versions to be registered.
Plugins can opt to implement any or all versions of an API, however Velero will always attempt to use the latest version, and the plugin management will be modified to adapt earlier versions of a plugin to be compatible with the latest API where possible.
Under the existing plugin framework, any new plugin version will be treated as a new plugin with a new kind.
The plugin manager (which provides implementations of a plugin to Velero) will include an adapter layer which will manage the different versions and provide the adaptation for versions which do not implement the latest version of the plugin API.
Providing an adaptation layer enables Velero and other plugin clients to use an older version of a plugin if it can be safely adapted.
As the plugins will be able to introduce backwards incompatible changes, it will _not_ be possible for older version of Velero to use plugins which only support the latest versions of the plugin APIs.
Although adding new rpc methods to a service is considered a backwards compatible change within gRPC, due to the way the proto definitions are compiled and included in the framework used by plugins, this will require every plugin to implement the new methods.
Instead, we are opting to treat the addition of a method to an API as one requiring versioning.
The addition of optional fields to existing structs which are used as parameters to or return values of API methods will not be considered as a change requiring versioning.
These kinds of changes do not modify method signatures and have been safely made in the past with no impact on existing plugins.
## Detailed Design
The following areas will need to be adapted to support plugin versioning.
### Plugin Interface Definitions
To provide versioned plugins, any change to a plugin interface (method addition, removal, or signature change) will require a new versioned interface to be created.
Currently, all plugin interface definitions reside in `pkg/plugin/velero` in a file corresponding to their plugin kind.
These files will be rearranged to be grouped by kind and then versioned: `pkg/plugin/velero/<plugin_kind>/<version>/`.
The following are examples of how each change may be treated:
#### Complete Interface Change
If the entire `ObjectStore` interface is being changed such that no previous methods are being included, a file would be added to `pkg/plugin/velero/objectstore/v2/` and would contain the new interface definition:
```
type ObjectStore interface {
// Only include new methods that the new API version will support
NewMethod()
// ...
}
```
#### Method Addition
If a method is being added to the `ObjectStore` API, a file would be added to `pkg/plugin/velero/objectstore/v2/` and may contain a new API definition as follows:
```
import "github.com/vmware-tanzu/velero/pkg/plugin/velero/objectstore/v1"
type ObjectStore interface {
// Import all the methods from the previous version of the API if they are to be included as is
v1.ObjectStore
// Provide definitions of any new methods
NewMethod()
}
```
#### Method Removal
If a method is being removed from the `ObjectStore` API, a file would be added to `pkg/plugin/velero/objectstore/v2/` and may contain a new API definition as follows:
```
type ObjectStore interface {
// Methods which are required from the previous API version must be included, for example
Init(config)
PutObject(bucket, key, body)
// ...
// Methods which are to be removed are not included
}
```
#### Method Signature modification
If a method signature in the `ObjectStore` API is being modified, a file would be added to `pkg/plugin/velero/objectstore/v2/` and may contain a new API definition as follows:
```
type ObjectStore interface {
// Methods which are required from the previous API version must be included, for example
Init(config)
PutObject(bucket, key, body)
// ...
// Provide new definitions for methods which are being modified
List(bucket, prefix, newParameter)
}
```
### Proto Service Definitions
The proto service definitions of the plugins will also be versioned and arranged by their plugin kind.
Currently, all the proto definitions reside under `pkg/plugin/proto` in a file corresponding to their plugin kind.
These files will be rearranged to be grouped by kind and then versioned: `pkg/plugin/proto/<plugin_kind>/<version>`.
The scripts to compile the proto service definitions will need to be updated to place the generated Go code under a matching directory structure.
It is not possible to import an existing proto service into a new one, so any methods will need to be duplicated across versions if they are required by the new version.
The message definitions can be shared however, so these could be extracted from the service definition files and placed in a file that can be shared across all versions of the service.
### Plugin Framework
To allow plugins to register which versions of the API they implement, the plugin framework will need to be adapted to accept new versions.
Currently, the plugin manager stores a [`map[string]RestartableProcess`](https://github.com/vmware-tanzu/velero/blob/main/pkg/plugin/clientmgmt/manager.go#L69), where the string key is the binary name for the plugin process (e.g. "velero-plugin-for-aws").
Each `RestartableProcess` contains a [`map[kindAndName]interface{}`](https://github.com/vmware-tanzu/velero/blob/main/pkg/plugin/clientmgmt/restartable_process.go#L60) which represents each of the unique plugin implementations provided by that binary.
[`kindAndName`](https://github.com/vmware-tanzu/velero/blob/main/pkg/plugin/clientmgmt/registry.go#L42) is a struct which combines the plugin kind (`ObjectStore`, `VolumeSnapshotter`) and the plugin name ("velero.io/aws", "velero.io/azure").
Each plugin version registration must be unique (to allow for multiple versions to be implemented within the same plugin binary).
This will be achieved by adding a specific registration method for each version to the Server interface in the plugin framework.
For example, if adding a V2 `RestoreItemAction` plugin, the Server interface would be modified to add the `RegisterRestoreItemActionV2` method.
This would require [adding a new plugin Kind const](https://github.com/vmware-tanzu/velero/blob/main/pkg/plugin/framework/plugin_kinds.go#L28-L46) to represent the new plugin version, e.g. `PluginKindRestoreItemActionV2`.
It also requires the creation of a new implementation of the go-plugin interface ([example](https://github.com/vmware-tanzu/velero/blob/main/pkg/plugin/framework/object_store.go)) to support that version and use the generated gRPC code from the proto definition (including a client and server implementation).
The Server will also need to be adapted to recognize this new plugin Kind and to serve the new implementation.
Existing plugin Kind consts and registration methods will be left unchanged and will correspond to the current version of the plugin APIs (assumed to be v1).
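As an illustration of the registration pattern described above, here is a hedged sketch with simplified stand-in types; the real framework's `Server` interface and initializer signature differ in detail:
```go
package framework

// PluginKind and HandlerInitializer are simplified stand-ins for the
// framework's real types.
type PluginKind string

type HandlerInitializer func() (interface{}, error)

// PluginKindRestoreItemActionV2 is the hypothetical kind const for the v2
// version of the RestoreItemAction plugin API.
const PluginKindRestoreItemActionV2 PluginKind = "RestoreItemActionV2"

// Server sketches a per-version registration method; each new plugin API
// version gets its own method so registrations stay unique per version.
type Server interface {
	RegisterRestoreItemActionV2(name string, initializer HandlerInitializer) Server
}
```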
### Plugin Manager
The plugin manager is responsible for managing the lifecycle of plugins.
It provides an interface which is used by Velero to retrieve an instance of a plugin kind with a specific name (e.g. `ObjectStore` with the name "velero.io/aws").
The manager contains a registry of all available plugins which is populated during the main Velero server startup.
When the plugin manager is requested to provide a particular plugin, it checks the registry for that plugin kind and name.
If it is available in the registry, the manager retrieves a `RestartableProcess` for the plugin binary, creating it if it does not already exist.
That `RestartableProcess` is then used by individual restartable implementations of a plugin kind (e.g. `restartableObjectStore`, `restartableVolumeSnapshotter`).
As new plugin versions are added, the plugin manager will be modified to always retrieve the latest version of a plugin kind.
This is to allow the remainder of the Velero codebase to assume that it will always interact with the latest version of a plugin.
If the latest version of a plugin is not available, it will attempt to fall back to previous versions and use an implementation adapted to the latest version if available.
It will be up to the author of new plugin versions to determine whether a previous version of a plugin can be adapted to work with the interface of the new version.
For each plugin kind, a new `Restartable<PluginKind>` struct will be introduced which will contain the plugin Kind and a function, `Get`, which will instantiate a restartable instance of that plugin kind and perform any adaptation required to make it compatible with the latest version.
For example, `RestartableObjectStore` or `RestartableVolumeSnapshotter`.
For each restartable plugin kind, a new function will be introduced which will return a slice of `Restartable<PluginKind>` objects, sorted by version in descending order.
The manager will iterate through the list of `Restartable<PluginKind>`s and will check the registry for the given plugin kind and name.
If the requested version is not found, it will skip and continue to iterate, attempting to fetch previous versions of the plugin kind.
Once the requested version is found, the `Get` function will be called, returning the restartable implementation of the latest version of that plugin Kind.
```
type RestartableObjectStore struct {
kind framework.PluginKind
// Get returns a restartable ObjectStore for the given name and process, wrapping if necessary
Get func(name string, restartableProcess RestartableProcess) v2.ObjectStore
}
func (m *manager) restartableObjectStores() []RestartableObjectStore {
return []RestartableObjectStore{
{
kind: framework.PluginKindObjectStoreV2,
Get: newRestartableObjectStoreV2,
},
{
kind: framework.PluginKindObjectStore,
Get: func(name string, restartableProcess RestartableProcess) v2.ObjectStore {
// Adapt the existing restartable v1 plugin to be compatible with the v2 interface
return newAdaptedV1ObjectStore(newRestartableObjectStore(name, restartableProcess))
},
},
}
}
// GetObjectStore returns a restartableObjectStore for name.
func (m *manager) GetObjectStore(name string) (v2.ObjectStore, error) {
name = sanitizeName(name)
for _, restartableObjStore := range m.restartableObjectStores() {
restartableProcess, err := m.getRestartableProcess(restartableObjStore.kind, name)
if err != nil {
// Check if plugin was not found
if errors.Is(err, &pluginNotFoundError{}) {
continue
}
return nil, err
}
return restartableObjStore.Get(name, restartableProcess), nil
}
return nil, fmt.Errorf("unable to get valid ObjectStore for %q", name)
}
```
If the previous version is not available, or can not be adapted to the latest version, it should not be included in the `restartableObjectStores` slice.
This will result in an error being returned as is currently the case when a plugin implementation for a particular kind and provider can not be found.
There are situations where it may be beneficial to check at the point where a plugin API call is made whether it implements a specific version of the API.
This is something that can be addressed with future amendments to this design, however it does not seem to be necessary at this time.
#### Plugin Adaptation
When a new plugin API version is being proposed, it will be up to the author and the maintainer team to determine whether older versions of an API can be safely adapted to the latest version.
An adaptation will implement the latest version of the plugin API interface but will use the methods from the version that is being adapted.
In cases where the methods signatures remain the same, the adaptation layer will call through to the same method in the version being adapted.
Examples where an adaptation may be safe:
- A method signature is being changed to add a new parameter but the parameter could be optional (for example, adding a context parameter). The adaptation could call through to the method provided in the previous version but omit the parameter.
- A method signature is being changed to remove a parameter, but it is safe to pass a default value to the previous version. The adaptation could call through to the method provided in the previous version but use a default value for the parameter.
- A new method is being added but does not impact any existing behaviour of Velero (for example, a new method which will allow Velero to [wait for additional items to be ready](https://github.com/vmware-tanzu/velero/blob/main/design/wait-for-additional-items.md)). The adaptation would return a value which allows the existing behaviour to be performed.
- A method is being deleted as it is no longer used. The adaptation would call through to any methods which are still included but would omit the deleted method in the adaptation.
Examples where an adaptation may not be safe:
- A new method is added which is used to provide new critical functionality in Velero. If this functionality can not be replicated using existing plugin methods in previous API versions, this should not be adapted and instead the plugin manager should return an error indicating that the plugin implementation can not be found.
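A hedged sketch of a safe adaptation: wrapping a v1 object store so it satisfies a hypothetical v2 interface that adds a context parameter. Both interfaces here are illustrative, not the real plugin APIs:
```go
package clientmgmt

import "context"

// Illustrative interfaces: the hypothetical v2 API adds a context parameter.
type ObjectStoreV1 interface {
	PutObject(bucket, key string, body []byte) error
}

type ObjectStoreV2 interface {
	PutObject(ctx context.Context, bucket, key string, body []byte) error
}

// adaptedV1ObjectStore satisfies the v2 interface by dropping the context,
// which is safe because the v1 implementation never used one.
type adaptedV1ObjectStore struct {
	v1 ObjectStoreV1
}

func (a adaptedV1ObjectStore) PutObject(_ context.Context, bucket, key string, body []byte) error {
	return a.v1.PutObject(bucket, key, body)
}
```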
### Restartable Plugin Process
As new versions of plugins are added, new restartable implementations of plugins will also need to be created.
These are currently located within `pkg/plugin/clientmgmt` but will be rearranged to be grouped by kind and version like other plugin files.
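As a rough sketch of the pattern, and assuming stub types in place of Velero's real `RestartableProcess` machinery, a versioned restartable wrapper might look like the following. The constructor name matches `newRestartableObjectStoreV2` used in the manager code above, but the body shown here is illustrative only.
```go
package clientmgmt

// RestartableProcess stands in for the shared plugin process that
// restartable wrappers resolve their client interfaces through.
// This is a stub, not Velero's actual type.
type RestartableProcess interface {
	GetByKindAndName(kind, name string) (interface{}, error)
}

// restartableObjectStoreV2 is the v2-specific wrapper; under the proposed
// layout it would live alongside other wrappers grouped by kind and version.
type restartableObjectStoreV2 struct {
	kind, name          string
	sharedPluginProcess RestartableProcess
}

func newRestartableObjectStoreV2(name string, sharedPluginProcess RestartableProcess) *restartableObjectStoreV2 {
	return &restartableObjectStoreV2{
		kind:                "ObjectStoreV2",
		name:                name,
		sharedPluginProcess: sharedPluginProcess,
	}
}
```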
## Versioning Considerations
It should be noted that if changes are being made to a plugin's API, it will only be necessary to bump the API version once within a release cycle, regardless of how many changes are made within that cycle.
This is because the changes will only be available to consumers when they upgrade to the next minor version of the Velero library.
New plugin API versions will not be introduced or backported to patch releases.
Once a new minor or major version of Velero has been released, however, any further changes will need to follow the process above and use a new API version.
## Alternatives Considered
### Relying on gRPC's backwards compatibility when adding new methods
One approach for adapting the plugin APIs would have been to rely on the fact that adding methods to gRPC services is a backwards compatible change.
This approach would allow older clients to communicate with newer plugins as the existing interface would still be provided.
This was considered but ruled out, as our current framework would require any plugin that recompiles using the latest version of the framework to adapt to the new version.
Also, without specific versioned interfaces, it would require checking at runtime which methods a given plugin implementation supports.
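For illustration, a runtime check of that sort might look like the following sketch; the interface and method names are hypothetical, not part of Velero's API.
```go
package main

import (
	"context"
	"fmt"
)

// ObjectStore is a hypothetical base interface.
type ObjectStore interface {
	PutObject(bucket, key string) error
}

// contextAwarePutter is the ad hoc interface a caller would have to probe for.
type contextAwarePutter interface {
	PutObjectWithContext(ctx context.Context, bucket, key string) error
}

type legacyStore struct{}

func (legacyStore) PutObject(bucket, key string) error { return nil }

func put(ctx context.Context, store ObjectStore, bucket, key string) error {
	// Runtime probing: brittle and invisible in the type system, which is
	// why the design prefers explicit versioned interfaces instead.
	if cp, ok := store.(contextAwarePutter); ok {
		return cp.PutObjectWithContext(ctx, bucket, key)
	}
	return store.PutObject(bucket, key)
}

func main() {
	fmt.Println(put(context.Background(), legacyStore{}, "bucket", "key"))
}
```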
## Compatibility
This design doc aims to allow plugin API changes to be made in a manner that may provide some backwards compatibility.
Older versions of Velero will not be able to make use of new plugin versions; however, they may continue to use previous versions of a plugin API if supported by the plugin.
All compatibility concerns are addressed earlier in the document.
## Implementation
This design document primarily outlines an approach to allow future plugin API changes to be made.
However, there are changes to the existing code base that will be made to allow plugin authors to more easily propose and introduce changes to these APIs.
* Plugin interface definitions (currently in `pkg/plugin/velero`) will be rearranged to be grouped by kind and then versioned: `pkg/plugin/velero/<plugin_kind>/<version>/` (an illustrative layout is sketched after this list).
* Proto definitions (currently in `pkg/plugin/proto`) will be rearranged to be grouped by kind and then versioned: `pkg/plugin/proto/<plugin_kind>/<version>`.
* This will also require changes to the `make update` build task to correctly find the new proto location and output to the versioned directories.
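For example, taking ObjectStore as the kind, the rearranged tree might look like this (file names are illustrative):
```
pkg/plugin/velero/objectstore/v1/object_store.go
pkg/plugin/velero/objectstore/v2/object_store.go
pkg/plugin/proto/objectstore/v1/ObjectStore.proto
pkg/plugin/proto/objectstore/v2/ObjectStore.proto
```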
It is anticipated that changes to the plugin APIs will be made as part of the 1.9 release cycle.
To assist with this work, an additional follow-up task to the ones listed above would be to prepare a V2 version of each of the existing plugins.
These new versions will not yet provide any new API methods, but will provide a layout for new additions to be made.
## Open Issues

go.mod (36 changed lines)
@@ -5,12 +5,11 @@ go 1.17
require (
cloud.google.com/go/storage v1.10.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-sdk-for-go v61.4.0+incompatible
github.com/Azure/azure-sdk-for-go v42.0.0+incompatible
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/Azure/go-autorest/autorest v0.11.21
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
github.com/Azure/go-autorest/autorest/to v0.3.0
github.com/apex/log v1.9.0
github.com/aws/aws-sdk-go v1.28.2
github.com/bombsimon/logrusr v1.1.0
github.com/evanphx/json-patch v4.11.0+incompatible
@@ -18,16 +17,15 @@ require (
github.com/gobwas/glob v0.2.3
github.com/gofrs/uuid v3.2.0+incompatible
github.com/golang/protobuf v1.5.2
github.com/google/go-cmp v0.5.6
github.com/google/uuid v1.2.0
github.com/hashicorp/go-hclog v0.14.1
github.com/hashicorp/go-plugin v1.4.3
github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26
github.com/joho/godotenv v1.3.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.16.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.2
github.com/prometheus/client_golang v1.11.0
github.com/robfig/cron v1.1.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/afero v1.6.0
@@ -36,9 +34,7 @@ require (
github.com/stretchr/testify v1.7.0
github.com/vmware-tanzu/crash-diagnostics v0.3.7
golang.org/x/mod v0.4.2
golang.org/x/net v0.0.0-20210525063256-abc453219eb5
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
google.golang.org/api v0.56.0
google.golang.org/grpc v1.40.0
k8s.io/api v0.22.2
@@ -48,8 +44,8 @@ require (
k8s.io/client-go v0.22.2
k8s.io/klog v1.0.0
k8s.io/kube-aggregator v0.19.12
sigs.k8s.io/cluster-api v1.0.0
sigs.k8s.io/controller-runtime v0.10.2
sigs.k8s.io/yaml v1.3.0
)
require (
@@ -62,15 +58,18 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-logr/zapr v0.4.0 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
@@ -78,7 +77,7 @@ require (
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mattn/go-colorable v0.1.9 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
@@ -88,13 +87,13 @@ require (
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/oklog/run v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/vladimirvivien/gexe v0.1.1 // indirect
go.opencensus.io v0.23.0 // indirect
@@ -103,11 +102,11 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 // indirect
@@ -121,6 +120,7 @@ require (
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2

go.sum (144 changed lines)
@@ -34,6 +34,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -47,8 +48,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v61.4.0+incompatible h1:BF2Pm3aQWIa6q9KmxyF1JYKYXtVw67vtvu2Wd54NGuY=
github.com/Azure/azure-sdk-for-go v61.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v42.0.0+incompatible h1:yz6sFf5bHZ+gEOQVuK5JhPqTTAmv+OvSLSaqgzqaCwY=
github.com/Azure/azure-sdk-for-go v42.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -95,6 +96,9 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -108,20 +112,14 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.28.2 h1:j5IXG9CdyLfcVfICqo1PXVv+rua+QQHbkXuvuU/JF+8=
github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -133,6 +131,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bombsimon/logrusr v1.1.0 h1:Y03FI4Z/Shyrc9jF26vuaUbnPxC5NMJnTtJA/3Lihq8=
github.com/bombsimon/logrusr v1.1.0/go.mod h1:Jq0nHtvxabKE5EMwAAdgTaz7dfWE8C4i11NOltxGQpc=
@@ -141,9 +140,9 @@ github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -156,6 +155,10 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/corefile-migration v1.0.13 h1:ld5RswmH1xjqBUEukw4QxC1PakLNNoVlsZEV8FGwoV8=
github.com/coredns/corefile-migration v1.0.13/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -174,13 +177,17 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/drone/envsubst/v2 v2.0.0-20210615175204-7bf45dbf5372/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
@@ -199,10 +206,14 @@ github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMi
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -210,6 +221,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -245,6 +257,8 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -288,6 +302,7 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
@@ -304,6 +319,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -347,6 +364,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -356,17 +374,22 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26 h1:sADP8l/FAtMyWJ9GIcQT/04Ae80ZZ75ogOrtW0DIZhc=
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -377,8 +400,11 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -391,8 +417,6 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
@@ -400,14 +424,12 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -429,10 +451,11 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0 h1:ipLtV9ubLEYx42YvwDa12eVPQvjuGZoPdbCozGzVNRc=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -442,39 +465,43 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
@@ -483,9 +510,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -500,6 +526,7 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -509,17 +536,19 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -529,14 +558,14 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -546,27 +575,26 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY=
github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -576,10 +604,7 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -589,6 +614,7 @@ github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
@@ -604,6 +630,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -617,13 +644,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc=
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -692,11 +712,11 @@ go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -742,7 +762,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -764,6 +783,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -791,9 +811,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -821,7 +840,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -830,6 +848,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -843,6 +862,8 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -901,9 +922,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@@ -943,6 +963,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1033,7 +1054,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1089,7 +1109,7 @@ google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwy
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1146,6 +1166,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -1162,7 +1183,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1187,6 +1207,7 @@ k8s.io/apimachinery v0.19.12/go.mod h1:9eb44nUQSsz9QZiilFRuMj3ZbTmoWolU8S2gnXoRM
k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.19.12/go.mod h1:ldZAZTNIKfMMv/UUEhk6UyTXC0/34iRdNFHo+MJOPc4=
k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
k8s.io/cli-runtime v0.22.2 h1:fsd9rFk9FSaVq4SUq1fM27c8CFGsYZUJ/3BkgmjYWuY=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
@@ -1194,12 +1215,15 @@ k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
k8s.io/client-go v0.19.12/go.mod h1:BAGKQraZ6fDmXhT46pGXWZQQqN7P4E0BJux0+9O6Gt0=
k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/cluster-bootstrap v0.22.2 h1:jP6Nkp3CdSfr50cAn/7WGsNS52zrwMhvr0V+E3Vkh/w=
k8s.io/cluster-bootstrap v0.22.2/go.mod h1:ZkmQKprEqvrUccMnbRHISsMscA1dsQ8SffM9nHq6CgE=
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.19.12/go.mod h1:ADrDvaUQWGn4a8lX0ONtzb7uFmDRQOMSYIMk1qWIAx8=
k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/component-base v0.19.12/go.mod h1:tpwExE0sY3A7CwtlxGL7SnQOdQfUlnFybT6GmAD+z/s=
k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M=
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
k8s.io/component-helpers v0.22.2/go.mod h1:+N61JAR9aKYSWbnLA88YcFr9K/6ISYvRNybX7QW7Rs8=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1214,6 +1238,8 @@ k8s.io/kube-aggregator v0.19.12/go.mod h1:K76wPd03pSHEmS1FgJOcpryac5C3va4cbCvSu+
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kubectl v0.22.2/go.mod h1:BApg2j0edxLArCOfO0ievI27EeTQqBDMNU9VQH734iQ=
k8s.io/metrics v0.22.2/go.mod h1:GUcsBtpsqQD1tKFS/2wCKu4ZBowwRncLOJH1rgWs3uw=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
@@ -1223,9 +1249,13 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/cluster-api v1.0.0 h1:GcVA2ObQTXo/+jzSLWPy4Bd3NeiwJyAB8n19kyJIotA=
sigs.k8s.io/cluster-api v1.0.0/go.mod h1:V230kMSaYENTUcx1QRkoRCklb3vfphQGV3/z4ODNGWo=
sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc=
sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs=
sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go=
sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=


@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.11
FROM golang:1.17.6
ARG GOPROXY


@@ -56,24 +56,26 @@ elif [[ "$triggeredBy" == "tags" ]]; then
TAG=$(echo $GITHUB_REF | cut -d / -f 3)
fi
TAG_LATEST=false
if [[ ! -z "$TAG" ]]; then
echo "We're building tag $TAG"
VERSION="$TAG"
if [[ "$BRANCH" == "main" ]]; then
VERSION="$BRANCH"
elif [[ ! -z "$TAG" ]]; then
# Explicitly checkout tags when building from a git tag.
# This is not needed when building from main
git fetch --tags
# Calculate the latest release if there's a tag.
highest_release
if [[ "$TAG" == "$HIGHEST" ]]; then
TAG_LATEST=true
fi
VERSION="$TAG"
else
echo "We're on branch $BRANCH"
VERSION="$BRANCH"
if [[ "$VERSION" == release-* ]]; then
VERSION=${VERSION}-dev
fi
echo "We're not on main and we're not building a tag, exit early."
exit 0
fi
# Assume we're not tagging `latest` by default, and never on main.
TAG_LATEST=false
if [[ "$BRANCH" == "main" ]]; then
echo "Building main, not tagging latest."
elif [[ "$TAG" == "$HIGHEST" ]]; then
TAG_LATEST=true
fi
if [[ -z "$BUILDX_PLATFORMS" ]]; then
@@ -85,7 +87,6 @@ echo "Highest tag found: $HIGHEST"
echo "BRANCH: $BRANCH"
echo "TAG: $TAG"
echo "TAG_LATEST: $TAG_LATEST"
echo "VERSION: $VERSION"
echo "BUILDX_PLATFORMS: $BUILDX_PLATFORMS"
echo "Building and pushing container images."


@@ -38,9 +38,6 @@
# This script is meant to be a combination of documentation and executable.
# If you have questions at any point, please stop and ask!
# Fail on any error.
set -eo pipefail
# Directory in which the script itself resides, so we can use it for calling programs that are in the same directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@@ -138,8 +135,8 @@ if [[ -n $release_branch_name ]]; then
remote_release_branch_name="$remote/$release_branch_name"
# Determine whether the local and remote release branches already exist
local_branch=$(git branch | { grep "$release_branch_name" || true; })
remote_branch=$(git branch -r | { grep "$remote_release_branch_name" || true;})
local_branch=$(git branch | grep "$release_branch_name")
remote_branch=$(git branch -r | grep "$remote_release_branch_name")
if [[ -z $remote_branch ]]; then
echo "The branch $remote_release_branch_name must be created before you tag the release."
exit 1
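
One note on this hunk: the "{ grep ... || true; }" form on one side exists because grep exits with status 1 when it finds no match, and under "set -eo pipefail" that aborts the whole script even though an empty result is the expected case for a not-yet-created branch. A minimal Go sketch of the same tolerance, assuming grep's documented exit codes (1 means no match, anything higher is a real error):

    package main

    import (
        "errors"
        "fmt"
        "os/exec"
    )

    // grepLines runs grep but treats "no match" (exit status 1) as an empty
    // result rather than a failure, mirroring the || true guard above.
    func grepLines(pattern, file string) (string, error) {
        out, err := exec.Command("grep", pattern, file).Output()
        var ee *exec.ExitError
        if errors.As(err, &ee) && ee.ExitCode() == 1 {
            return "", nil // nothing matched: fine, just report emptiness
        }
        return string(out), err
    }

    func main() {
        lines, err := grepLines("release-1.8", "branches.txt") // illustrative names only
        fmt.Printf("%q %v\n", lines, err)
    }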


@@ -43,5 +43,5 @@ fi
# but the user and group don't exist inside the container, when the code(https://github.com/kubernetes-sigs/controller-runtime/blob/v0.10.2/pkg/internal/testing/addr/manager.go#L44)
# tries to get the cache directory, it gets the directory "/" and then get the permission error when trying to create directory under "/".
# Specifying the cache directory by environment variable "XDG_CACHE_HOME" to workaround it
XDG_CACHE_HOME=/tmp/ go test -installsuffix "static" -short -timeout 60s -coverprofile=coverage.out "${TARGETS[@]}"
XDG_CACHE_HOME=/tmp/ go test -installsuffix "static" -short -timeout 60s "${TARGETS[@]}"
echo "Success!"


@@ -49,11 +49,8 @@ ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
controller-gen \
crd:crdVersions=v1\
paths=./pkg/apis/velero/v1/... \
rbac:roleName=velero-perms \
paths=./pkg/controller/... \
output:crd:artifacts:config=config/crd/v1/bases \
object \
paths=./pkg/apis/velero/v1/...
output:crd:artifacts:config=config/crd/v1/bases
# this is a super hacky workaround for https://github.com/kubernetes/kubernetes/issues/91395
# which a result of fixing the validation on CRD objects. The validation ensures the fields that are list map keys, are either marked


@@ -16,7 +16,7 @@
HACK_DIR=$(dirname "${BASH_SOURCE}")
${HACK_DIR}/update-generated-crd-code.sh
${HACK_DIR}/update-generated-crd-code.sh --verify-only
# ensure no changes to generated CRDs
if ! git diff --exit-code config/crd/v1/crds/crds.go >/dev/null; then


@@ -62,17 +62,16 @@ func InvokeDeleteActions(ctx *Context) error {
dir, err := archive.NewExtractor(ctx.Log, ctx.Filesystem).UnzipAndExtractBackup(ctx.BackupReader)
if err != nil {
return errors.Wrapf(err, "error extracting backup")
}
defer ctx.Filesystem.RemoveAll(dir)
ctx.Log.Debugf("Downloaded and extracted the backup file to: %s", dir)
backupResources, err := archive.NewParser(ctx.Log, ctx.Filesystem).Parse(dir)
if existErr := errors.Is(err, archive.ErrNotExist); existErr {
ctx.Log.Debug("ignore invoking delete item actions: ", err)
return nil
} else if err != nil {
if err != nil {
return errors.Wrapf(err, "error parsing backup %q", dir)
}
processdResources := sets.NewString()
for resource := range backupResources {
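
The two sides of this hunk differ in how a missing "resources" directory is classified: one treats archive.ErrNotExist as a benign, skippable condition via errors.Is, the other fails the delete on any parse error. A self-contained sketch of the sentinel pattern (standard-library errors shown; the pkg/errors wrappers used in the real code preserve the chain for errors.Is as well):

    package main

    import (
        "errors"
        "fmt"
    )

    var ErrNotExist = errors.New("does not exist")

    // parse wraps the sentinel with %w so it stays detectable in the chain.
    func parse(dir string) error {
        return fmt.Errorf("directory %q: %w", dir, ErrNotExist)
    }

    func main() {
        if err := parse("resources"); errors.Is(err, ErrNotExist) {
            fmt.Println("nothing to process, skipping:", err) // benign
        } else if err != nil {
            fmt.Println("fatal:", err)
        }
    }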


@@ -138,17 +138,6 @@ func TestInvokeDeleteItemActionsRunForCorrectItems(t *testing.T) {
new(recordResourcesAction).ForLabelSelector("app=app1"): {"ns-1/pod-1", "ns-2/pvc-2"},
},
},
{
name: "success if resources dir does not exist",
backup: builder.ForBackup("velero", "velero").Result(),
tarball: test.NewTarWriter(t).
Done(),
apiResources: []*test.APIResource{test.Pods(), test.PVCs()},
actions: map[*recordResourcesAction][]string{
new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil,
new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil,
},
},
}
for _, tc := range tests {
@@ -160,7 +149,7 @@ func TestInvokeDeleteItemActionsRunForCorrectItems(t *testing.T) {
}
// Get the plugins out of the map in order to use them.
var actions []velero.DeleteItemAction
actions := []velero.DeleteItemAction{}
for action := range tc.actions {
actions = append(actions, action)
}


@@ -104,7 +104,6 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks(
groupResource schema.GroupResource,
obj runtime.Unstructured,
resourceRestoreHooks []ResourceRestoreHook,
namespaceMapping map[string]string,
) (runtime.Unstructured, error) {
// We only support hooks on pods right now
if groupResource != kuberesource.Pods {
@@ -142,13 +141,6 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks(
namespace := metadata.GetNamespace()
labels := labels.Set(metadata.GetLabels())
// Apply the hook according to the target namespace in which the pod will be restored
// more details see https://github.com/vmware-tanzu/velero/issues/4720
if namespaceMapping != nil {
if n, ok := namespaceMapping[namespace]; ok {
namespace = n
}
}
for _, rh := range resourceRestoreHooks {
if !rh.Selector.applicableTo(groupResource, namespace, labels) {
continue
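
The block that differs here carries the fix referenced as issue #4720: when a restore maps namespaces, hook selectors must be evaluated against the namespace the pod will be restored into, not the one it was backed up from. The lookup itself reduces to a few lines (hypothetical standalone helper, not Velero's code):

    package restore

    // targetNamespace returns the namespace a restored object will land in,
    // applying the restore's namespace mapping when one covers the original.
    // Reads on a nil map are safe in Go, so no nil check is needed here.
    func targetNamespace(original string, mapping map[string]string) string {
        if n, ok := mapping[original]; ok {
            return n
        }
        return original
    }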


@@ -1395,12 +1395,11 @@ func TestGetRestoreHooksFromSpec(t *testing.T) {
func TestHandleRestoreHooks(t *testing.T) {
testCases := []struct {
name string
podInput corev1api.Pod
restoreHooks []ResourceRestoreHook
namespaceMapping map[string]string
expectedPod *corev1api.Pod
expectedError error
name string
podInput corev1api.Pod
restoreHooks []ResourceRestoreHook
expectedPod *corev1api.Pod
expectedError error
}{
{
name: "should handle hook from annotation no hooks in spec on pod with no init containers",
@@ -1841,87 +1840,6 @@ func TestHandleRestoreHooks(t *testing.T) {
},
restoreHooks: []ResourceRestoreHook{},
},
{
name: "should not apply init container when the namespace mapping is provided and the hook points to the original namespace",
podInput: corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "default",
},
Spec: corev1api.PodSpec{},
},
expectedError: nil,
expectedPod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "default",
},
Spec: corev1api.PodSpec{},
},
restoreHooks: []ResourceRestoreHook{
{
Name: "hook1",
Selector: ResourceHookSelector{
Namespaces: collections.NewIncludesExcludes().Includes("default"),
Resources: collections.NewIncludesExcludes().Includes(kuberesource.Pods.Resource),
},
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
},
},
},
},
},
},
namespaceMapping: map[string]string{"default": "new"},
},
{
name: "should apply init container when the namespace mapping is provided and the hook points to the new namespace",
podInput: corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "default",
},
Spec: corev1api.PodSpec{},
},
expectedError: nil,
expectedPod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "default",
},
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
},
},
},
restoreHooks: []ResourceRestoreHook{
{
Name: "hook1",
Selector: ResourceHookSelector{
Namespaces: collections.NewIncludesExcludes().Includes("new"),
Resources: collections.NewIncludesExcludes().Includes(kuberesource.Pods.Resource),
},
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
},
},
},
},
},
},
namespaceMapping: map[string]string{"default": "new"},
},
}
for _, tc := range testCases {
@@ -1929,7 +1847,7 @@ func TestHandleRestoreHooks(t *testing.T) {
handler := InitContainerRestoreHookHandler{}
podMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.podInput)
assert.NoError(t, err)
actual, err := handler.HandleRestoreHooks(velerotest.NewLogger(), kuberesource.Pods, &unstructured.Unstructured{Object: podMap}, tc.restoreHooks, tc.namespaceMapping)
actual, err := handler.HandleRestoreHooks(velerotest.NewLogger(), kuberesource.Pods, &unstructured.Unstructured{Object: podMap}, tc.restoreHooks)
assert.Equal(t, tc.expectedError, err)
actualPod := new(corev1api.Pod)
err = runtime.DefaultUnstructuredConverter.FromUnstructured(actual.UnstructuredContent(), actualPod)


@@ -59,15 +59,6 @@ type BackupSpec struct {
// +nullable
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
// OrLabelSelectors is list of metav1.LabelSelector to filter with
// when adding individual objects to the backup. If multiple provided
// they will be joined by the OR operator. LabelSelector as well as
// OrLabelSelectors cannot co-exist in backup request, only one of them
// can be used.
// +optional
// +nullable
OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"`
// SnapshotVolumes specifies whether to take cloud snapshots
// of any PV's referenced in the set of objects included
// in the Backup.
@@ -110,12 +101,6 @@ type BackupSpec struct {
// +optional
// +nullable
OrderedResources map[string]string `json:"orderedResources,omitempty"`
// CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to
// ReadyToUse during creation, before returning error as timeout.
// The default value is 10 minute.
// +optional
CSISnapshotTimeout metav1.Duration `json:"csiSnapshotTimeout,omitempty"`
}
// BackupHooks contains custom behaviors that should be executed at different phases of the backup.
@@ -307,10 +292,6 @@ type BackupStatus struct {
// +optional
VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted,omitempty"`
// FailureReason is an error that caused the entire backup to fail.
// +optional
FailureReason string `json:"failureReason,omitempty"`
// Warnings is a count of all warning messages that were generated during
// execution of the backup. The actual warnings are in the backup's log
// file in object storage.
@@ -329,16 +310,6 @@ type BackupStatus struct {
// +optional
// +nullable
Progress *BackupProgress `json:"progress,omitempty"`
// CSIVolumeSnapshotsAttempted is the total number of attempted
// CSI VolumeSnapshots for this backup.
// +optional
CSIVolumeSnapshotsAttempted int `json:"csiVolumeSnapshotsAttempted,omitempty"`
// CSIVolumeSnapshotsCompleted is the total number of successfully
// completed CSI VolumeSnapshots for this backup.
// +optional
CSIVolumeSnapshotsCompleted int `json:"csiVolumeSnapshotsCompleted,omitempty"`
}
// BackupProgress stores information about the progress of a Backup's execution.
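
The OrLabelSelectors field in the hunk above turns filtering from a single selector into a disjunction: an item is included if any selector in the list matches. Its evaluation can be sketched with the apimachinery helpers (function and variable names here are assumptions, not Velero's internals):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
    )

    // matchesAny reports whether the item's labels satisfy at least one of
    // the given selectors, i.e. the selectors are joined with OR.
    func matchesAny(itemLabels map[string]string, selectors []*metav1.LabelSelector) (bool, error) {
        for _, ls := range selectors {
            sel, err := metav1.LabelSelectorAsSelector(ls)
            if err != nil {
                return false, err
            }
            if sel.Matches(labels.Set(itemLabels)) {
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        ok, _ := matchesAny(
            map[string]string{"a2": "b2"},
            []*metav1.LabelSelector{
                {MatchLabels: map[string]string{"a1": "b1"}},
                {MatchLabels: map[string]string{"a2": "b2"}},
            },
        )
        fmt.Println(ok) // true: the second selector matches
    }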


@@ -74,10 +74,6 @@ type BackupStorageLocationStatus struct {
// +nullable
LastValidationTime *metav1.Time `json:"lastValidationTime,omitempty"`
// Message is a message about the backup storage location's status.
// +optional
Message string `json:"message,omitempty"`
// LastSyncedRevision is the value of the `metadata/revision` file in the backup
// storage location the last time the BSL's contents were synced into the cluster.
//
@@ -102,6 +98,7 @@ type BackupStorageLocationStatus struct {
// +kubebuilder:resource:shortName=bsl
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Backup Storage Location status such as Available/Unavailable"
// +kubebuilder:printcolumn:name="Last Validated",type="date",JSONPath=".status.lastValidationTime",description="LastValidationTime is the last time the backup store location was validated"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"


@@ -50,14 +50,8 @@ type DeleteBackupRequestStatus struct {
Errors []string `json:"errors,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="BackupName",type="string",JSONPath=".spec.backupName",description="The name of the backup to be deleted"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="The status of the deletion request"
// DeleteBackupRequest is a request to delete one or more backups.
type DeleteBackupRequest struct {
@@ -74,7 +68,6 @@ type DeleteBackupRequest struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// DeleteBackupRequestList is a list of DeleteBackupRequests.
type DeleteBackupRequestList struct {


@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -85,6 +85,7 @@ type DownloadRequestStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// DownloadRequest is a request to download an artifact from backup object storage, such as a backup
// log file.


@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright 2018 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -98,20 +98,8 @@ type PodVolumeBackupStatus struct {
Progress PodVolumeOperationProgress `json:"progress,omitempty"`
}
// TODO(2.0) After converting all resources to use the runttime-controller client,
// the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress"
// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started"
// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up"
// +kubebuilder:printcolumn:name="Restic Repo",type="string",JSONPath=".spec.repoIdentifier",description="Restic repository identifier for this backup"
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true
type PodVolumeBackup struct {
metav1.TypeMeta `json:",inline"`
@@ -126,12 +114,7 @@ type PodVolumeBackup struct {
Status PodVolumeBackupStatus `json:"status,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,
// the k8s:deepcopy marker will no longer be needed and should be removed.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups/status,verbs=get;update;patch
// PodVolumeBackupList is a list of PodVolumeBackups.
type PodVolumeBackupList struct {


@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -81,19 +81,8 @@ type PodVolumeRestoreStatus struct {
Progress PodVolumeOperationProgress `json:"progress,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:generate=true
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored"
// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress"
// +kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress"
// +kubebuilder:printcolumn:name="BytesDone",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Pod Volume Restore status such as New/InProgress"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type PodVolumeRestore struct {
metav1.TypeMeta `json:",inline"`
@@ -109,8 +98,6 @@ type PodVolumeRestore struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:generate=true
// +kubebuilder:object:root=true
// PodVolumeRestoreList is a list of PodVolumeRestores.
type PodVolumeRestoreList struct {


@@ -64,14 +64,8 @@ type ResticRepositoryStatus struct {
LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,
// the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type ResticRepository struct {
metav1.TypeMeta `json:",inline"`
@@ -86,12 +80,7 @@ type ResticRepository struct {
Status ResticRepositoryStatus `json:"status,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,
// the k8s:deepcopy marker will no longer be needed and should be removed.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories/status,verbs=get;update;patch
// ResticRepositoryList is a list of ResticRepositories.
type ResticRepositoryList struct {


@@ -71,27 +71,12 @@ type RestoreSpec struct {
// +nullable
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
// OrLabelSelectors is list of metav1.LabelSelector to filter with
// when restoring individual objects from the backup. If multiple provided
// they will be joined by the OR operator. LabelSelector as well as
// OrLabelSelectors cannot co-exist in restore request, only one of them
// can be used
// +optional
// +nullable
OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"`
// RestorePVs specifies whether to restore all included
// PVs from snapshot (via the cloudprovider).
// +optional
// +nullable
RestorePVs *bool `json:"restorePVs,omitempty"`
// RestoreStatus specifies which resources we should restore the status
// field. If nil, no objects are included. Optional.
// +optional
// +nullable
RestoreStatus *RestoreStatusSpec `json:"restoreStatus,omitempty"`
// PreserveNodePorts specifies whether to restore old nodePorts from backup.
// +optional
// +nullable
@@ -107,11 +92,6 @@ type RestoreSpec struct {
// Hooks represent custom behaviors that should be executed during or post restore.
// +optional
Hooks RestoreHooks `json:"hooks,omitempty"`
// ExistingResourcePolicy specifies the restore behaviour for the kubernetes resource to be restored
// +optional
// +nullable
ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"`
}
// RestoreHooks contains custom behaviors that should be executed during or post restore.
@@ -119,19 +99,6 @@ type RestoreHooks struct {
Resources []RestoreResourceHookSpec `json:"resources,omitempty"`
}
type RestoreStatusSpec struct {
// IncludedResources specifies the resources to which will restore the status.
// If empty, it applies to all resources.
// +optional
// +nullable
IncludedResources []string `json:"includedResources,omitempty"`
// ExcludedResources specifies the resources to which will not restore the status.
// +optional
// +nullable
ExcludedResources []string `json:"excludedResources,omitempty"`
}
// RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on
// the rules defined for namespaces, resources, and label selector.
type RestoreResourceHookSpec struct {
@@ -245,14 +212,6 @@ const (
// RestorePhaseFailed means the restore was unable to execute.
// The failing error is recorded in status.FailureReason.
RestorePhaseFailed RestorePhase = "Failed"
// PolicyTypeNone means velero will not overwrite the resource
// in cluster with the one in backup whether changed/unchanged.
PolicyTypeNone PolicyType = "none"
// PolicyTypeUpdate means velero will try to attempt a patch on
// the changed resources.
PolicyTypeUpdate PolicyType = "update"
)
// RestoreStatus captures the current status of a Velero restore
@@ -343,6 +302,3 @@ type RestoreList struct {
Items []Restore `json:"items"`
}
// PolicyType helps specify the ExistingResourcePolicy
type PolicyType string
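
PolicyType, declared just above, is a typed string whose two documented values gate how a restore treats resources that already exist in the cluster. Consuming it is essentially a switch; a rough, hypothetical sketch:

    package restore

    type PolicyType string

    const (
        PolicyTypeNone   PolicyType = "none"   // leave the in-cluster object untouched
        PolicyTypeUpdate PolicyType = "update" // attempt to patch it from the backup
    )

    // shouldPatch reports whether an already-existing resource should be
    // patched under the given policy; the zero value keeps the old behavior.
    func shouldPatch(p PolicyType) bool {
        return p == PolicyTypeUpdate
    }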


@@ -77,16 +77,8 @@ type ScheduleStatus struct {
ValidationErrors []string `json:"validationErrors,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:generate=true
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the schedule"
// +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.schedule",description="A Cron expression defining when to run the Backup"
// +kubebuilder:printcolumn:name="LastBackup",type="date",JSONPath=".status.lastBackup",description="The last time a Backup was run for this schedule"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// Schedule is a Velero resource that represents a pre-scheduled or
// periodic Backup that should be run.
@@ -104,8 +96,6 @@ type Schedule struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:generate=true
// +kubebuilder:object:root=true
// ScheduleList is a list of Schedules.
type ScheduleList struct {


@@ -28,6 +28,7 @@ import (
// +kubebuilder:resource:shortName=ssr
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// ServerStatusRequest is a request to access current status information about
// the Velero server.
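
Several hunks in this compare add or drop the +kubebuilder:subresource:status marker. When it is present, controller-gen emits a status subresource in the generated CRD, so status writes must go through the /status endpoint and a spec update cannot clobber status. In marker form, on a made-up type:

    package v1

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    type WidgetSpec struct{}

    type WidgetStatus struct {
        Phase string `json:"phase,omitempty"`
    }

    // +kubebuilder:object:root=true
    // +kubebuilder:subresource:status
    type Widget struct {
        metav1.TypeMeta   `json:",inline"`
        metav1.ObjectMeta `json:"metadata,omitempty"`

        Spec   WidgetSpec   `json:"spec,omitempty"`
        Status WidgetStatus `json:"status,omitempty"` // written via the /status endpoint
    }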


@@ -1,14 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -18,6 +34,7 @@ func (in *Backup) DeepCopyInto(out *Backup) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup.
@@ -48,6 +65,7 @@ func (in *BackupHooks) DeepCopyInto(out *BackupHooks) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHooks.
@@ -72,6 +90,7 @@ func (in *BackupList) DeepCopyInto(out *BackupList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList.
@@ -95,6 +114,7 @@ func (in *BackupList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupProgress) DeepCopyInto(out *BackupProgress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupProgress.
@@ -115,6 +135,7 @@ func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) {
*out = new(ExecHook)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHook.
@@ -169,6 +190,7 @@ func (in *BackupResourceHookSpec) DeepCopyInto(out *BackupResourceHookSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHookSpec.
@@ -210,17 +232,6 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.OrLabelSelectors != nil {
in, out := &in.OrLabelSelectors, &out.OrLabelSelectors
*out = make([]*metav1.LabelSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
}
if in.SnapshotVolumes != nil {
in, out := &in.SnapshotVolumes, &out.SnapshotVolumes
*out = new(bool)
@@ -250,7 +261,7 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
(*out)[key] = val
}
}
out.CSISnapshotTimeout = in.CSISnapshotTimeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
@@ -288,6 +299,7 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
*out = new(BackupProgress)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus.
@@ -307,6 +319,7 @@ func (in *BackupStorageLocation) DeepCopyInto(out *BackupStorageLocation) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocation.
@@ -339,6 +352,7 @@ func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationList.
@@ -385,6 +399,7 @@ func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec
*out = new(metav1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationSpec.
@@ -408,6 +423,7 @@ func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationSt
in, out := &in.LastValidationTime, &out.LastValidationTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationStatus.
@@ -427,6 +443,7 @@ func (in *DeleteBackupRequest) DeepCopyInto(out *DeleteBackupRequest) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequest.
@@ -459,6 +476,7 @@ func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestList.
@@ -482,6 +500,7 @@ func (in *DeleteBackupRequestList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeleteBackupRequestSpec) DeepCopyInto(out *DeleteBackupRequestSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestSpec.
@@ -502,6 +521,7 @@ func (in *DeleteBackupRequestStatus) DeepCopyInto(out *DeleteBackupRequestStatus
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestStatus.
@@ -521,6 +541,7 @@ func (in *DownloadRequest) DeepCopyInto(out *DownloadRequest) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequest.
@@ -553,6 +574,7 @@ func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestList.
@@ -577,6 +599,7 @@ func (in *DownloadRequestList) DeepCopyObject() runtime.Object {
func (in *DownloadRequestSpec) DeepCopyInto(out *DownloadRequestSpec) {
*out = *in
out.Target = in.Target
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestSpec.
@@ -596,6 +619,7 @@ func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) {
in, out := &in.Expiration, &out.Expiration
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestStatus.
@@ -611,6 +635,7 @@ func (in *DownloadRequestStatus) DeepCopy() *DownloadRequestStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DownloadTarget) DeepCopyInto(out *DownloadTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadTarget.
@@ -632,6 +657,7 @@ func (in *ExecHook) DeepCopyInto(out *ExecHook) {
copy(*out, *in)
}
out.Timeout = in.Timeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecHook.
@@ -654,6 +680,7 @@ func (in *ExecRestoreHook) DeepCopyInto(out *ExecRestoreHook) {
}
out.ExecTimeout = in.ExecTimeout
out.WaitTimeout = in.WaitTimeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecRestoreHook.
@@ -677,6 +704,7 @@ func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) {
}
}
out.Timeout = in.Timeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitRestoreHook.
@@ -699,6 +727,7 @@ func (in *Metadata) DeepCopyInto(out *Metadata) {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
@@ -719,6 +748,7 @@ func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation.
@@ -734,6 +764,7 @@ func (in *ObjectStorageLocation) DeepCopy() *ObjectStorageLocation {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginInfo) DeepCopyInto(out *PluginInfo) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginInfo.
@@ -753,6 +784,7 @@ func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup.
@@ -785,6 +817,7 @@ func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList.
@@ -816,6 +849,7 @@ func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec.
@@ -840,6 +874,7 @@ func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
*out = (*in).DeepCopy()
}
out.Progress = in.Progress
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus.
@@ -855,6 +890,7 @@ func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeOperationProgress) DeepCopyInto(out *PodVolumeOperationProgress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeOperationProgress.
@@ -874,6 +910,7 @@ func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore.
@@ -906,6 +943,7 @@ func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList.
@@ -930,6 +968,7 @@ func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object {
func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) {
*out = *in
out.Pod = in.Pod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec.
@@ -954,6 +993,7 @@ func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) {
*out = (*in).DeepCopy()
}
out.Progress = in.Progress
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus.
@@ -973,6 +1013,7 @@ func (in *ResticRepository) DeepCopyInto(out *ResticRepository) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepository.
@@ -1005,6 +1046,7 @@ func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryList.
@@ -1029,6 +1071,7 @@ func (in *ResticRepositoryList) DeepCopyObject() runtime.Object {
func (in *ResticRepositorySpec) DeepCopyInto(out *ResticRepositorySpec) {
*out = *in
out.MaintenanceFrequency = in.MaintenanceFrequency
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositorySpec.
@@ -1048,6 +1091,7 @@ func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) {
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryStatus.
@@ -1067,6 +1111,7 @@ func (in *Restore) DeepCopyInto(out *Restore) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore.
@@ -1097,6 +1142,7 @@ func (in *RestoreHooks) DeepCopyInto(out *RestoreHooks) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreHooks.
@@ -1121,6 +1167,7 @@ func (in *RestoreList) DeepCopyInto(out *RestoreList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList.
@@ -1144,6 +1191,7 @@ func (in *RestoreList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestoreProgress) DeepCopyInto(out *RestoreProgress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreProgress.
@@ -1169,6 +1217,7 @@ func (in *RestoreResourceHook) DeepCopyInto(out *RestoreResourceHook) {
*out = new(InitRestoreHook)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHook.
@@ -1216,6 +1265,7 @@ func (in *RestoreResourceHookSpec) DeepCopyInto(out *RestoreResourceHookSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHookSpec.
@@ -1263,27 +1313,11 @@ func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.OrLabelSelectors != nil {
in, out := &in.OrLabelSelectors, &out.OrLabelSelectors
*out = make([]*metav1.LabelSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
}
if in.RestorePVs != nil {
in, out := &in.RestorePVs, &out.RestorePVs
*out = new(bool)
**out = **in
}
if in.RestoreStatus != nil {
in, out := &in.RestoreStatus, &out.RestoreStatus
*out = new(RestoreStatusSpec)
(*in).DeepCopyInto(*out)
}
if in.PreserveNodePorts != nil {
in, out := &in.PreserveNodePorts, &out.PreserveNodePorts
*out = new(bool)
@@ -1295,6 +1329,7 @@ func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
**out = **in
}
in.Hooks.DeepCopyInto(&out.Hooks)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec.
@@ -1328,6 +1363,7 @@ func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) {
*out = new(RestoreProgress)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus.
@@ -1340,31 +1376,6 @@ func (in *RestoreStatus) DeepCopy() *RestoreStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestoreStatusSpec) DeepCopyInto(out *RestoreStatusSpec) {
*out = *in
if in.IncludedResources != nil {
in, out := &in.IncludedResources, &out.IncludedResources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExcludedResources != nil {
in, out := &in.ExcludedResources, &out.ExcludedResources
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatusSpec.
func (in *RestoreStatusSpec) DeepCopy() *RestoreStatusSpec {
if in == nil {
return nil
}
out := new(RestoreStatusSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Schedule) DeepCopyInto(out *Schedule) {
*out = *in
@@ -1372,6 +1383,7 @@ func (in *Schedule) DeepCopyInto(out *Schedule) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule.
@@ -1404,6 +1416,7 @@ func (in *ScheduleList) DeepCopyInto(out *ScheduleList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList.
@@ -1433,6 +1446,7 @@ func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) {
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec.
@@ -1457,6 +1471,7 @@ func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus.
@@ -1476,6 +1491,7 @@ func (in *ServerStatusRequest) DeepCopyInto(out *ServerStatusRequest) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequest.
@@ -1508,6 +1524,7 @@ func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestList.
@@ -1531,6 +1548,7 @@ func (in *ServerStatusRequestList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerStatusRequestSpec) DeepCopyInto(out *ServerStatusRequestSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestSpec.
@@ -1555,6 +1573,7 @@ func (in *ServerStatusRequestStatus) DeepCopyInto(out *ServerStatusRequestStatus
*out = make([]PluginInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestStatus.
@@ -1575,6 +1594,7 @@ func (in *StorageType) DeepCopyInto(out *StorageType) {
*out = new(ObjectStorageLocation)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageType.
@@ -1594,6 +1614,7 @@ func (in *VolumeSnapshotLocation) DeepCopyInto(out *VolumeSnapshotLocation) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocation.
@@ -1626,6 +1647,7 @@ func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationLi
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationList.
@@ -1656,6 +1678,7 @@ func (in *VolumeSnapshotLocationSpec) DeepCopyInto(out *VolumeSnapshotLocationSp
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationSpec.
@@ -1671,6 +1694,7 @@ func (in *VolumeSnapshotLocationSpec) DeepCopy() *VolumeSnapshotLocationSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSnapshotLocationStatus) DeepCopyInto(out *VolumeSnapshotLocationStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationStatus.
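
Everything in the file above is generated; the substantive difference between the two sides is which generator produced it (hence the differing headers and the bare "return" statements present on one side only). The pattern being mass-produced is simple when hand-written for a small type, illustration only:

    package v1

    type Thing struct {
        Names []string
    }

    // DeepCopyInto copies the receiver into out, allocating fresh backing
    // storage for reference types so the two values share nothing.
    func (in *Thing) DeepCopyInto(out *Thing) {
        *out = *in
        if in.Names != nil {
            out.Names = make([]string, len(in.Names))
            copy(out.Names, in.Names)
        }
    }

    // DeepCopy allocates a new Thing and deep-copies the receiver into it,
    // tolerating a nil receiver as the generated code does.
    func (in *Thing) DeepCopy() *Thing {
        if in == nil {
            return nil
        }
        out := new(Thing)
        in.DeepCopyInto(out)
        return out
    }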


@@ -29,8 +29,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)
var ErrNotExist = errors.New("does not exist")
// Parser traverses an extracted archive on disk to validate
// it and provide a helpful representation of it to consumers.
type Parser struct {
@@ -68,9 +66,17 @@ func (p *Parser) Parse(dir string) (map[string]*ResourceItems, error) {
// ensure top-level "resources" directory exists, and read subdirectories
// of it, where each one is expected to correspond to a resource.
resourcesDir := filepath.Join(dir, velerov1api.ResourcesDir)
resourceDirs, err := p.checkAndReadDir(resourcesDir)
exists, err := p.fs.DirExists(resourcesDir)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(resourcesDir, dir+"/"))
}
if !exists {
return nil, errors.Errorf("directory %q does not exist", strings.TrimPrefix(resourcesDir, dir+"/"))
}
resourceDirs, err := p.fs.ReadDir(resourcesDir)
if err != nil {
return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(resourcesDir, dir+"/"))
}
// loop through each subdirectory (one per resource) and assemble
@@ -167,15 +173,15 @@ func (p *Parser) getResourceItemsForScope(dir, archiveRootDir string) ([]string,
func (p *Parser) checkAndReadDir(dir string) ([]os.FileInfo, error) {
exists, err := p.fs.DirExists(dir)
if err != nil {
return nil, errors.Wrapf(err, "error checking for existence of directory %q", filepath.ToSlash(dir))
return []os.FileInfo{}, errors.Wrapf(err, "finding %q", dir)
}
if !exists {
return nil, errors.Wrapf(ErrNotExist, "directory %q", filepath.ToSlash(dir))
return []os.FileInfo{}, errors.Errorf("%q not found", dir)
}
contents, err := p.fs.ReadDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "reading contents of %q", filepath.ToSlash(dir))
return []os.FileInfo{}, errors.Wrapf(err, "reading contents of %q", dir)
}
return contents, nil


@@ -32,13 +32,13 @@ func TestParse(t *testing.T) {
name string
files []string
dir string
wantErrMsg error
wantErrMsg string
want map[string]*ResourceItems
}{
{
name: "when there is no top-level resources directory, an error is returned",
dir: "root-dir",
wantErrMsg: ErrNotExist,
wantErrMsg: "directory \"resources\" does not exist",
},
{
name: "when there are no directories under the resources directory, an empty map is returned",
@@ -109,8 +109,8 @@ func TestParse(t *testing.T) {
}
res, err := p.Parse(tc.dir)
if tc.wantErrMsg != nil {
assert.ErrorIs(t, err, tc.wantErrMsg, "Error should be: %v, got: %v", tc.wantErrMsg, err)
if tc.wantErrMsg != "" {
assert.EqualError(t, err, tc.wantErrMsg)
} else {
assert.Nil(t, err)
assert.Equal(t, tc.want, res)
@@ -124,13 +124,13 @@ func TestParseGroupVersions(t *testing.T) {
name string
files []string
backupDir string
wantErrMsg error
wantErrMsg string
want map[string]metav1.APIGroup
}{
{
name: "when there is no top-level resources directory, an error is returned",
backupDir: "/var/folders",
wantErrMsg: ErrNotExist,
wantErrMsg: "\"/var/folders/resources\" not found",
},
{
name: "when there are no directories under the resources directory, an empty map is returned",
@@ -223,8 +223,8 @@ func TestParseGroupVersions(t *testing.T) {
}
res, err := p.ParseGroupVersions(tc.backupDir)
if tc.wantErrMsg != nil {
assert.ErrorIs(t, err, tc.wantErrMsg, "Error should be: %v, got: %v", tc.wantErrMsg, err)
if tc.wantErrMsg != "" {
assert.EqualError(t, err, tc.wantErrMsg)
} else {
assert.Nil(t, err)
assert.Equal(t, tc.want, res)
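
Note: the two test styles contrasted above differ in brittleness. A minimal sketch of both testify assertions side by side (hypothetical test; assert.ErrorIs assumes testify v1.7+):

package main

import (
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

var ErrNotExist = errors.New("does not exist")

func TestSentinelVsString(t *testing.T) {
	err := errors.Wrapf(ErrNotExist, "directory %q", "resources")

	// Matches anywhere in the wrap chain; resilient to message rewording.
	assert.ErrorIs(t, err, ErrNotExist)

	// Matches the fully rendered message only; breaks if wording changes.
	assert.EqualError(t, err, `directory "resources": does not exist`)
}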

View File

@@ -1,5 +1,5 @@
/*
Copyright the Velero Contributors.
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -47,7 +47,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/collections"
)
@@ -201,7 +200,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
backupRequest.ResourceIncludesExcludes = collections.GetResourceIncludesExcludes(kb.discoveryHelper, backupRequest.Spec.IncludedResources, backupRequest.Spec.ExcludedResources)
log.Infof("Including resources: %s", backupRequest.ResourceIncludesExcludes.IncludesString())
log.Infof("Excluding resources: %s", backupRequest.ResourceIncludesExcludes.ExcludesString())
log.Infof("Backing up all pod volumes using Restic: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToRestic))
log.Infof("Backing up all pod volumes using restic: %t", *backupRequest.Backup.Spec.DefaultVolumesToRestic)
var err error
backupRequest.ResourceHooks, err = getResourceHooks(backupRequest.Spec.Hooks.Resources, kb.discoveryHelper)
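
Note: the logging line above guards a *bool with the boolptr helper instead of dereferencing it directly. A minimal sketch of what such a helper does (simplified; the real one lives in pkg/util/boolptr):

package main

import "fmt"

// IsSetToTrue reports whether a *bool is non-nil and true, so optional API
// fields can be logged without risking a nil-pointer dereference.
func IsSetToTrue(b *bool) bool {
	return b != nil && *b
}

func main() {
	var unset *bool
	fmt.Println(IsSetToTrue(unset)) // false, no panic
}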

View File

@@ -331,35 +331,6 @@ func TestBackupResourceFiltering(t *testing.T) {
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
},
},
{
name: "OrLabelSelector only backs up matching resources",
backup: defaultBackup().
OrLabelSelector([]*metav1.LabelSelector{{MatchLabels: map[string]string{"a1": "b1"}}, {MatchLabels: map[string]string{"a2": "b2"}},
{MatchLabels: map[string]string{"a3": "b3"}}, {MatchLabels: map[string]string{"a4": "b4"}}}).
Result(),
apiResources: []*test.APIResource{
test.Pods(
builder.ForPod("foo", "bar").ObjectMeta(builder.WithLabels("a1", "b1")).Result(),
builder.ForPod("zoo", "raz").Result(),
),
test.Deployments(
builder.ForDeployment("foo", "bar").Result(),
builder.ForDeployment("zoo", "raz").ObjectMeta(builder.WithLabels("a2", "b2")).Result(),
),
test.PVs(
builder.ForPersistentVolume("bar").ObjectMeta(builder.WithLabels("a4", "b4")).Result(),
builder.ForPersistentVolume("baz").ObjectMeta(builder.WithLabels("a5", "b5")).Result(),
),
},
want: []string{
"resources/pods/namespaces/foo/bar.json",
"resources/deployments.apps/namespaces/zoo/raz.json",
"resources/persistentvolumes/cluster/bar.json",
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
},
},
{
name: "resources with velero.io/exclude-from-backup=true label are not included",
backup: defaultBackup().

View File

@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
@@ -39,7 +38,6 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/discovery"
"github.com/vmware-tanzu/velero/pkg/features"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/restic"
@@ -386,7 +384,6 @@ const (
awsEbsCsiZoneKey = "topology.ebs.csi.aws.com/zone"
azureCsiZoneKey = "topology.disk.csi.azure.com/zone"
gkeCsiZoneKey = "topology.gke.io/zone"
gkeZoneSeparator = "__"
)
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
@@ -416,12 +413,6 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
}
}
// #4758 Do not take a snapshot for a CSI PV to avoid duplicated snapshotting when the CSI feature is enabled.
if features.IsEnabled(velerov1api.CSIFeatureFlag) && pv.Spec.CSI != nil {
log.Infof("Skipping snapshot of persistent volume %s, because it's handled by CSI plugin.", pv.Name)
return nil
}
// TODO: -- once failure-domain.beta.kubernetes.io/zone is no longer
// supported in any velero-supported version of Kubernetes, remove fallback checking of it
pvFailureDomainZone, labelFound := pv.Labels[zoneLabel]
@@ -548,27 +539,15 @@ func zoneFromPVNodeAffinity(res *corev1api.PersistentVolume, topologyKeys ...str
return "", ""
}
keySet := sets.NewString(topologyKeys...)
providerGke := false
zones := make([]string, 0)
for _, term := range nodeAffinity.Required.NodeSelectorTerms {
if term.MatchExpressions == nil {
continue
}
for _, exp := range term.MatchExpressions {
if keySet.Has(exp.Key) && exp.Operator == "In" && len(exp.Values) > 0 {
if exp.Key == gkeCsiZoneKey {
providerGke = true
zones = append(zones, exp.Values[0])
} else {
return exp.Key, exp.Values[0]
}
return exp.Key, exp.Values[0]
}
}
}
if providerGke {
return gkeCsiZoneKey, strings.Join(zones, gkeZoneSeparator)
}
return "", ""
}

View File

@@ -131,36 +131,6 @@ func Test_zoneFromPVNodeAffinity(t *testing.T) {
wantKey: "topology.disk.csi.azure.com/zone",
wantValue: "us-central",
},
{
/* a valid example of node affinity in a GKE regional PV
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: topology.gke.io/zone
operator: In
values:
- us-central1-a
- matchExpressions:
- key: topology.gke.io/zone
operator: In
values:
- us-central1-c
*/
name: "Volume with multiple valid keys, and provider is gke, returns all valid entries's first zone value",
pv: builder.ForPersistentVolume("multi-matching-pv").NodeAffinityRequired(
builder.ForNodeSelector(
*builder.NewNodeSelectorTermBuilder().WithMatchExpression("topology.gke.io/zone",
"In", "us-central1-c").Result(),
*builder.NewNodeSelectorTermBuilder().WithMatchExpression("topology.gke.io/zone",
"In", "us-east-2c", "us-east-2b").Result(),
*builder.NewNodeSelectorTermBuilder().WithMatchExpression("topology.gke.io/zone",
"In", "europe-north1-a").Result(),
).Result(),
).Result(),
wantKey: "topology.gke.io/zone",
wantValue: "us-central1-c__us-east-2c__europe-north1-a",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
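
Note: the hunk and test above concern collecting one zone per matching NodeSelectorTerm for GKE regional PVs and joining them with the "__" separator. A simplified, self-contained sketch of that accumulation (illustrative, not the exact Velero function):

package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

const (
	gkeCsiZoneKey    = "topology.gke.io/zone"
	gkeZoneSeparator = "__"
)

// gkeZones gathers the first zone value from every matching term; a regional
// PV spreads one zone per NodeSelectorTerm.
func gkeZones(terms []corev1.NodeSelectorTerm) string {
	zones := make([]string, 0)
	for _, term := range terms {
		for _, exp := range term.MatchExpressions {
			if exp.Key == gkeCsiZoneKey && exp.Operator == corev1.NodeSelectorOpIn && len(exp.Values) > 0 {
				zones = append(zones, exp.Values[0])
			}
		}
	}
	return strings.Join(zones, gkeZoneSeparator)
}

func main() {
	terms := []corev1.NodeSelectorTerm{
		{MatchExpressions: []corev1.NodeSelectorRequirement{{Key: gkeCsiZoneKey, Operator: corev1.NodeSelectorOpIn, Values: []string{"us-central1-c"}}}},
		{MatchExpressions: []corev1.NodeSelectorRequirement{{Key: gkeCsiZoneKey, Operator: corev1.NodeSelectorOpIn, Values: []string{"us-east-2c", "us-east-2b"}}}},
	}
	fmt.Println(gkeZones(terms)) // us-central1-c__us-east-2c
}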

View File

@@ -291,44 +291,53 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
continue
}
var orLabelSelectors []string
if r.backupRequest.Spec.OrLabelSelectors != nil {
for _, s := range r.backupRequest.Spec.OrLabelSelectors {
orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(s))
}
} else {
orLabelSelectors = []string{}
}
log.Info("Listing items")
unstructuredItems := make([]unstructured.Unstructured, 0)
// Listing items for orLabelSelectors
errListingForNS := false
for _, label := range orLabelSelectors {
unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, label, resourceClient)
if err != nil {
errListingForNS = true
}
}
if errListingForNS {
log.WithError(err).Error("Error listing items")
continue
}
var labelSelector string
if selector := r.backupRequest.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}
// Listing items for labelSelector (singular)
if len(orLabelSelectors) == 0 {
unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, labelSelector, resourceClient)
log.Info("Listing items")
unstructuredItems := make([]unstructured.Unstructured, 0)
if r.pageSize > 0 {
// If limit is positive, use a pager to split list over multiple requests
// Use Velero's dynamic list function instead of the default
listPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return resourceClient.List(opts)
}))
// Use the page size defined in the server config
// TODO allow configuration of page buffer size
listPager.PageSize = int64(r.pageSize)
// Add each item to temporary slice
list, paginated, err := listPager.List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
log.WithError(err).Error("Error listing items")
log.WithError(errors.WithStack(err)).Error("Error listing resources")
continue
}
if !paginated {
log.Infof("list for groupResource %s was not paginated", gr)
}
err = meta.EachListItem(list, func(object runtime.Object) error {
u, ok := object.(*unstructured.Unstructured)
if !ok {
log.WithError(errors.WithStack(fmt.Errorf("expected *unstructured.Unstructured but got %T", u))).Error("unable to understand entry in the list")
return fmt.Errorf("expected *unstructured.Unstructured but got %T", u)
}
unstructuredItems = append(unstructuredItems, *u)
return nil
})
if err != nil {
log.WithError(errors.WithStack(err)).Error("unable to understand paginated list")
continue
}
} else {
// If limit is not positive, do not use paging. Instead, request all items at once
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
log.WithError(errors.WithStack(err)).Error("Error listing items")
continue
}
unstructuredItems = append(unstructuredItems, unstructuredList.Items...)
}
log.Infof("Retrieved %d items", len(unstructuredItems))
@@ -458,60 +467,3 @@ func newCohabitatingResource(resource, group1, group2 string) *cohabitatingResou
seen: false,
}
}
// processPagerClientCalls processes pager client calls when pageSize is specified
func (r *itemCollector) processPagerClientCalls(gr schema.GroupResource, label string, resourceClient client.Dynamic) (runtime.Object, error) {
// If limit is positive, use a pager to split list over multiple requests
// Use Velero's dynamic list function instead of the default
listPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return resourceClient.List(opts)
}))
// Use the page size defined in the server config
// TODO allow configuration of page buffer size
listPager.PageSize = int64(r.pageSize)
// Add each item to temporary slice
list, paginated, err := listPager.List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
r.log.WithError(errors.WithStack(err)).Error("Error listing resources")
return list, err
}
if !paginated {
r.log.Infof("list for groupResource %s was not paginated", gr)
}
return list, nil
}
func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstructured, gr schema.GroupResource, label string, resourceClient client.Dynamic) ([]unstructured.Unstructured, error) {
if r.pageSize > 0 {
// process pager client calls
list, err := r.processPagerClientCalls(gr, label, resourceClient)
if err != nil {
return unstructuredItems, err
}
err = meta.EachListItem(list, func(object runtime.Object) error {
u, ok := object.(*unstructured.Unstructured)
if !ok {
r.log.WithError(errors.WithStack(fmt.Errorf("expected *unstructured.Unstructured but got %T", u))).Error("unable to understand entry in the list")
return fmt.Errorf("expected *unstructured.Unstructured but got %T", u)
}
unstructuredItems = append(unstructuredItems, *u)
return nil
})
if err != nil {
r.log.WithError(errors.WithStack(err)).Error("unable to understand paginated list")
return unstructuredItems, err
}
} else {
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: label})
if err != nil {
r.log.WithError(errors.WithStack(err)).Error("Error listing items")
return unstructuredItems, err
}
unstructuredItems = append(unstructuredItems, unstructuredList.Items...)
}
return unstructuredItems, nil
}
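
Note: both variants above drive client-go's pager to split a large List over multiple requests. A self-contained sketch of the paged path against the dynamic client (function name and wiring are illustrative; assumes a client-go version whose dynamic List takes a context):

package listutil

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/pager"
)

// listPaged pages through a dynamic List call and accumulates every item
// into a single slice, the way the collector above does per label selector.
func listPaged(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, selector string, pageSize int64) ([]unstructured.Unstructured, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return c.Resource(gvr).List(ctx, opts)
	}))
	p.PageSize = pageSize

	// The middle return reports whether paging actually happened; it is
	// informational only, so it is discarded here.
	list, _, err := p.List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return nil, err
	}

	var items []unstructured.Unstructured
	err = meta.EachListItem(list, func(obj runtime.Object) error {
		u, ok := obj.(*unstructured.Unstructured)
		if !ok {
			return fmt.Errorf("expected *unstructured.Unstructured but got %T", obj)
		}
		items = append(items, *u)
		return nil
	})
	return items, err
}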

View File

@@ -1,5 +1,5 @@
/*
Copyright the Velero contributors.
Copyright 2017 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,20 +56,12 @@ func (a *PodAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
return nil, nil, errors.WithStack(err)
}
var additionalItems []velero.ResourceIdentifier
if pod.Spec.PriorityClassName != "" {
a.log.Infof("Adding priorityclass %s to additionalItems", pod.Spec.PriorityClassName)
additionalItems = append(additionalItems, velero.ResourceIdentifier{
GroupResource: kuberesource.PriorityClasses,
Name: pod.Spec.PriorityClassName,
})
}
if len(pod.Spec.Volumes) == 0 {
a.log.Info("pod has no volumes")
return item, additionalItems, nil
return item, nil, nil
}
var additionalItems []velero.ResourceIdentifier
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName != "" {
a.log.Infof("Adding pvc %s to additionalItems", volume.PersistentVolumeClaim.ClaimName)

View File

@@ -1,5 +1,5 @@
/*
Copyright the Velero contributors.
Copyright 2018 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -115,25 +115,6 @@ func TestPodActionExecute(t *testing.T) {
{GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "foo", Name: "claim2"},
},
},
{
name: "test priority class",
pod: velerotest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"namespace": "foo",
"name": "bar"
},
"spec": {
"priorityClassName": "testPriorityClass"
}
}
`),
expected: []velero.ResourceIdentifier{
{GroupResource: kuberesource.PriorityClasses, Name: "testPriorityClass"},
},
},
}
for _, test := range tests {

View File

@@ -30,8 +30,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery"
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)
@@ -39,14 +37,13 @@ import (
// RemapCRDVersionAction inspects CustomResourceDefinition and decides if it is a v1
// CRD that needs to be backed up as v1beta1.
type RemapCRDVersionAction struct {
logger logrus.FieldLogger
betaCRDClient apiextv1beta1client.CustomResourceDefinitionInterface
discoveryHelper velerodiscovery.Helper
logger logrus.FieldLogger
betaCRDClient apiextv1beta1client.CustomResourceDefinitionInterface
}
// NewRemapCRDVersionAction instantiates a new RemapCRDVersionAction plugin.
func NewRemapCRDVersionAction(logger logrus.FieldLogger, betaCRDClient apiextv1beta1client.CustomResourceDefinitionInterface, discoveryHelper velerodiscovery.Helper) *RemapCRDVersionAction {
return &RemapCRDVersionAction{logger: logger, betaCRDClient: betaCRDClient, discoveryHelper: discoveryHelper}
func NewRemapCRDVersionAction(logger logrus.FieldLogger, betaCRDClient apiextv1beta1client.CustomResourceDefinitionInterface) *RemapCRDVersionAction {
return &RemapCRDVersionAction{logger: logger, betaCRDClient: betaCRDClient}
}
// AppliesTo selects the resources the plugin should run against. In this case, CustomResourceDefinitions.
@@ -71,26 +68,7 @@ func (a *RemapCRDVersionAction) Execute(item runtime.Unstructured, backup *v1.Ba
return item, nil, nil
}
// This plugin will exit if the CRD was installed via v1beta1 but the cluster does not support v1beta1 CRD
supportv1b1 := false
CheckVersion:
for _, g := range a.discoveryHelper.APIGroups() {
if g.Name == apiextv1.GroupName {
for _, v := range g.Versions {
if v.Version == apiextv1beta1.SchemeGroupVersion.Version {
supportv1b1 = true
break CheckVersion
}
}
}
}
if !supportv1b1 {
a.logger.Info("Exiting RemapCRDVersionAction, the cluster does not support v1beta1 CRD")
return item, nil, nil
}
// We've got a v1 CRD and the cluster supports v1beta1 CRD, so proceed.
// We've got a v1 CRD, so proceed.
var crd apiextv1.CustomResourceDefinition
// Do not use runtime.DefaultUnstructuredConverter.FromUnstructured here because it has a bug when converting integers/whole
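
Note: the CheckVersion loop above walks the discovery results for a group/version pair. A minimal sketch of the same probe as a standalone helper (hypothetical name, same metav1 types):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// supportsGroupVersion reports whether discovery lists the given version
// under the given API group.
func supportsGroupVersion(groups []metav1.APIGroup, group, version string) bool {
	for _, g := range groups {
		if g.Name != group {
			continue
		}
		for _, v := range g.Versions {
			if v.Version == version {
				return true
			}
		}
	}
	return false
}

func main() {
	groups := []metav1.APIGroup{{
		Name:     "apiextensions.k8s.io",
		Versions: []metav1.GroupVersionForDiscovery{{Version: "v1"}, {Version: "v1beta1"}},
	}}
	fmt.Println(supportsGroupVersion(groups, "apiextensions.k8s.io", "v1beta1")) // true
}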

View File

@@ -32,8 +32,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery"
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
@@ -50,7 +48,8 @@ func TestRemapCRDVersionAction(t *testing.T) {
c := b.Result()
_, err := betaClient.Create(context.TODO(), c, metav1.CreateOptions{})
require.NoError(t, err)
a := NewRemapCRDVersionAction(velerotest.NewLogger(), betaClient, fakeDiscoveryHelper())
a := NewRemapCRDVersionAction(velerotest.NewLogger(), betaClient)
t.Run("Test a v1 CRD without any Schema information", func(t *testing.T) {
b := builder.ForV1CustomResourceDefinition("test.velero.io")
@@ -110,33 +109,6 @@ func TestRemapCRDVersionAction(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "apiextensions.k8s.io/v1beta1", item.UnstructuredContent()["apiVersion"])
})
t.Run("When the cluster only supports v1 CRD, v1 CRD will be returned even the input has Spec.PreserveUnknownFields set to true (issue 4080)", func(t *testing.T) {
a.discoveryHelper = &velerotest.FakeDiscoveryHelper{
APIGroupsList: []metav1.APIGroup{
{
Name: apiextv1.GroupName,
Versions: []metav1.GroupVersionForDiscovery{
{
Version: apiextv1.SchemeGroupVersion.Version,
},
},
},
},
}
b := builder.ForV1CustomResourceDefinition("test.velero.io")
b.PreserveUnknownFields(true)
c := b.Result()
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&c)
require.NoError(t, err)
item, _, err := a.Execute(&unstructured.Unstructured{Object: obj}, backup)
require.NoError(t, err)
assert.Equal(t, "apiextensions.k8s.io/v1", item.UnstructuredContent()["apiVersion"])
// set it back to the default one
a.discoveryHelper = fakeDiscoveryHelper()
})
}
// TestRemapCRDVersionActionData tests the RemapCRDVersionAction plugin against actual CRD to confirm that the v1beta1 version is returned when the v1 version is passed in to the plugin.
@@ -144,7 +116,8 @@ func TestRemapCRDVersionActionData(t *testing.T) {
backup := &v1.Backup{}
clientset := apiextfakes.NewSimpleClientset()
betaClient := clientset.ApiextensionsV1beta1().CustomResourceDefinitions()
a := NewRemapCRDVersionAction(velerotest.NewLogger(), betaClient, fakeDiscoveryHelper())
a := NewRemapCRDVersionAction(velerotest.NewLogger(), betaClient)
tests := []struct {
crd string
@@ -219,21 +192,3 @@ func TestRemapCRDVersionActionData(t *testing.T) {
}
}
func fakeDiscoveryHelper() velerodiscovery.Helper {
return &velerotest.FakeDiscoveryHelper{
APIGroupsList: []metav1.APIGroup{
{
Name: apiextv1.GroupName,
Versions: []metav1.GroupVersionForDiscovery{
{
Version: apiextv1beta1.SchemeGroupVersion.Version,
},
{
Version: apiextv1.SchemeGroupVersion.Version,
},
},
},
},
}
}

View File

@@ -20,8 +20,6 @@ import (
"fmt"
"sort"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/vmware-tanzu/velero/internal/hook"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/plugin/framework"
@@ -50,7 +48,6 @@ type Request struct {
VolumeSnapshots []*volume.Snapshot
PodVolumeBackups []*velerov1api.PodVolumeBackup
BackedUpItems map[itemKey]struct{}
CSISnapshots []*snapshotv1api.VolumeSnapshot
}
// BackupResourceList returns the list of backed up resources grouped by the API

View File

@@ -162,12 +162,6 @@ func (b *BackupBuilder) LabelSelector(selector *metav1.LabelSelector) *BackupBui
return b
}
// OrLabelSelector sets the Backup's orLabelSelector set.
func (b *BackupBuilder) OrLabelSelector(orSelectors []*metav1.LabelSelector) *BackupBuilder {
b.object.Spec.OrLabelSelectors = orSelectors
return b
}
// SnapshotVolumes sets the Backup's "snapshot volumes" flag.
func (b *BackupBuilder) SnapshotVolumes(val bool) *BackupBuilder {
b.object.Spec.SnapshotVolumes = &val
@@ -233,9 +227,3 @@ func (b *BackupBuilder) OrderedResources(orders map[string]string) *BackupBuilde
b.object.Spec.OrderedResources = orders
return b
}
// CSISnapshotTimeout sets the Backup's CSISnapshotTimeout
func (b *BackupBuilder) CSISnapshotTimeout(timeout time.Duration) *BackupBuilder {
b.object.Spec.CSISnapshotTimeout.Duration = timeout
return b
}
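
Note: the setters above follow the fluent builder convention used throughout pkg/builder (mutate, then return the receiver). A tiny self-contained sketch of that shape with hypothetical local types:

package main

import (
	"fmt"
	"time"
)

type BackupSpec struct {
	SnapshotVolumes    *bool
	CSISnapshotTimeout time.Duration
}

type BackupBuilder struct{ spec BackupSpec }

// SnapshotVolumes sets the optional flag; returning the receiver lets
// callers chain setters.
func (b *BackupBuilder) SnapshotVolumes(v bool) *BackupBuilder {
	b.spec.SnapshotVolumes = &v
	return b
}

func (b *BackupBuilder) CSISnapshotTimeout(d time.Duration) *BackupBuilder {
	b.spec.CSISnapshotTimeout = d
	return b
}

func (b *BackupBuilder) Result() BackupSpec { return b.spec }

func main() {
	spec := new(BackupBuilder).SnapshotVolumes(true).CSISnapshotTimeout(10 * time.Minute).Result()
	fmt.Println(*spec.SnapshotVolumes, spec.CSISnapshotTimeout) // true 10m0s
}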

View File

@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -63,18 +63,6 @@ func (b *PodVolumeBackupBuilder) Phase(phase velerov1api.PodVolumeBackupPhase) *
return b
}
// Node sets the PodVolumeBackup's node name.
func (b *PodVolumeBackupBuilder) Node(name string) *PodVolumeBackupBuilder {
b.object.Spec.Node = name
return b
}
// BackupStorageLocation sets the PodVolumeBackup's backup storage location.
func (b *PodVolumeBackupBuilder) BackupStorageLocation(name string) *PodVolumeBackupBuilder {
b.object.Spec.BackupStorageLocation = name
return b
}
// SnapshotID sets the PodVolumeBackup's snapshot ID.
func (b *PodVolumeBackupBuilder) SnapshotID(snapshotID string) *PodVolumeBackupBuilder {
b.object.Status.SnapshotID = snapshotID

View File

@@ -95,12 +95,6 @@ func (b *RestoreBuilder) ExcludedResources(resources ...string) *RestoreBuilder
return b
}
// ExistingResourcePolicy sets the Restore's resource policy.
func (b *RestoreBuilder) ExistingResourcePolicy(policy string) *RestoreBuilder {
b.object.Spec.ExistingResourcePolicy = velerov1api.PolicyType(policy)
return b
}
// IncludeClusterResources sets the Restore's "include cluster resources" flag.
func (b *RestoreBuilder) IncludeClusterResources(val bool) *RestoreBuilder {
b.object.Spec.IncludeClusterResources = &val
@@ -113,12 +107,6 @@ func (b *RestoreBuilder) LabelSelector(selector *metav1.LabelSelector) *RestoreB
return b
}
// OrLabelSelector sets the Restore's orLabelSelector set.
func (b *RestoreBuilder) OrLabelSelector(orSelectors []*metav1.LabelSelector) *RestoreBuilder {
b.object.Spec.OrLabelSelectors = orSelectors
return b
}
// NamespaceMappings sets the Restore's namespace mappings.
func (b *RestoreBuilder) NamespaceMappings(mapping ...string) *RestoreBuilder {
if b.object.Spec.NamespaceMapping == nil {

View File

@@ -89,11 +89,6 @@ type Deletor interface {
Delete(name string, opts metav1.DeleteOptions) error
}
// StatusUpdater updates status field of a object
type StatusUpdater interface {
UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error)
}
// Dynamic contains client methods that Velero needs for backing up and restoring resources.
type Dynamic interface {
Creator
@@ -102,7 +97,6 @@ type Dynamic interface {
Getter
Patcher
Deletor
StatusUpdater
}
// dynamicResourceClient implements Dynamic.
@@ -135,7 +129,3 @@ func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.U
func (d *dynamicResourceClient) Delete(name string, opts metav1.DeleteOptions) error {
return d.resourceClient.Delete(context.TODO(), name, opts)
}
func (d *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) {
return d.resourceClient.UpdateStatus(context.TODO(), obj, opts)
}
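
Note: the UpdateStatus wrapper above hits the status subresource via the dynamic client. A minimal sketch of the same call with the gvr/namespace wiring (elided in the hunk) spelled out; the helper name is illustrative:

package statusutil

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// updateStatus writes only the status subresource of obj; for resources with
// the subresource enabled, the main endpoint ignores status changes.
func updateStatus(c dynamic.Interface, gvr schema.GroupVersionResource, ns string, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	return c.Resource(gvr).Namespace(ns).UpdateStatus(context.TODO(), obj, metav1.UpdateOptions{})
}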

View File

@@ -41,6 +41,8 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/collections"
)
const DefaultBackupTTL time.Duration = 30 * 24 * time.Hour
func NewCreateCommand(f client.Factory, use string) *cobra.Command {
o := NewCreateOptions()
@@ -98,13 +100,13 @@ type CreateOptions struct {
SnapshotLocations []string
FromSchedule string
OrderedResources string
CSISnapshotTimeout time.Duration
client veleroclient.Interface
}
func NewCreateOptions() *CreateOptions {
return &CreateOptions{
TTL: DefaultBackupTTL,
IncludeNamespaces: flag.NewStringArray("*"),
Labels: flag.NewMap(),
SnapshotVolumes: flag.NewOptionalBool(nil),
@@ -123,7 +125,6 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.StringSliceVar(&o.SnapshotLocations, "volume-snapshot-locations", o.SnapshotLocations, "List of locations (at most one per provider) where volume snapshots should be stored.")
flags.VarP(&o.Selector, "selector", "l", "Only back up resources matching this label selector.")
flags.StringVar(&o.OrderedResources, "ordered-resources", "", "Mapping Kinds to an ordered list of specific resources of that Kind. Resource names are separated by commas and their names are in format 'namespace/resourcename'. For cluster scope resource, simply use resource name. Key-value pairs in the mapping are separated by semi-colon. Example: 'pods=ns1/pod1,ns1/pod2;persistentvolumeclaims=ns1/pvc4,ns1/pvc8'. Optional.")
flags.DurationVar(&o.CSISnapshotTimeout, "csi-snapshot-timeout", o.CSISnapshotTimeout, "How long to wait for CSI snapshot creation before timeout.")
f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup.")
// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
// like a normal bool flag
@@ -290,11 +291,11 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
return nil
}
// ParseOrderedResources converts to map of Kinds to an ordered list of specific resources of that Kind.
// parseOrderedResources converts to map of Kinds to an ordered list of specific resources of that Kind.
// Resource names in the list are in format 'namespace/resourcename' and separated by commas.
// Key-value pairs in the mapping are separated by semi-colon.
// Ex: 'pods=ns1/pod1,ns1/pod2;persistentvolumeclaims=ns1/pvc4,ns1/pvc8'.
func ParseOrderedResources(orderMapStr string) (map[string]string, error) {
func parseOrderedResources(orderMapStr string) (map[string]string, error) {
entries := strings.Split(orderMapStr, ";")
if len(entries) == 0 {
return nil, fmt.Errorf("Invalid OrderedResources '%s'.", orderMapStr)
@@ -334,10 +335,9 @@ func (o *CreateOptions) BuildBackup(namespace string) (*velerov1api.Backup, erro
LabelSelector(o.Selector.LabelSelector).
TTL(o.TTL).
StorageLocation(o.StorageLocation).
VolumeSnapshotLocations(o.SnapshotLocations...).
CSISnapshotTimeout(o.CSISnapshotTimeout)
VolumeSnapshotLocations(o.SnapshotLocations...)
if len(o.OrderedResources) > 0 {
orders, err := ParseOrderedResources(o.OrderedResources)
orders, err := parseOrderedResources(o.OrderedResources)
if err != nil {
return nil, err
}
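
Note: the doc comment above pins down the ordered-resources grammar ('kind=name1,name2;kind2=...'). A self-contained sketch of a parser for that shape, simplified relative to Velero's but trimming whitespace the way the tests in the next file expect:

package main

import (
	"fmt"
	"strings"
)

// parseOrderedResources splits "kind=name1,name2;kind2=..." into a map,
// trimming whitespace around keys and names.
func parseOrderedResources(s string) (map[string]string, error) {
	out := map[string]string{}
	for _, entry := range strings.Split(s, ";") {
		kv := strings.Split(entry, "=")
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid entry %q", entry)
		}
		names := []string{}
		for _, n := range strings.Split(kv[1], ",") {
			names = append(names, strings.TrimSpace(n))
		}
		out[strings.TrimSpace(kv[0])] = strings.Join(names, ",")
	}
	return out, nil
}

func main() {
	m, _ := parseOrderedResources("pods=ns1/p1,ns1/p2;persistentvolumeclaims=ns2/pvc1")
	fmt.Println(m) // map[persistentvolumeclaims:ns2/pvc1 pods:ns1/p1,ns1/p2]
}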

View File

@@ -19,7 +19,6 @@ package backup
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,8 +34,7 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
o := NewCreateOptions()
o.Labels.Set("velero.io/test=true")
o.OrderedResources = "pods=p1,p2;persistentvolumeclaims=pvc1,pvc2"
orders, err := ParseOrderedResources(o.OrderedResources)
o.CSISnapshotTimeout = 20 * time.Minute
orders, err := parseOrderedResources(o.OrderedResources)
assert.NoError(t, err)
backup, err := o.BuildBackup(testNamespace)
@@ -48,7 +46,6 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
SnapshotVolumes: o.SnapshotVolumes.Value,
IncludeClusterResources: o.IncludeClusterResources.Value,
OrderedResources: orders,
CSISnapshotTimeout: metav1.Duration{Duration: o.CSISnapshotTimeout},
}, backup.Spec)
assert.Equal(t, map[string]string{
@@ -103,10 +100,10 @@ func TestCreateOptions_BuildBackupFromSchedule(t *testing.T) {
}
func TestCreateOptions_OrderedResources(t *testing.T) {
orderedResources, err := ParseOrderedResources("pods= ns1/p1; ns1/p2; persistentvolumeclaims=ns2/pvc1, ns2/pvc2")
orderedResources, err := parseOrderedResources("pods= ns1/p1; ns1/p2; persistentvolumeclaims=ns2/pvc1, ns2/pvc2")
assert.NotNil(t, err)
orderedResources, err = ParseOrderedResources("pods= ns1/p1,ns1/p2 ; persistentvolumeclaims=ns2/pvc1,ns2/pvc2")
orderedResources, err = parseOrderedResources("pods= ns1/p1,ns1/p2 ; persistentvolumeclaims=ns2/pvc1,ns2/pvc2")
assert.NoError(t, err)
expectedResources := map[string]string{
@@ -115,7 +112,7 @@ func TestCreateOptions_OrderedResources(t *testing.T) {
}
assert.Equal(t, orderedResources, expectedResources)
orderedResources, err = ParseOrderedResources("pods= ns1/p1,ns1/p2 ; persistentvolumes=pv1,pv2")
orderedResources, err = parseOrderedResources("pods= ns1/p1,ns1/p2 ; persistentvolumes=pv1,pv2")
assert.NoError(t, err)
expectedMixedResources := map[string]string{

View File

@@ -24,8 +24,8 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1beta1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1"
snapshotv1beta1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
@@ -86,17 +86,17 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
fmt.Fprintf(os.Stderr, "error getting PodVolumeBackups for backup %s: %v\n", backup.Name, err)
}
var csiClient *snapshotv1client.Clientset
var csiClient *snapshotv1beta1client.Clientset
// declare vscList up here since it may be empty and we'll pass the empty Items field into DescribeBackup
vscList := new(snapshotv1api.VolumeSnapshotContentList)
vscList := new(snapshotv1beta1api.VolumeSnapshotContentList)
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
clientConfig, err := f.ClientConfig()
cmd.CheckError(err)
csiClient, err = snapshotv1client.NewForConfig(clientConfig)
csiClient, err = snapshotv1beta1client.NewForConfig(clientConfig)
cmd.CheckError(err)
vscList, err = csiClient.SnapshotV1().VolumeSnapshotContents().List(context.TODO(), opts)
vscList, err = csiClient.SnapshotV1beta1().VolumeSnapshotContents().List(context.TODO(), opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting VolumeSnapshotContent objects for backup %s: %v\n", backup.Name, err)
}

View File

@@ -66,15 +66,7 @@ $ velero completion fish > ~/.config/fish/completions/velero.fish
case "bash":
cmd.Root().GenBashCompletion(os.Stdout)
case "zsh":
// # fix #4912
// cobra does not support the zsh completion output used by the source command,
// according to https://github.com/spf13/cobra/issues/1529.
// We need to prepend the compdef header manually to work around that.
zshHead := "#compdef velero\ncompdef _velero velero\n"
out := os.Stdout
out.Write([]byte(zshHead))
cmd.Root().GenZshCompletion(out)
cmd.Root().GenZshCompletion(os.Stdout)
case "fish":
cmd.Root().GenFishCompletion(os.Stdout, true)
default:
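
Note: the zsh branch above prepends a compdef header so that `source <(velero completion zsh)` registers the completion (cobra issue 1529). The same workaround sketched for a hypothetical CLI named mycli:

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "mycli"}
	// zsh's `source` path needs the compdef header that cobra omits.
	os.Stdout.WriteString("#compdef mycli\ncompdef _mycli mycli\n")
	_ = root.GenZshCompletion(os.Stdout)
}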

View File

@@ -47,7 +47,6 @@ type InstallOptions struct {
Prefix string
ProviderName string
PodAnnotations flag.Map
PodLabels flag.Map
ServiceAccountAnnotations flag.Map
VeleroPodCPURequest string
VeleroPodMemRequest string
@@ -67,7 +66,6 @@ type InstallOptions struct {
Wait bool
UseVolumeSnapshots bool
DefaultResticMaintenanceFrequency time.Duration
GarbageCollectionFrequency time.Duration
Plugins flag.StringArray
NoDefaultBackupLocation bool
CRDsOnly bool
@@ -86,7 +84,6 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.Image, "image", o.Image, "Image to use for the Velero and restic server pods. Optional.")
flags.StringVar(&o.Prefix, "prefix", o.Prefix, "Prefix under which all Velero data should be stored within the bucket. Optional.")
flags.Var(&o.PodAnnotations, "pod-annotations", "Annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2")
flags.Var(&o.PodLabels, "pod-labels", "Labels to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2")
flags.Var(&o.ServiceAccountAnnotations, "sa-annotations", "Annotations to add to the Velero ServiceAccount. Add iam.gke.io/gcp-service-account=[GSA_NAME]@[PROJECT_NAME].iam.gserviceaccount.com for workload identity. Optional. Format is key1=value1,key2=value2")
flags.StringVar(&o.VeleroPodCPURequest, "velero-pod-cpu-request", o.VeleroPodCPURequest, `CPU request for Velero pod. A value of "0" is treated as unbounded. Optional.`)
flags.StringVar(&o.VeleroPodMemRequest, "velero-pod-mem-request", o.VeleroPodMemRequest, `Memory request for Velero pod. A value of "0" is treated as unbounded. Optional.`)
@@ -104,7 +101,6 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) {
flags.BoolVar(&o.UseRestic, "use-restic", o.UseRestic, "Create restic daemonset. Optional.")
flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.")
flags.DurationVar(&o.DefaultResticMaintenanceFrequency, "default-restic-prune-frequency", o.DefaultResticMaintenanceFrequency, "How often 'restic prune' is run for restic repositories by default. Optional.")
flags.DurationVar(&o.GarbageCollectionFrequency, "garbage-collection-frequency", o.GarbageCollectionFrequency, "How often the garbage collection runs for expired backups. (default 1h)")
flags.Var(&o.Plugins, "plugins", "Plugin container images to install into the Velero Deployment")
flags.BoolVar(&o.CRDsOnly, "crds-only", o.CRDsOnly, "Only generate CustomResourceDefinition resources. Useful for updating CRDs for an existing Velero install.")
flags.StringVar(&o.CACertFile, "cacert", o.CACertFile, "File containing a certificate bundle to use when verifying TLS connections to the object store. Optional.")
@@ -120,7 +116,6 @@ func NewInstallOptions() *InstallOptions {
BackupStorageConfig: flag.NewMap(),
VolumeSnapshotConfig: flag.NewMap(),
PodAnnotations: flag.NewMap(),
PodLabels: flag.NewMap(),
ServiceAccountAnnotations: flag.NewMap(),
VeleroPodCPURequest: install.DefaultVeleroPodCPURequest,
VeleroPodMemRequest: install.DefaultVeleroPodMemRequest,
@@ -178,7 +173,6 @@ func (o *InstallOptions) AsVeleroOptions() (*install.VeleroOptions, error) {
Bucket: o.BucketName,
Prefix: o.Prefix,
PodAnnotations: o.PodAnnotations.Data(),
PodLabels: o.PodLabels.Data(),
ServiceAccountAnnotations: o.ServiceAccountAnnotations.Data(),
VeleroPodResources: veleroPodResources,
ResticPodResources: resticPodResources,
@@ -189,7 +183,6 @@ func (o *InstallOptions) AsVeleroOptions() (*install.VeleroOptions, error) {
BSLConfig: o.BackupStorageConfig.Data(),
VSLConfig: o.VolumeSnapshotConfig.Data(),
DefaultResticMaintenanceFrequency: o.DefaultResticMaintenanceFrequency,
GarbageCollectionFrequency: o.GarbageCollectionFrequency,
Plugins: o.Plugins,
NoDefaultBackupLocation: o.NoDefaultBackupLocation,
CACertData: caCertData,
@@ -398,9 +391,5 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact
return errors.New("--default-restic-prune-frequency must be non-negative")
}
if o.GarbageCollectionFrequency < 0 {
return errors.New("--garbage-collection-frequency must be non-negative")
}
return nil
}

View File

@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,7 +13,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
@@ -22,39 +21,41 @@ import (
"net/http"
"os"
"strings"
"time"
"github.com/apex/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/vmware-tanzu/velero/internal/util/managercontroller"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/buildinfo"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/cmd"
"github.com/vmware-tanzu/velero/pkg/cmd/util/signals"
"github.com/vmware-tanzu/velero/pkg/controller"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/restic"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/logging"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
@@ -101,18 +102,45 @@ func NewServerCommand(f client.Factory) *cobra.Command {
}
type resticServer struct {
logger logrus.FieldLogger
ctx context.Context
cancelFunc context.CancelFunc
fileSystem filesystem.Interface
mgr manager.Manager
metrics *metrics.ServerMetrics
metricsAddress string
namespace string
nodeName string
kubeClient kubernetes.Interface
veleroClient clientset.Interface
veleroInformerFactory informers.SharedInformerFactory
kubeInformerFactory kubeinformers.SharedInformerFactory
podInformer cache.SharedIndexInformer
logger logrus.FieldLogger
ctx context.Context
cancelFunc context.CancelFunc
fileSystem filesystem.Interface
mgr manager.Manager
metrics *metrics.ServerMetrics
metricsAddress string
namespace string
}
func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAddress string) (*resticServer, error) {
kubeClient, err := factory.KubeClient()
if err != nil {
return nil, err
}
veleroClient, err := factory.Client()
if err != nil {
return nil, err
}
// use a stand-alone pod informer because we want to use a field selector to
// filter to only pods scheduled on this node.
podInformer := corev1informers.NewFilteredPodInformer(
kubeClient,
metav1.NamespaceAll,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("spec.nodeName=%s", os.Getenv("NODE_NAME"))
},
)
ctx, cancelFunc := context.WithCancel(context.Background())
clientConfig, err := factory.ClientConfig()
@@ -125,43 +153,29 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAd
velerov1api.AddToScheme(scheme)
v1.AddToScheme(scheme)
storagev1api.AddToScheme(scheme)
nodeName := os.Getenv("NODE_NAME")
// use a field selector to filter to only pods scheduled on this node.
cacheOption := cache.Options{
SelectorsByObject: cache.SelectorsByObject{
&v1.Pod{}: {
Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
},
},
}
mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
Scheme: scheme,
NewCache: cache.BuilderWithOptions(cacheOption),
Scheme: scheme,
})
if err != nil {
return nil, err
}
s := &resticServer{
logger: logger,
ctx: ctx,
cancelFunc: cancelFunc,
fileSystem: filesystem.NewFileSystem(),
mgr: mgr,
metricsAddress: metricAddress,
namespace: factory.Namespace(),
nodeName: nodeName,
kubeClient: kubeClient,
veleroClient: veleroClient,
veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, factory.Namespace(), nil),
kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0),
podInformer: podInformer,
logger: logger,
ctx: ctx,
cancelFunc: cancelFunc,
fileSystem: filesystem.NewFileSystem(),
mgr: mgr,
metricsAddress: metricAddress,
namespace: factory.Namespace(),
}
// the cache isn't initialized yet when "validatePodVolumesHostPath" is called, so the client returned by the manager
// cannot be used; we need the kube client here
client, err := factory.KubeClient()
if err != nil {
return nil, err
}
if err := s.validatePodVolumesHostPath(client); err != nil {
if err := s.validatePodVolumesHostPath(); err != nil {
return nil, err
}
@@ -181,9 +195,7 @@ func (s *resticServer) run() {
}()
s.metrics = metrics.NewResticServerMetrics()
s.metrics.RegisterAllMetrics()
s.metrics.InitResticMetricsForNode(s.nodeName)
s.markInProgressCRsFailed()
s.metrics.InitResticMetricsForNode(os.Getenv("NODE_NAME"))
s.logger.Info("Starting controllers")
@@ -197,24 +209,44 @@ func (s *resticServer) run() {
s.logger.Fatalf("Failed to create credentials file store: %v", err)
}
pvbReconciler := controller.PodVolumeBackupReconciler{
Scheme: s.mgr.GetScheme(),
Client: s.mgr.GetClient(),
Clock: clock.RealClock{},
Metrics: s.metrics,
CredsFileStore: credentialFileStore,
NodeName: s.nodeName,
FileSystem: filesystem.NewFileSystem(),
ResticExec: restic.BackupExec{},
Log: s.logger,
}
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.PodVolumeBackup)
}
backupController := controller.NewPodVolumeBackupController(
s.logger,
s.veleroInformerFactory.Velero().V1().PodVolumeBackups(),
s.veleroClient.VeleroV1(),
s.podInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
s.kubeInformerFactory.Core().V1().PersistentVolumes(),
s.metrics,
s.mgr.GetClient(),
os.Getenv("NODE_NAME"),
credentialFileStore,
)
if err = controller.NewPodVolumeRestoreReconciler(s.logger, s.mgr.GetClient(), credentialFileStore).SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
restoreController := controller.NewPodVolumeRestoreController(
s.logger,
s.veleroInformerFactory.Velero().V1().PodVolumeRestores(),
s.veleroClient.VeleroV1(),
s.podInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
s.kubeInformerFactory.Core().V1().PersistentVolumes(),
s.mgr.GetClient(),
os.Getenv("NODE_NAME"),
credentialFileStore,
)
go s.veleroInformerFactory.Start(s.ctx.Done())
go s.kubeInformerFactory.Start(s.ctx.Done())
go s.podInformer.Run(s.ctx.Done())
// TODO(2.0): presuming all controllers and resources are converted to runtime-controller by v2.0, the block from this
// line through the `s.mgr.Start()` call will be deprecated, since the manager auto-starts all the caches. Until then,
// we need to start the cache for them manually.
// Adding the controllers to the manager will register them as a (runtime-controller) runnable,
// so the manager will ensure the cache is started and ready before all controllers are started
s.mgr.Add(managercontroller.Runnable(backupController, 1))
s.mgr.Add(managercontroller.Runnable(restoreController, 1))
s.logger.Info("Controllers starting...")
@@ -225,7 +257,7 @@ func (s *resticServer) run() {
// validatePodVolumesHostPath validates that the pod volumes path contains a
// directory for each Pod running on this node
func (s *resticServer) validatePodVolumesHostPath(client kubernetes.Interface) error {
func (s *resticServer) validatePodVolumesHostPath() error {
files, err := s.fileSystem.ReadDir("/host_pods/")
if err != nil {
return errors.Wrap(err, "could not read pod volumes host path")
@@ -239,7 +271,7 @@ func (s *resticServer) validatePodVolumesHostPath(client kubernetes.Interface) e
}
}
pods, err := client.CoreV1().Pods("").List(s.ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s,status.phase=Running", s.nodeName)})
pods, err := s.kubeClient.CoreV1().Pods("").List(s.ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s,status.phase=Running", os.Getenv("NODE_NAME"))})
if err != nil {
return errors.WithStack(err)
}
@@ -269,83 +301,3 @@ func (s *resticServer) validatePodVolumesHostPath(client kubernetes.Interface) e
return nil
}
// if a restart happens during the reconciliation of PVBs/PVRs/etc., these CRs may be stuck in the InProgress status.
// markInProgressCRsFailed tries to mark such in-progress CRs as failed when starting the server to avoid the issue
func (s *resticServer) markInProgressCRsFailed() {
// the function is called before starting the controller manager, so the embedded client isn't ready to use; create a new one here
client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()})
if err != nil {
log.WithError(errors.WithStack(err)).Error("failed to create client")
return
}
s.markInProgressPVBsFailed(client)
s.markInProgressPVRsFailed(client)
}
func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) {
pvbs := &velerov1api.PodVolumeBackupList{}
if err := client.List(s.ctx, pvbs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil {
log.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
return
}
for _, pvb := range pvbs.Items {
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
log.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
continue
}
if pvb.Spec.Node != s.nodeName {
log.Debugf("the node of podvolumebackup %q is %q, not %q, skip", pvb.GetName(), pvb.Spec.Node, s.nodeName)
continue
}
original := pvb.DeepCopy()
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
continue
}
log.WithField("podvolumebackup", pvb.GetName()).Warn(pvb.Status.Message)
}
}
func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) {
pvrs := &velerov1api.PodVolumeRestoreList{}
if err := client.List(s.ctx, pvrs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil {
log.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
return
}
for _, pvr := range pvrs.Items {
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
log.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
continue
}
pod := &v1.Pod{}
if err := client.Get(s.ctx, types.NamespacedName{
Namespace: pvr.Spec.Pod.Namespace,
Name: pvr.Spec.Pod.Name,
}, pod); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to get pod \"%s/%s\" of podvolumerestore %q",
pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name, pvr.GetName())
continue
}
if pod.Spec.NodeName != s.nodeName {
log.Debugf("the node of pod referenced by podvolumebackup %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName)
continue
}
original := pvr.DeepCopy()
pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
continue
}
log.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message)
}
}
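
Note: one variant above restricts the manager's Pod cache to the local node with a field selector. A minimal sketch of that wiring in isolation (controller-runtime of the v0.10 era, where cache.Options.SelectorsByObject exists; NODE_NAME is assumed to be injected via the downward API, as in the daemonset manifest):

package main

import (
	"os"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	nodeName := os.Getenv("NODE_NAME")
	// Only Pods scheduled on this node are cached; other types are unfiltered.
	cacheOption := cache.Options{
		SelectorsByObject: cache.SelectorsByObject{
			&v1.Pod{}: {
				Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
			},
		},
	}
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewCache: cache.BuilderWithOptions(cacheOption),
	})
	if err != nil {
		panic(err)
	}
	_ = mgr
}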

View File

@@ -95,11 +95,12 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
}
s := &resticServer{
kubeClient: kubeClient,
logger: testutil.NewLogger(),
fileSystem: fs,
}
err := s.validatePodVolumesHostPath(kubeClient)
err := s.validatePodVolumesHostPath()
if tt.wantErr {
assert.Error(t, err)
} else {

View File

@@ -82,11 +82,8 @@ type CreateOptions struct {
Labels flag.Map
IncludeNamespaces flag.StringArray
ExcludeNamespaces flag.StringArray
ExistingResourcePolicy string
IncludeResources flag.StringArray
ExcludeResources flag.StringArray
StatusIncludeResources flag.StringArray
StatusExcludeResources flag.StringArray
NamespaceMappings flag.Map
Selector flag.LabelSelector
IncludeClusterResources flag.OptionalBool
@@ -100,7 +97,7 @@ func NewCreateOptions() *CreateOptions {
return &CreateOptions{
Labels: flag.NewMap(),
IncludeNamespaces: flag.NewStringArray("*"),
NamespaceMappings: flag.NewMap().WithEntryDelimiter(',').WithKeyValueDelimiter(':'),
NamespaceMappings: flag.NewMap().WithEntryDelimiter(",").WithKeyValueDelimiter(":"),
RestoreVolumes: flag.NewOptionalBool(nil),
PreserveNodePorts: flag.NewOptionalBool(nil),
IncludeClusterResources: flag.NewOptionalBool(nil),
@@ -116,9 +113,6 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.Var(&o.Labels, "labels", "Labels to apply to the restore.")
flags.Var(&o.IncludeResources, "include-resources", "Resources to include in the restore, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources).")
flags.Var(&o.ExcludeResources, "exclude-resources", "Resources to exclude from the restore, formatted as resource.group, such as storageclasses.storage.k8s.io.")
flags.StringVar(&o.ExistingResourcePolicy, "existing-resource-policy", "", "Restore policy to be used during the restore workflow; can be 'none' or 'update'")
flags.Var(&o.StatusIncludeResources, "status-include-resources", "Resources to include in the restore status, formatted as resource.group, such as storageclasses.storage.k8s.io.")
flags.Var(&o.StatusExcludeResources, "status-exclude-resources", "Resources to exclude from the restore status, formatted as resource.group, such as storageclasses.storage.k8s.io.")
flags.VarP(&o.Selector, "selector", "l", "Only restore resources matching this label selector.")
f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.")
// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
@@ -178,10 +172,6 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
return errors.New("Velero client is not set; unable to proceed")
}
if len(o.ExistingResourcePolicy) > 0 && !isResourcePolicyValid(o.ExistingResourcePolicy) {
return errors.New("existing-resource-policy has invalid value, it accepts only none, update as value")
}
switch {
case o.BackupName != "":
if _, err := o.client.VeleroV1().Backups(f.Namespace()).Get(context.TODO(), o.BackupName, metav1.GetOptions{}); err != nil {
@@ -274,7 +264,6 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
ExcludedNamespaces: o.ExcludeNamespaces,
IncludedResources: o.IncludeResources,
ExcludedResources: o.ExcludeResources,
ExistingResourcePolicy: api.PolicyType(o.ExistingResourcePolicy),
NamespaceMapping: o.NamespaceMappings.Data(),
LabelSelector: o.Selector.LabelSelector,
RestorePVs: o.RestoreVolumes.Value,
@@ -283,13 +272,6 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
},
}
if len([]string(o.StatusIncludeResources)) > 0 {
restore.Spec.RestoreStatus = &api.RestoreStatusSpec{
IncludedResources: o.StatusIncludeResources,
ExcludedResources: o.StatusExcludeResources,
}
}
if printed, err := output.PrintWithFormat(c, restore); printed || err != nil {
return err
}
@@ -369,10 +351,3 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
return nil
}
func isResourcePolicyValid(resourcePolicy string) bool {
if resourcePolicy == string(api.PolicyTypeNone) || resourcePolicy == string(api.PolicyTypeUpdate) {
return true
}
return false
}
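
Note: isResourcePolicyValid above is a plain enum check. A self-contained sketch of the same validation with a switch, using a hypothetical local PolicyType (string values "none" and "update" assumed from the flag help text):

package main

import "fmt"

type PolicyType string

const (
	PolicyTypeNone   PolicyType = "none"
	PolicyTypeUpdate PolicyType = "update"
)

// isResourcePolicyValid accepts exactly the two documented policy values.
func isResourcePolicyValid(p string) bool {
	switch PolicyType(p) {
	case PolicyTypeNone, PolicyTypeUpdate:
		return true
	}
	return false
}

func main() {
	fmt.Println(isResourcePolicyValid("update"), isResourcePolicyValid("merge")) // true false
}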

View File

@@ -95,7 +95,7 @@ func NewCreateOptions() *CreateOptions {
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
o.BackupOptions.BindFlags(flags)
flags.StringVar(&o.Schedule, "schedule", o.Schedule, "A cron expression specifying a recurring schedule for this backup to run")
flags.BoolVar(&o.UseOwnerReferencesInBackup, "use-owner-references-in-backup", o.UseOwnerReferencesInBackup, "Specifies whether to use OwnerReferences on backups created by this Schedule. Note: if set to true, backups will be deleted when the schedule is deleted.")
flags.BoolVar(&o.UseOwnerReferencesInBackup, "use-owner-references-in-backup", o.UseOwnerReferencesInBackup, "Specifies whether to use OwnerReferences on backups created by this Schedule")
}
func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
@@ -111,20 +111,11 @@ func (o *CreateOptions) Complete(args []string, f client.Factory) error {
}
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
var orders map[string]string
veleroClient, err := f.Client()
if err != nil {
return err
}
if len(o.BackupOptions.OrderedResources) > 0 {
orders, err = backup.ParseOrderedResources(o.BackupOptions.OrderedResources)
if err != nil {
return err
}
}
schedule := &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace(),
@@ -144,8 +135,6 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
StorageLocation: o.BackupOptions.StorageLocation,
VolumeSnapshotLocations: o.BackupOptions.SnapshotLocations,
DefaultVolumesToRestic: o.BackupOptions.DefaultVolumesToRestic.Value,
OrderedResources: orders,
CSISnapshotTimeout: metav1.Duration{Duration: o.BackupOptions.CSISnapshotTimeout},
},
Schedule: o.Schedule,
UseOwnerReferencesInBackup: &o.UseOwnerReferencesInBackup,

View File

@@ -19,10 +19,8 @@ package plugin
import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/features"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"github.com/vmware-tanzu/velero/pkg/backup"
"github.com/vmware-tanzu/velero/pkg/client"
@@ -38,10 +36,11 @@ func NewCommand(f client.Factory) *cobra.Command {
Hidden: true,
Short: "INTERNAL COMMAND ONLY - not intended to be run directly by users",
Run: func(c *cobra.Command, args []string) {
pluginServer = pluginServer.
pluginServer.
RegisterBackupItemAction("velero.io/pv", newPVBackupItemAction).
RegisterBackupItemAction("velero.io/pod", newPodBackupItemAction).
RegisterBackupItemAction("velero.io/service-account", newServiceAccountBackupItemAction(f)).
RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)).
RegisterRestoreItemAction("velero.io/job", newJobRestoreItemAction).
RegisterRestoreItemAction("velero.io/pod", newPodRestoreItemAction).
RegisterRestoreItemAction("velero.io/restic", newResticRestoreItemAction(f)).
@@ -56,15 +55,13 @@ func NewCommand(f client.Factory) *cobra.Command {
RegisterRestoreItemAction("velero.io/crd-preserve-fields", newCRDV1PreserveUnknownFieldsItemAction).
RegisterRestoreItemAction("velero.io/change-pvc-node-selector", newChangePVCNodeSelectorItemAction(f)).
RegisterRestoreItemAction("velero.io/apiservice", newAPIServiceRestoreItemAction).
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction)
if !features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) {
// Do not register crd-remap-version BIA if the API Group feature flag is enabled, so that the v1 CRD can be backed up
pluginServer = pluginServer.RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f))
}
pluginServer.Serve()
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction).
Serve()
},
}
pluginServer.BindFlags(c.Flags())
return c
}
@@ -113,16 +110,7 @@ func newRemapCRDVersionAction(f client.Factory) veleroplugin.HandlerInitializer
return nil, err
}
clientset, err := f.KubeClient()
if err != nil {
return nil, err
}
discoveryHelper, err := velerodiscovery.NewHelper(clientset.Discovery(), logger)
if err != nil {
return nil, err
}
return backup.NewRemapCRDVersionAction(logger, client.ApiextensionsV1beta1().CustomResourceDefinitions(), discoveryHelper), nil
return backup.NewRemapCRDVersionAction(logger, client.ApiextensionsV1beta1().CustomResourceDefinitions()), nil
}
}
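
Note: one variant above reassigns pluginServer on every Register call. A toy illustration of why capturing the return matters when a builder method has a value receiver (hypothetical types, not Velero's plugin framework):

package main

import "fmt"

type server struct{ names []string }

// register has a value receiver, so it mutates a copy and returns it; a
// caller that drops the return value silently loses the registration.
func (s server) register(name string) server {
	s.names = append(s.names, name)
	return s
}

func main() {
	s := server{}
	s.register("dropped") // return value discarded: registration lost
	s = s.register("kept")
	fmt.Println(s.names) // [kept]
}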

View File

@@ -1,5 +1,5 @@
/*
Copyright The Velero Contributors.
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -45,10 +45,10 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1informers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions"
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
snapshotv1beta1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1"
snapshotv1beta1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1beta1informers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions"
snapshotv1beta1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1beta1"
"github.com/vmware-tanzu/velero/internal/credentials"
"github.com/vmware-tanzu/velero/pkg/backup"
@@ -74,7 +74,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/vmware-tanzu/velero/internal/storage"
@@ -102,8 +101,6 @@ const (
// the default TTL for a backup
defaultBackupTTL = 30 * 24 * time.Hour
defaultCSISnapshotTimeout = 10 * time.Minute
// defaultCredentialsDirectory is the path on disk where credential
// files will be written to
defaultCredentialsDirectory = "/tmp/credentials"
@@ -113,7 +110,7 @@ type serverConfig struct {
// TODO(2.0) Deprecate defaultBackupLocation
pluginDir, metricsAddress, defaultBackupLocation string
backupSyncPeriod, podVolumeOperationTimeout, resourceTerminatingTimeout time.Duration
defaultBackupTTL, storeValidationFrequency, defaultCSISnapshotTimeout time.Duration
defaultBackupTTL, storeValidationFrequency time.Duration
restoreResourcePriorities []string
defaultVolumeSnapshotLocations map[string]string
restoreOnly bool
@@ -124,7 +121,6 @@ type serverConfig struct {
profilerAddress string
formatFlag *logging.FormatFlag
defaultResticMaintenanceFrequency time.Duration
garbageCollectionFrequency time.Duration
defaultVolumesToRestic bool
}
@@ -135,7 +131,7 @@ type controllerRunInfo struct {
func NewCommand(f client.Factory) *cobra.Command {
var (
volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(':')
volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(":")
logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
config = serverConfig{
pluginDir: "/plugins",
@@ -144,7 +140,6 @@ func NewCommand(f client.Factory) *cobra.Command {
defaultVolumeSnapshotLocations: make(map[string]string),
backupSyncPeriod: defaultBackupSyncPeriod,
defaultBackupTTL: defaultBackupTTL,
defaultCSISnapshotTimeout: defaultCSISnapshotTimeout,
storeValidationFrequency: defaultStoreValidationFrequency,
podVolumeOperationTimeout: defaultPodVolumeOperationTimeout,
restoreResourcePriorities: defaultRestorePriorities,
@@ -219,7 +214,6 @@ func NewCommand(f client.Factory) *cobra.Command {
command.Flags().DurationVar(&config.resourceTerminatingTimeout, "terminating-resource-timeout", config.resourceTerminatingTimeout, "How long to wait on persistent volumes and namespaces to terminate during a restore before timing out.")
command.Flags().DurationVar(&config.defaultBackupTTL, "default-backup-ttl", config.defaultBackupTTL, "How long to wait by default before backups can be garbage collected.")
command.Flags().DurationVar(&config.defaultResticMaintenanceFrequency, "default-restic-prune-frequency", config.defaultResticMaintenanceFrequency, "How often 'restic prune' is run for restic repositories by default.")
command.Flags().DurationVar(&config.garbageCollectionFrequency, "garbage-collection-frequency", config.garbageCollectionFrequency, "How often garbage collection is run for expired backups.")
command.Flags().BoolVar(&config.defaultVolumesToRestic, "default-volumes-to-restic", config.defaultVolumesToRestic, "Backup all volumes with restic by default.")
return command
@@ -236,7 +230,7 @@ type server struct {
dynamicClient dynamic.Interface
sharedInformerFactory informers.SharedInformerFactory
csiSnapshotterSharedInformerFactory *CSIInformerFactoryWrapper
csiSnapshotClient *snapshotv1client.Clientset
csiSnapshotClient *snapshotv1beta1client.Clientset
ctx context.Context
cancelFunc context.CancelFunc
logger logrus.FieldLogger
@@ -296,9 +290,9 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
return nil, err
}
var csiSnapClient *snapshotv1client.Clientset
var csiSnapClient *snapshotv1beta1client.Clientset
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
csiSnapClient, err = snapshotv1client.NewForConfig(clientConfig)
csiSnapClient, err = snapshotv1beta1client.NewForConfig(clientConfig)
if err != nil {
cancelFunc()
return nil, err
@@ -308,7 +302,6 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
corev1api.AddToScheme(scheme)
snapshotv1api.AddToScheme(scheme)
ctrl.SetLogger(logrusr.NewLogger(logger))
@@ -382,8 +375,6 @@ func (s *server) run() error {
return err
}
markInProgressCRsFailed(s.ctx, s.mgr.GetConfig(), s.mgr.GetScheme(), s.namespace, s.logger)
if err := s.runControllers(s.config.defaultVolumeSnapshotLocations); err != nil {
return err
}
@@ -485,7 +476,6 @@ func (s *server) veleroResourcesExist() error {
// have restic restores run before controllers adopt the pods.
// - Replica sets go before deployments/other controllers so they can be explicitly
// restored and be adopted by controllers.
// - CAPI ClusterClasses go before Clusters.
// - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic.
// Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters.
// See https://github.com/kubernetes-sigs/cluster-api/issues/4105
@@ -508,7 +498,6 @@ var defaultRestorePriorities = []string{
// to ensure that we prioritize restoring from "apps" too, since this is how they're stored
// in the backup.
"replicasets.apps",
"clusterclasses.cluster.x-k8s.io",
"clusters.cluster.x-k8s.io",
"clusterresourcesets.addons.cluster.x-k8s.io",
}
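
As an illustration of how a priority slice like this can drive ordering, here is a hypothetical sketch (not Velero's actual sorting code, which lives elsewhere): resources found in the list sort by their position, and everything else sorts after them.

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// A trimmed-down priority list, in the spirit of defaultRestorePriorities.
	priorities := []string{"customresourcedefinitions", "namespaces", "persistentvolumes"}
	rank := make(map[string]int, len(priorities))
	for i, r := range priorities {
		rank[r] = i
	}

	resources := []string{"pods", "namespaces", "persistentvolumes", "customresourcedefinitions"}
	sort.SliceStable(resources, func(i, j int) bool {
		ri, iok := rank[resources[i]]
		rj, jok := rank[resources[j]]
		switch {
		case iok && jok:
			return ri < rj // both prioritized: earlier in the list wins
		case iok:
			return true // only i is prioritized: it goes first
		default:
			return false // unprioritized resources keep their relative order, after the rest
		}
	})
	fmt.Println(resources) // [customresourcedefinitions namespaces persistentvolumes pods]
}
```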
@@ -546,34 +535,32 @@ func (s *server) initRestic() error {
return nil
}
func (s *server) getCSISnapshotListers() (snapshotv1listers.VolumeSnapshotLister, snapshotv1listers.VolumeSnapshotContentLister, snapshotv1listers.VolumeSnapshotClassLister) {
func (s *server) getCSISnapshotListers() (snapshotv1beta1listers.VolumeSnapshotLister, snapshotv1beta1listers.VolumeSnapshotContentLister) {
// Make empty listers that will only be populated if CSI is properly enabled.
var vsLister snapshotv1listers.VolumeSnapshotLister
var vscLister snapshotv1listers.VolumeSnapshotContentLister
var vsClassLister snapshotv1listers.VolumeSnapshotClassLister
var vsLister snapshotv1beta1listers.VolumeSnapshotLister
var vscLister snapshotv1beta1listers.VolumeSnapshotContentLister
var err error
// If CSI is enabled, check for the CSI groups and generate the listers
// If CSI isn't enabled, return empty listers.
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
_, err = s.discoveryClient.ServerResourcesForGroupVersion(snapshotv1api.SchemeGroupVersion.String())
_, err = s.discoveryClient.ServerResourcesForGroupVersion(snapshotv1beta1api.SchemeGroupVersion.String())
switch {
case apierrors.IsNotFound(err):
// CSI is enabled, but the required CRDs aren't installed, so halt.
s.logger.Fatalf("The '%s' feature flag was specified, but CSI API group [%s] was not found.", velerov1api.CSIFeatureFlag, snapshotv1api.SchemeGroupVersion.String())
s.logger.Fatalf("The '%s' feature flag was specified, but CSI API group [%s] was not found.", velerov1api.CSIFeatureFlag, snapshotv1beta1api.SchemeGroupVersion.String())
case err == nil:
// CSI is enabled, and the resources were found.
// Instantiate the listers fully
s.logger.Debug("Creating CSI listers")
// Access the wrapped factory directly here since we've already done the feature flag check above to know it's safe.
vsLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshots().Lister()
vscLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshotContents().Lister()
vsClassLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshotClasses().Lister()
vsLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1beta1().VolumeSnapshots().Lister()
vscLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1beta1().VolumeSnapshotContents().Lister()
case err != nil:
cmd.CheckError(err)
}
}
return vsLister, vscLister, vsClassLister
return vsLister, vscLister
}
func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string) error {
@@ -600,7 +587,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
backupStoreGetter := persistence.NewObjectBackupStoreGetter(s.credentialFileStore)
csiVSLister, csiVSCLister, csiVSClassLister := s.getCSISnapshotListers()
csiVSLister, csiVSCLister := s.getCSISnapshotListers()
backupSyncControllerRunInfo := func() controllerRunInfo {
backupSyncContoller := controller.NewBackupSyncController(
@@ -608,7 +595,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.mgr.GetClient(),
s.veleroClient.VeleroV1(),
s.sharedInformerFactory.Velero().V1().Backups().Lister(),
csiVSLister,
s.config.backupSyncPeriod,
s.namespace,
s.csiSnapshotClient,
@@ -653,15 +639,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.config.defaultBackupLocation,
s.config.defaultVolumesToRestic,
s.config.defaultBackupTTL,
s.config.defaultCSISnapshotTimeout,
s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(),
defaultVolumeSnapshotLocations,
s.metrics,
s.config.formatFlag.Parse(),
csiVSLister,
s.csiSnapshotClient,
csiVSCLister,
csiVSClassLister,
backupStoreGetter,
)
@@ -671,6 +654,22 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
scheduleControllerRunInfo := func() controllerRunInfo {
scheduleController := controller.NewScheduleController(
s.namespace,
s.veleroClient.VeleroV1(),
s.veleroClient.VeleroV1(),
s.sharedInformerFactory.Velero().V1().Schedules(),
s.logger,
s.metrics,
)
return controllerRunInfo{
controller: scheduleController,
numWorkers: defaultControllerWorkers,
}
}
gcControllerRunInfo := func() controllerRunInfo {
gcController := controller.NewGCController(
s.logger,
@@ -678,7 +677,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.sharedInformerFactory.Velero().V1().DeleteBackupRequests().Lister(),
s.veleroClient.VeleroV1(),
s.mgr.GetClient(),
s.config.garbageCollectionFrequency,
)
return controllerRunInfo{
@@ -687,6 +685,34 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
deletionControllerRunInfo := func() controllerRunInfo {
deletionController := controller.NewBackupDeletionController(
s.logger,
s.sharedInformerFactory.Velero().V1().DeleteBackupRequests(),
s.veleroClient.VeleroV1(), // deleteBackupRequestClient
s.veleroClient.VeleroV1(), // backupClient
s.sharedInformerFactory.Velero().V1().Restores().Lister(),
s.veleroClient.VeleroV1(), // restoreClient
backupTracker,
s.resticManager,
s.sharedInformerFactory.Velero().V1().PodVolumeBackups().Lister(),
s.mgr.GetClient(),
s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(),
csiVSLister,
csiVSCLister,
s.csiSnapshotClient,
newPluginManager,
backupStoreGetter,
s.metrics,
s.discoveryHelper,
)
return controllerRunInfo{
controller: deletionController,
numWorkers: defaultControllerWorkers,
}
}
restoreControllerRunInfo := func() controllerRunInfo {
restorer, err := restore.NewKubernetesRestorer(
s.veleroClient.VeleroV1(),
@@ -726,11 +752,30 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
resticRepoControllerRunInfo := func() controllerRunInfo {
resticRepoController := controller.NewResticRepositoryController(
s.logger,
s.sharedInformerFactory.Velero().V1().ResticRepositories(),
s.veleroClient.VeleroV1(),
s.mgr.GetClient(),
s.resticManager,
s.config.defaultResticMaintenanceFrequency,
)
return controllerRunInfo{
controller: resticRepoController,
numWorkers: defaultControllerWorkers,
}
}
enabledControllers := map[string]func() controllerRunInfo{
controller.BackupSync: backupSyncControllerRunInfo,
controller.Backup: backupControllerRunInfo,
controller.Schedule: scheduleControllerRunInfo,
controller.GarbageCollection: gcControllerRunInfo,
controller.BackupDeletion: deletionControllerRunInfo,
controller.Restore: restoreControllerRunInfo,
controller.ResticRepo: resticRepoControllerRunInfo,
}
// Note: all runtime type controllers that can be disabled are grouped separately, below:
enabledRuntimeControllers := make(map[string]struct{})
@@ -798,27 +843,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupStorageLocation)
}
if err := controller.NewScheduleReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.metrics).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.Schedule)
}
if err := controller.NewResticRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.defaultResticMaintenanceFrequency, s.resticManager).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.ResticRepo)
}
if err := controller.NewBackupDeletionReconciler(
s.logger,
s.mgr.GetClient(),
backupTracker,
s.resticManager,
s.metrics,
s.discoveryHelper,
newPluginManager,
backupStoreGetter,
).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupDeletion)
}
if _, ok := enabledRuntimeControllers[controller.ServerStatusRequest]; ok {
r := controller.ServerStatusRequestReconciler{
Scheme: s.mgr.GetScheme(),
@@ -863,6 +887,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
if err := s.mgr.Start(s.ctx); err != nil {
s.logger.Fatal("Problem starting manager", err)
}
return nil
}
@@ -904,16 +929,16 @@ func (s *server) runProfiler() {
// CSIInformerFactoryWrapper is a proxy around the CSI SharedInformerFactory that checks the CSI feature flag before performing operations.
type CSIInformerFactoryWrapper struct {
factory snapshotv1informers.SharedInformerFactory
factory snapshotv1beta1informers.SharedInformerFactory
}
func NewCSIInformerFactoryWrapper(c snapshotv1client.Interface) *CSIInformerFactoryWrapper {
func NewCSIInformerFactoryWrapper(c snapshotv1beta1client.Interface) *CSIInformerFactoryWrapper {
// If no namespace is specified, all namespaces are watched.
// This is desirable for VolumeSnapshots, as we want to query for all VolumeSnapshots across all namespaces using this informer
w := &CSIInformerFactoryWrapper{}
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
w.factory = snapshotv1informers.NewSharedInformerFactoryWithOptions(c, 0)
w.factory = snapshotv1beta1informers.NewSharedInformerFactoryWithOptions(c, 0)
}
return w
}
@@ -932,65 +957,3 @@ func (w *CSIInformerFactoryWrapper) WaitForCacheSync(stopCh <-chan struct{}) map
}
return nil
}
// If the server restarts while backups/restores/etc. are being reconciled, those CRs may be stuck in an in-progress status.
// markInProgressCRsFailed marks such in-progress CRs as failed at server startup to avoid that.
func markInProgressCRsFailed(ctx context.Context, cfg *rest.Config, scheme *runtime.Scheme, namespace string, log logrus.FieldLogger) {
// this function is called before the controller manager starts, so the manager's embedded client isn't ready to use; create a new one here
client, err := ctrlclient.New(cfg, ctrlclient.Options{Scheme: scheme})
if err != nil {
log.WithError(errors.WithStack(err)).Error("failed to create client")
return
}
markInProgressBackupsFailed(ctx, client, namespace, log)
markInProgressRestoresFailed(ctx, client, namespace, log)
}
func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) {
backups := &velerov1api.BackupList{}
if err := client.List(ctx, backups, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil {
log.WithError(errors.WithStack(err)).Error("failed to list backups")
return
}
for _, backup := range backups.Items {
if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
continue
}
updated := backup.DeepCopy()
updated.Status.Phase = velerov1api.BackupPhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
continue
}
log.WithField("backup", backup.GetName()).Warn(updated.Status.FailureReason)
}
}
func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) {
restores := &velerov1api.RestoreList{}
if err := client.List(ctx, restores, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil {
log.WithError(errors.WithStack(err)).Error("failed to list restores")
return
}
for _, restore := range restores.Items {
if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
continue
}
updated := restore.DeepCopy()
updated.Status.Phase = velerov1api.RestorePhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
continue
}
log.WithField("restore", restore.GetName()).Warn(updated.Status.FailureReason)
}
}
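
The DeepCopy-then-MergeFrom patch pattern used by both helpers can be exercised on its own. A minimal sketch using a ConfigMap and controller-runtime's fake client; the object and the field being set are placeholders, not Velero types:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cm).Build()

	// Mutate a deep copy, then send a merge patch computed against the
	// original -- the same shape as markInProgressBackupsFailed above.
	updated := cm.DeepCopy()
	updated.Data = map[string]string{"phase": "Failed"}
	if err := c.Patch(context.Background(), updated, ctrlclient.MergeFrom(cm)); err != nil {
		fmt.Println("patch failed:", err)
		return
	}
	fmt.Println("patched data:", updated.Data)
}
```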

View File

@@ -17,7 +17,6 @@ limitations under the License.
package flag
import (
"encoding/csv"
"fmt"
"strings"
@@ -28,25 +27,25 @@ import (
// map data (i.e. a collection of key-value pairs).
type Map struct {
data map[string]string
entryDelimiter rune
keyValueDelimiter rune
entryDelimiter string
keyValueDelimiter string
}
// NewMap returns a Map using the default delimiters ('=' between keys and
// values, and ',' between map entries, e.g. k1=v1,k2=v2)
// NewMap returns a Map using the default delimiters ("=" between keys and
// values, and "," between map entries, e.g. k1=v1,k2=v2)
func NewMap() Map {
m := Map{
data: make(map[string]string),
}
return m.WithEntryDelimiter(',').WithKeyValueDelimiter('=')
return m.WithEntryDelimiter(",").WithKeyValueDelimiter("=")
}
// WithEntryDelimiter sets the delimiter to be used between map
// entries.
//
// For example, in "k1=v1&k2=v2", the entry delimiter is '&'
func (m Map) WithEntryDelimiter(delimiter rune) Map {
// For example, in "k1=v1&k2=v2", the entry delimiter is "&"
func (m Map) WithEntryDelimiter(delimiter string) Map {
m.entryDelimiter = delimiter
return m
}
@@ -54,8 +53,8 @@ func (m Map) WithEntryDelimiter(delimiter rune) Map {
// WithKeyValueDelimiter sets the delimiter to be used between
// keys and values.
//
// For example, in "k1=v1&k2=v2", the key-value delimiter is '='
func (m Map) WithKeyValueDelimiter(delimiter rune) Map {
// For example, in "k1=v1&k2=v2", the key-value delimiter is "="
func (m Map) WithKeyValueDelimiter(delimiter string) Map {
m.keyValueDelimiter = delimiter
return m
}
@@ -64,26 +63,17 @@ func (m Map) WithKeyValueDelimiter(delimiter rune) Map {
func (m *Map) String() string {
var a []string
for k, v := range m.data {
a = append(a, fmt.Sprintf("%s%s%s", k, string(m.keyValueDelimiter), v))
a = append(a, fmt.Sprintf("%s%s%s", k, m.keyValueDelimiter, v))
}
return strings.Join(a, string(m.entryDelimiter))
return strings.Join(a, m.entryDelimiter)
}
// Set parses the provided string according to the delimiters and
// assigns the result to the Map receiver. It returns an error if
// the string is not parseable.
func (m *Map) Set(s string) error {
// use csv.Reader to support parsing input strings whose values contain entry delimiters.
// e.g. `"k1=a=b,c=d",k2=v2` will be parsed into two parts: `k1=a=b,c=d` and `k2=v2`
r := csv.NewReader(strings.NewReader(s))
r.Comma = m.entryDelimiter
parts, err := r.Read()
if err != nil {
return errors.Wrapf(err, "error parsing %q", s)
}
for _, part := range parts {
kvs := strings.SplitN(part, string(m.keyValueDelimiter), 2)
for _, part := range strings.Split(s, m.entryDelimiter) {
kvs := strings.SplitN(part, m.keyValueDelimiter, 2)
if len(kvs) != 2 {
return errors.Errorf("error parsing %q", part)
}
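
For reference, the csv.Reader-based variant above can be reduced to a standalone sketch (the parseMap helper is hypothetical): csv.Reader treats the entry delimiter as its comma, so a quoted entry like `"k2=a=b,c=d"` survives as a single part.

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// parseMap is a hypothetical standalone version of Map.Set above.
func parseMap(s string, entryDelim rune, kvDelim string) (map[string]string, error) {
	r := csv.NewReader(strings.NewReader(s))
	r.Comma = entryDelim // the entry delimiter acts as the CSV comma
	parts, err := r.Read()
	if err != nil {
		return nil, fmt.Errorf("error parsing %q: %w", s, err)
	}
	out := make(map[string]string)
	for _, part := range parts {
		kv := strings.SplitN(part, kvDelim, 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("error parsing %q", part)
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}

func main() {
	m, _ := parseMap(`k1=v1,"k2=a=b,c=d"`, ',', "=")
	fmt.Println(m) // map[k1:v1 k2:a=b,c=d]
}
```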

View File

@@ -1,75 +0,0 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSetOfMap(t *testing.T) {
cases := []struct {
name string
input string
error bool
expected map[string]string
}{
{
name: "invalid_input_missing_quote",
input: `"k=v`,
error: true,
},
{
name: "invalid_input_contains_no_key_value_delimiter",
input: `k`,
error: true,
},
{
name: "valid input",
input: `k1=v1,k2=v2`,
error: false,
expected: map[string]string{
"k1": "v1",
"k2": "v2",
},
},
{
name: "valid input whose value contains entry delimiter",
input: `k1=v1,"k2=a=b,c=d"`,
error: false,
expected: map[string]string{
"k1": "v1",
"k2": "a=b,c=d",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
m := NewMap()
err := m.Set(c.input)
if c.error {
require.NotNil(t, err)
return
}
assert.EqualValues(t, c.expected, m.data)
})
}
}

View File

@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotv1beta1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1"
"github.com/fatih/color"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -45,7 +45,7 @@ func DescribeBackup(
backup *velerov1api.Backup,
deleteRequests []velerov1api.DeleteBackupRequest,
podVolumeBackups []velerov1api.PodVolumeBackup,
volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent,
volumeSnapshotContents []snapshotv1beta1api.VolumeSnapshotContent,
details bool,
veleroClient clientset.Interface,
insecureSkipTLSVerify bool,
@@ -513,7 +513,7 @@ func (v *volumesByPod) Sorted() []*podVolumeGroup {
return v.volumesByPodSlice
}
func DescribeCSIVolumeSnapshots(d *Describer, details bool, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent) {
func DescribeCSIVolumeSnapshots(d *Describer, details bool, volumeSnapshotContents []snapshotv1beta1api.VolumeSnapshotContent) {
if !features.IsEnabled(velerov1api.CSIFeatureFlag) {
return
}
@@ -535,7 +535,7 @@ func DescribeCSIVolumeSnapshots(d *Describer, details bool, volumeSnapshotConten
}
}
func DescribeVSC(d *Describer, details bool, vsc snapshotv1api.VolumeSnapshotContent) {
func DescribeVSC(d *Describer, details bool, vsc snapshotv1beta1api.VolumeSnapshotContent) {
if vsc.Status == nil {
d.Printf("Volume Snapshot Content %s cannot be described because its status is nil\n", vsc.Name)
return

View File

@@ -148,13 +148,6 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
describePodVolumeRestores(d, podVolumeRestores, details)
}
d.Println()
s = "<none>"
if restore.Spec.ExistingResourcePolicy != "" {
s = string(restore.Spec.ExistingResourcePolicy)
}
d.Printf("Existing Resource Policy: \t%s\n", s)
d.Println()
d.Printf("Preserve Service NodePorts:\t%s\n", BoolPointerString(restore.Spec.PreserveNodePorts, "false", "true", "auto"))

View File

@@ -25,30 +25,21 @@ import (
"io"
"io/ioutil"
"os"
"sync"
"time"
"github.com/apex/log"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"github.com/vmware-tanzu/velero/pkg/util/csi"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
snapshotv1beta1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1"
snapshotv1beta1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1beta1"
"github.com/vmware-tanzu/velero/internal/storage"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -87,16 +78,13 @@ type backupController struct {
defaultBackupLocation string
defaultVolumesToRestic bool
defaultBackupTTL time.Duration
defaultCSISnapshotTimeout time.Duration
snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister
defaultSnapshotLocations map[string]string
metrics *metrics.ServerMetrics
backupStoreGetter persistence.ObjectBackupStoreGetter
formatFlag logging.Format
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister
volumeSnapshotClient *snapshotterClientSet.Clientset
volumeSnapshotContentLister snapshotv1listers.VolumeSnapshotContentLister
volumeSnapshotClassLister snapshotv1listers.VolumeSnapshotClassLister
volumeSnapshotLister snapshotv1beta1listers.VolumeSnapshotLister
volumeSnapshotContentLister snapshotv1beta1listers.VolumeSnapshotContentLister
}
func NewBackupController(
@@ -112,15 +100,12 @@ func NewBackupController(
defaultBackupLocation string,
defaultVolumesToRestic bool,
defaultBackupTTL time.Duration,
defaultCSISnapshotTimeout time.Duration,
volumeSnapshotLocationLister velerov1listers.VolumeSnapshotLocationLister,
defaultSnapshotLocations map[string]string,
metrics *metrics.ServerMetrics,
formatFlag logging.Format,
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister,
volumeSnapshotClient *snapshotterClientSet.Clientset,
volumeSnapshotContentLister snapshotv1listers.VolumeSnapshotContentLister,
volumesnapshotClassLister snapshotv1listers.VolumeSnapshotClassLister,
volumeSnapshotLister snapshotv1beta1listers.VolumeSnapshotLister,
volumeSnapshotContentLister snapshotv1beta1listers.VolumeSnapshotContentLister,
backupStoreGetter persistence.ObjectBackupStoreGetter,
) Interface {
c := &backupController{
@@ -137,15 +122,12 @@ func NewBackupController(
defaultBackupLocation: defaultBackupLocation,
defaultVolumesToRestic: defaultVolumesToRestic,
defaultBackupTTL: defaultBackupTTL,
defaultCSISnapshotTimeout: defaultCSISnapshotTimeout,
snapshotLocationLister: volumeSnapshotLocationLister,
defaultSnapshotLocations: defaultSnapshotLocations,
metrics: metrics,
formatFlag: formatFlag,
volumeSnapshotLister: volumeSnapshotLister,
volumeSnapshotClient: volumeSnapshotClient,
volumeSnapshotContentLister: volumeSnapshotContentLister,
volumeSnapshotClassLister: volumesnapshotClassLister,
backupStoreGetter: backupStoreGetter,
}
@@ -273,7 +255,6 @@ func (c *backupController) processBackup(key string) error {
if err != nil {
return errors.Wrapf(err, "error updating Backup status to %s", request.Status.Phase)
}
// store ref to just-updated item for creating patch
original = updatedBackup
request.Backup = updatedBackup.DeepCopy()
@@ -300,7 +281,6 @@ func (c *backupController) processBackup(key string) error {
// result in the backup being Failed.
log.WithError(err).Error("backup failed")
request.Status.Phase = velerov1api.BackupPhaseFailed
request.Status.FailureReason = err.Error()
}
switch request.Status.Phase {
@@ -362,11 +342,6 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg
request.Spec.TTL.Duration = c.defaultBackupTTL
}
if request.Spec.CSISnapshotTimeout.Duration == 0 {
// set default CSI VolumeSnapshot timeout
request.Spec.CSISnapshotTimeout.Duration = c.defaultCSISnapshotTimeout
}
// calculate expiration
request.Status.Expiration = &metav1.Time{Time: c.clock.Now().Add(request.Spec.TTL.Duration)}
@@ -454,11 +429,6 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}
// validate that only one of labelSelector or orLabelSelectors is specified
if request.Spec.OrLabelSelectors != nil && request.Spec.LabelSelector != nil {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified")
}
return request
}
@@ -632,11 +602,11 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
// Empty slices here so that they can be passed in to the persistBackup call later, regardless of whether or not CSI's enabled.
// This way, we only make the Lister call if the feature flag's on.
var volumeSnapshots []*snapshotv1api.VolumeSnapshot
var volumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent
var volumeSnapshotClasses []*snapshotv1api.VolumeSnapshotClass
var volumeSnapshots []*snapshotv1beta1api.VolumeSnapshot
var volumeSnapshotContents []*snapshotv1beta1api.VolumeSnapshotContent
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
selector := label.NewSelectorForBackup(backup.Name)
// Listers are wrapped in a nil check out of caution, since they may not be populated based on the
// EnableCSI feature flag. This is more to guard against programmer error, as they shouldn't be nil
// when EnableCSI is on.
@@ -645,13 +615,6 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
if err != nil {
backupLog.Error(err)
}
err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots, backup.Spec.CSISnapshotTimeout.Duration)
if err != nil {
backupLog.Errorf("fail to wait VolumeSnapshot change to Ready: %s", err.Error())
}
backup.CSISnapshots = volumeSnapshots
}
if c.volumeSnapshotContentLister != nil {
@@ -660,27 +623,6 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
backupLog.Error(err)
}
}
vsClassSet := sets.NewString()
for _, vsc := range volumeSnapshotContents {
// persist the volumesnapshotclasses referenced by vsc
if c.volumeSnapshotClassLister != nil &&
vsc.Spec.VolumeSnapshotClassName != nil &&
!vsClassSet.Has(*vsc.Spec.VolumeSnapshotClassName) {
if vsClass, err := c.volumeSnapshotClassLister.Get(*vsc.Spec.VolumeSnapshotClassName); err != nil {
backupLog.Error(err)
} else {
vsClassSet.Insert(*vsc.Spec.VolumeSnapshotClassName)
volumeSnapshotClasses = append(volumeSnapshotClasses, vsClass)
}
}
if err := csi.ResetVolumeSnapshotContent(vsc); err != nil {
backupLog.Error(err)
}
}
// Delete the VolumeSnapshots created in the backup, when CSI feature is enabled.
c.deleteVolumeSnapshot(volumeSnapshots, volumeSnapshotContents, *backup, backupLog)
}
// Mark completion timestamp before serializing and uploading.
@@ -694,22 +636,15 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
}
}
backup.Status.CSIVolumeSnapshotsAttempted = len(backup.CSISnapshots)
for _, vs := range backup.CSISnapshots {
if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) {
backup.Status.CSIVolumeSnapshotsCompleted++
}
}
backup.Status.Warnings = logCounter.GetCount(logrus.WarnLevel)
backup.Status.Errors = logCounter.GetCount(logrus.ErrorLevel)
recordBackupMetrics(backupLog, backup.Backup, backupFile, c.metrics)
if err := gzippedLogFile.Close(); err != nil {
c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)).WithError(err).Error("error closing gzippedLogFile")
}
backup.Status.Warnings = logCounter.GetCount(logrus.WarnLevel)
backup.Status.Errors = logCounter.GetCount(logrus.ErrorLevel)
// Assign finalize phase as close to end as possible so that any errors
// logged to backupLog are captured. This is done before uploading the
// artifacts to object storage so that the JSON representation of the
@@ -731,7 +666,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
return err
}
if errs := persistBackup(backup, backupFile, logFile, backupStore, c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)), volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 {
if errs := persistBackup(backup, backupFile, logFile, backupStore, c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)), volumeSnapshots, volumeSnapshotContents); len(errs) > 0 {
fatalErrs = append(fatalErrs, errs...)
}
@@ -759,26 +694,14 @@ func recordBackupMetrics(log logrus.FieldLogger, backup *velerov1api.Backup, bac
serverMetrics.RegisterVolumeSnapshotAttempts(backupScheduleName, backup.Status.VolumeSnapshotsAttempted)
serverMetrics.RegisterVolumeSnapshotSuccesses(backupScheduleName, backup.Status.VolumeSnapshotsCompleted)
serverMetrics.RegisterVolumeSnapshotFailures(backupScheduleName, backup.Status.VolumeSnapshotsAttempted-backup.Status.VolumeSnapshotsCompleted)
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
serverMetrics.RegisterCSISnapshotAttempts(backupScheduleName, backup.Name, backup.Status.CSIVolumeSnapshotsAttempted)
serverMetrics.RegisterCSISnapshotSuccesses(backupScheduleName, backup.Name, backup.Status.CSIVolumeSnapshotsCompleted)
serverMetrics.RegisterCSISnapshotFailures(backupScheduleName, backup.Name, backup.Status.CSIVolumeSnapshotsAttempted-backup.Status.CSIVolumeSnapshotsCompleted)
}
if backup.Status.Progress != nil {
serverMetrics.RegisterBackupItemsTotalGauge(backupScheduleName, backup.Status.Progress.TotalItems)
}
serverMetrics.RegisterBackupItemsErrorsGauge(backupScheduleName, backup.Status.Errors)
}
func persistBackup(backup *pkgbackup.Request,
backupContents, backupLog *os.File,
backupStore persistence.BackupStore,
log logrus.FieldLogger,
csiVolumeSnapshots []*snapshotv1api.VolumeSnapshot,
csiVolumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent,
csiVolumesnapshotClasses []*snapshotv1api.VolumeSnapshotClass,
csiVolumeSnapshots []*snapshotv1beta1api.VolumeSnapshot,
csiVolumeSnapshotContents []*snapshotv1beta1api.VolumeSnapshotContent,
) []error {
persistErrs := []error{}
backupJSON := new(bytes.Buffer)
@@ -807,10 +730,6 @@ func persistBackup(backup *pkgbackup.Request,
if errs != nil {
persistErrs = append(persistErrs, errs...)
}
csiSnapshotClassesJSON, errs := encodeToJSONGzip(csiVolumesnapshotClasses, "csi volume snapshot classes list")
if errs != nil {
persistErrs = append(persistErrs, errs...)
}
backupResourceList, errs := encodeToJSONGzip(backup.BackupResourceList(), "backup resources list")
if errs != nil {
@@ -825,7 +744,6 @@ func persistBackup(backup *pkgbackup.Request,
backupResourceList = nil
csiSnapshotJSON = nil
csiSnapshotContentsJSON = nil
csiSnapshotClassesJSON = nil
}
backupInfo := persistence.BackupInfo{
@@ -838,7 +756,6 @@ func persistBackup(backup *pkgbackup.Request,
BackupResourceList: backupResourceList,
CSIVolumeSnapshots: csiSnapshotJSON,
CSIVolumeSnapshotContents: csiSnapshotContentsJSON,
CSIVolumeSnapshotClasses: csiSnapshotClassesJSON,
}
if err := backupStore.PutBackup(backupInfo); err != nil {
persistErrs = append(persistErrs, err)
@@ -882,152 +799,3 @@ func encodeToJSONGzip(data interface{}, desc string) (*bytes.Buffer, []error) {
return buf, nil
}
// Waiting for a VolumeSnapshot's ReadyToUse to become true is time consuming, so parallelize the waiting with
// goroutines here instead of waiting in the CSI plugin, because it's not easy to run BackupItemActions in
// parallel yet. Once parallel BackupItemActions are implemented, this logic should move to the CSI plugin,
// as in https://github.com/vmware-tanzu/velero-plugin-for-csi/pull/100
func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []*snapshotv1api.VolumeSnapshot,
csiSnapshotTimeout time.Duration) error {
eg, _ := errgroup.WithContext(ctx)
timeout := csiSnapshotTimeout
interval := 5 * time.Second
for _, vs := range volumesnapshots {
volumeSnapshot := vs
eg.Go(func() error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
tmpVS, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(volumeSnapshot.Namespace).Get(ctx, volumeSnapshot.Name, metav1.GetOptions{})
if err != nil {
return false, errors.Wrapf(err, fmt.Sprintf("failed to get volumesnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name))
}
if tmpVS.Status == nil || tmpVS.Status.BoundVolumeSnapshotContentName == nil || !boolptr.IsSetToTrue(tmpVS.Status.ReadyToUse) {
log.Infof("Waiting for CSI driver to reconcile volumesnapshot %s/%s. Retrying in %ds", volumeSnapshot.Namespace, volumeSnapshot.Name, interval/time.Second)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
log.Errorf("Timed out awaiting reconciliation of volumesnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name)
}
return err
})
}
return eg.Wait()
}
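
The fan-out-and-wait shape above (one goroutine per snapshot, each polling until ready or timeout) can be sketched independently; waitAll, the item names, and the intervals below are illustrative, not Velero API:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitAll starts one goroutine per item, each polling until ready()
// reports done or the timeout expires; errgroup surfaces the first error.
func waitAll(ctx context.Context, items []string, ready func(string) (bool, error)) error {
	eg, _ := errgroup.WithContext(ctx)
	for _, it := range items {
		item := it // capture the loop variable for the goroutine
		eg.Go(func() error {
			return wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
				return ready(item)
			})
		})
	}
	return eg.Wait()
}

func main() {
	err := waitAll(context.Background(), []string{"vs-1", "vs-2"}, func(name string) (bool, error) {
		fmt.Println("checking", name)
		return true, nil // pretend each snapshot is immediately ready
	})
	fmt.Println("done, err =", err)
}
```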
// deleteVolumeSnapshot deletes the VolumeSnapshots created during the backup.
// This prevents a later namespace deletion in the cluster from triggering the VolumeSnapshot deletion,
// which would delete the snapshot on the cloud provider and leave the backup unable to restore the PV.
// If the DeletionPolicy is Retain, just delete the VS. If the DeletionPolicy is Delete, change the
// DeletionPolicy to Retain before deleting the VS, then change it back to Delete.
func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api.VolumeSnapshot,
volumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent,
backup pkgbackup.Request, logger logrus.FieldLogger) {
var wg sync.WaitGroup
vscMap := make(map[string]*snapshotv1api.VolumeSnapshotContent)
for _, vsc := range volumeSnapshotContents {
vscMap[vsc.Name] = vsc
}
for _, vs := range volumeSnapshots {
wg.Add(1)
go func(vs *snapshotv1api.VolumeSnapshot) {
defer wg.Done()
var vsc *snapshotv1api.VolumeSnapshotContent
modifyVSCFlag := false
if vs.Status.BoundVolumeSnapshotContentName != nil &&
len(*vs.Status.BoundVolumeSnapshotContentName) > 0 {
vsc = vscMap[*vs.Status.BoundVolumeSnapshotContentName]
if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete {
modifyVSCFlag = true
}
}
// Change the VolumeSnapshotContent's DeletionPolicy to Retain before deleting the VolumeSnapshot,
// because when the DeletionPolicy is Delete, deleting the VolumeSnapshot also deletes the
// VolumeSnapshotContent, and Velero still needs the VSC to clean up the snapshot on the cloud
// provider during backup deletion.
if modifyVSCFlag {
logger.Debugf("Patching VolumeSnapshotContent %s", vsc.Name)
original := vsc.DeepCopy()
vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain
if err := c.kbClient.Patch(context.Background(), vsc, kbclient.MergeFrom(original)); err != nil {
logger.Errorf("fail to modify VolumeSnapshotContent %s DeletionPolicy to Retain: %s", vsc.Name, err.Error())
return
}
defer func() {
logger.Debugf("Start to recreate VolumeSnapshotContent %s", vsc.Name)
err := c.recreateVolumeSnapshotContent(vsc)
if err != nil {
logger.Errorf("fail to recreate VolumeSnapshotContent %s: %s", vsc.Name, err.Error())
}
}()
}
// Delete the VolumeSnapshot from the cluster
logger.Debugf("Deleting VolumeSnapshot %s/%s", vs.Namespace, vs.Name)
err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(vs.Namespace).Delete(context.TODO(), vs.Name, metav1.DeleteOptions{})
if err != nil {
logger.Errorf("fail to delete VolumeSnapshot %s/%s: %s", vs.Namespace, vs.Name, err.Error())
}
}(vs)
}
wg.Wait()
}
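
The same fan-out appears here with a bare sync.WaitGroup; note how the loop variable is passed as a goroutine parameter so each goroutine sees its own copy. A minimal sketch with placeholder names:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"vs-a", "vs-b", "vs-c"}

	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		// Pass the loop variable as a parameter, as deleteVolumeSnapshot
		// does above, so each goroutine works on its own copy.
		go func(name string) {
			defer wg.Done()
			fmt.Println("deleting", name)
		}(it)
	}
	wg.Wait()
}
```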
// recreateVolumeSnapshotContent deletes and then re-creates a VolumeSnapshotContent,
// because some parameters in the VolumeSnapshotContent spec are immutable, e.g. VolumeSnapshotRef
// and Source. Source is updated so the csi-controller thinks the VSC is statically provisioned from the VS.
// Setting the VolumeSnapshotRef's UID to nil lets the csi-controller detect that the related VS is gone, so the
// VSC can be deleted.
func (c *backupController) recreateVolumeSnapshotContent(vsc *snapshotv1api.VolumeSnapshotContent) error {
timeout := 1 * time.Minute
interval := 1 * time.Second
err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Delete(context.TODO(), vsc.Name, metav1.DeleteOptions{})
if err != nil {
return errors.Wrapf(err, "fail to delete VolumeSnapshotContent: %s", vsc.Name)
}
// Check that the VolumeSnapshotContent is fully deleted before re-creating it.
err = wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Get(context.TODO(), vsc.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, errors.Wrapf(err, fmt.Sprintf("failed to get VolumeSnapshotContent %s", vsc.Name))
}
return false, nil
})
if err != nil {
return errors.Wrapf(err, "fail to retrieve VolumeSnapshotContent %s info", vsc.Name)
}
// Make the VolumeSnapshotContent static
vsc.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
SnapshotHandle: vsc.Status.SnapshotHandle,
}
// Set VolumeSnapshotRef to a non-existent one, because the VolumeSnapshotContent
// validation webhook checks that the name and namespace are non-nil.
// The external-snapshotter needs Source pointing to the snapshot and the VolumeSnapshot
// reference's UID set to nil to determine that the VolumeSnapshotContent is deletable.
vsc.Spec.VolumeSnapshotRef = v1.ObjectReference{
APIVersion: snapshotv1api.SchemeGroupVersion.String(),
Kind: "VolumeSnapshot",
Namespace: "ns-" + string(vsc.UID),
Name: "name-" + string(vsc.UID),
}
// ResourceVersion must be cleared for the new creation.
vsc.ResourceVersion = ""
_, err = c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Create(context.TODO(), vsc, metav1.CreateOptions{})
if err != nil {
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", vsc.Name)
}
return nil
}
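
The delete-then-confirm-gone step above is a reusable pattern: delete, then poll until Get returns NotFound. A sketch against a ConfigMap with client-go's fake clientset; the object names and intervals are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	cs := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"},
	})

	// Delete, then poll until the object is confirmed gone (NotFound),
	// mirroring the delete-then-recreate flow above.
	_ = cs.CoreV1().ConfigMaps("default").Delete(context.TODO(), "demo", metav1.DeleteOptions{})

	err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		_, err := cs.CoreV1().ConfigMaps("default").Get(context.TODO(), "demo", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // confirmed deleted
		}
		return false, err // keep polling while it still exists
	})
	fmt.Println("confirmed deleted, err =", err)
}
```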

View File

@@ -172,13 +172,6 @@ func TestProcessBackupValidationFailures(t *testing.T) {
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(),
expectedErrs: []string{"backup can't be created because backup storage location read-only is currently in read-only mode"},
},
{
name: "labelSelector as well as orLabelSelectors both are specified in backup request fails validation",
backup: defaultBackup().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).OrLabelSelector([]*metav1.LabelSelector{{MatchLabels: map[string]string{"a1": "b1"}}, {MatchLabels: map[string]string{"a2": "b2"}},
{MatchLabels: map[string]string{"a3": "b3"}}, {MatchLabels: map[string]string{"a4": "b4"}}}).Result(),
backupLocation: defaultBackupLocation,
expectedErrs: []string{"encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified"},
},
}
for _, test := range tests {
@@ -736,7 +729,6 @@ func TestProcessBackupCompletions(t *testing.T) {
},
Status: velerov1api.BackupStatus{
Phase: velerov1api.BackupPhaseFailed,
FailureReason: "backup already exists in object storage",
Version: 1,
FormatVersion: "1.1.0",
StartTimestamp: &timestamp,
@@ -774,7 +766,6 @@ func TestProcessBackupCompletions(t *testing.T) {
},
Status: velerov1api.BackupStatus{
Phase: velerov1api.BackupPhaseFailed,
FailureReason: "error checking if backup already exists in object storage: Backup already exists in object storage",
Version: 1,
FormatVersion: "1.1.0",
StartTimestamp: &timestamp,

View File

@@ -23,18 +23,25 @@ import (
"time"
jsonpatch "github.com/evanphx/json-patch"
snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1beta1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1beta1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
ctrl "sigs.k8s.io/controller-runtime"
"k8s.io/client-go/tools/cache"
"github.com/vmware-tanzu/velero/internal/delete"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
"github.com/vmware-tanzu/velero/pkg/discovery"
velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/persistence"
@@ -47,202 +54,250 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
resticTimeout = time.Minute
deleteBackupRequestMaxAge = 24 * time.Hour
)
const resticTimeout = time.Minute
type backupDeletionReconciler struct {
client.Client
logger logrus.FieldLogger
backupTracker BackupTracker
resticMgr restic.RepositoryManager
metrics *metrics.ServerMetrics
clock clock.Clock
discoveryHelper discovery.Helper
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
backupStoreGetter persistence.ObjectBackupStoreGetter
type backupDeletionController struct {
*genericController
deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter
deleteBackupRequestLister velerov1listers.DeleteBackupRequestLister
backupClient velerov1client.BackupsGetter
restoreLister velerov1listers.RestoreLister
restoreClient velerov1client.RestoresGetter
backupTracker BackupTracker
resticMgr restic.RepositoryManager
podvolumeBackupLister velerov1listers.PodVolumeBackupLister
kbClient client.Client
snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister
csiSnapshotLister snapshotv1beta1listers.VolumeSnapshotLister
csiSnapshotContentLister snapshotv1beta1listers.VolumeSnapshotContentLister
csiSnapshotClient *snapshotterClientSet.Clientset
processRequestFunc func(*velerov1api.DeleteBackupRequest) error
clock clock.Clock
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
backupStoreGetter persistence.ObjectBackupStoreGetter
metrics *metrics.ServerMetrics
helper discovery.Helper
}
// NewBackupDeletionReconciler creates a new backup deletion reconciler.
func NewBackupDeletionReconciler(
// NewBackupDeletionController creates a new backup deletion controller.
func NewBackupDeletionController(
logger logrus.FieldLogger,
client client.Client,
deleteBackupRequestInformer velerov1informers.DeleteBackupRequestInformer,
deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter,
backupClient velerov1client.BackupsGetter,
restoreLister velerov1listers.RestoreLister,
restoreClient velerov1client.RestoresGetter,
backupTracker BackupTracker,
resticMgr restic.RepositoryManager,
metrics *metrics.ServerMetrics,
helper discovery.Helper,
podvolumeBackupLister velerov1listers.PodVolumeBackupLister,
kbClient client.Client,
snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister,
csiSnapshotLister snapshotv1beta1listers.VolumeSnapshotLister,
csiSnapshotContentLister snapshotv1beta1listers.VolumeSnapshotContentLister,
csiSnapshotClient *snapshotterClientSet.Clientset,
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
backupStoreGetter persistence.ObjectBackupStoreGetter,
) *backupDeletionReconciler {
return &backupDeletionReconciler{
Client: client,
logger: logger,
backupTracker: backupTracker,
resticMgr: resticMgr,
metrics: metrics,
clock: clock.RealClock{},
discoveryHelper: helper,
metrics *metrics.ServerMetrics,
helper discovery.Helper,
) Interface {
c := &backupDeletionController{
genericController: newGenericController(BackupDeletion, logger),
deleteBackupRequestClient: deleteBackupRequestClient,
deleteBackupRequestLister: deleteBackupRequestInformer.Lister(),
backupClient: backupClient,
restoreLister: restoreLister,
restoreClient: restoreClient,
backupTracker: backupTracker,
resticMgr: resticMgr,
podvolumeBackupLister: podvolumeBackupLister,
kbClient: kbClient,
snapshotLocationLister: snapshotLocationLister,
csiSnapshotLister: csiSnapshotLister,
csiSnapshotContentLister: csiSnapshotContentLister,
csiSnapshotClient: csiSnapshotClient,
metrics: metrics,
helper: helper,
// use variables to refer to these functions so they can be
// replaced with fakes for testing.
newPluginManager: newPluginManager,
backupStoreGetter: backupStoreGetter,
clock: &clock.RealClock{},
}
c.syncHandler = c.processQueueItem
c.processRequestFunc = c.processRequest
deleteBackupRequestInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.enqueue,
},
)
c.resyncPeriod = time.Hour
c.resyncFunc = c.deleteExpiredRequests
return c
}
func (r *backupDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Make sure the expired requests can be deleted eventually
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.DeleteBackupRequestList{}, time.Hour)
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.DeleteBackupRequest{}).
Watches(s, nil).
Complete(r)
func (c *backupDeletionController) processQueueItem(key string) error {
log := c.logger.WithField("key", key)
log.Debug("Running processItem")
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return errors.Wrap(err, "error splitting queue key")
}
req, err := c.deleteBackupRequestLister.DeleteBackupRequests(ns).Get(name)
if apierrors.IsNotFound(err) {
log.Debug("Unable to find DeleteBackupRequest")
return nil
}
if err != nil {
return errors.Wrap(err, "error getting DeleteBackupRequest")
}
switch req.Status.Phase {
case velerov1api.DeleteBackupRequestPhaseProcessed:
// Don't do anything because it's already been processed
default:
// Don't mutate the shared cache
reqCopy := req.DeepCopy()
return c.processRequestFunc(reqCopy)
}
return nil
}
// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=delete
func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.logger.WithFields(logrus.Fields{
"controller": BackupDeletion,
"deletebackuprequest": req.String(),
func (c *backupDeletionController) processRequest(req *velerov1api.DeleteBackupRequest) error {
log := c.logger.WithFields(logrus.Fields{
"namespace": req.Namespace,
"name": req.Name,
"backup": req.Spec.BackupName,
})
log.Debug("Getting deletebackuprequest")
dbr := &velerov1api.DeleteBackupRequest{}
if err := r.Get(ctx, req.NamespacedName, dbr); err != nil {
if apierrors.IsNotFound(err) {
log.Debug("Unable to find the deletebackuprequest")
return ctrl.Result{}, nil
}
log.WithError(err).Error("Error getting deletebackuprequest")
return ctrl.Result{}, err
}
// Since we use the reconciler along with the PeriodicalEnqueueSource, there may be reconciliation triggered by
// stale requests.
if dbr.Status.Phase == velerov1api.DeleteBackupRequestPhaseProcessed {
age := r.clock.Now().Sub(dbr.CreationTimestamp.Time)
if age >= deleteBackupRequestMaxAge { // delete the expired request
log.Debug("The request is expired, deleting it.")
if err := r.Delete(ctx, dbr); err != nil {
log.WithError(err).Error("Error deleting DeleteBackupRequest")
}
} else {
log.Info("The request has been processed, skip.")
}
return ctrl.Result{}, nil
}
var err error
// Make sure we have the backup name
if dbr.Spec.BackupName == "" {
_, err := r.patchDeleteBackupRequest(ctx, dbr, func(res *velerov1api.DeleteBackupRequest) {
res.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
res.Status.Errors = []string{"spec.backupName is required"}
if req.Spec.BackupName == "" {
_, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = []string{"spec.backupName is required"}
})
return ctrl.Result{}, err
return err
}
log = log.WithField("backup", dbr.Spec.BackupName)
// Remove any existing deletion requests for this backup so we only have
// one at a time
if errs := r.deleteExistingDeletionRequests(ctx, dbr, log); errs != nil {
return ctrl.Result{}, kubeerrs.NewAggregate(errs)
if errs := c.deleteExistingDeletionRequests(req, log); errs != nil {
return kubeerrs.NewAggregate(errs)
}
// Don't allow deleting an in-progress backup
if r.backupTracker.Contains(dbr.Namespace, dbr.Spec.BackupName) {
_, err := r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
if c.backupTracker.Contains(req.Namespace, req.Spec.BackupName) {
_, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = []string{"backup is still in progress"}
})
return ctrl.Result{}, err
return err
}
// Get the backup we're trying to delete
backup := &velerov1api.Backup{}
if err := r.Get(ctx, types.NamespacedName{
Namespace: dbr.Namespace,
Name: dbr.Spec.BackupName,
}, backup); apierrors.IsNotFound(err) {
backup, err := c.backupClient.Backups(req.Namespace).Get(context.TODO(), req.Spec.BackupName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
// Couldn't find backup - update status to Processed and record the not-found error
_, err = r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
req, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = []string{"backup not found"}
})
return ctrl.Result{}, err
} else if err != nil {
return ctrl.Result{}, errors.Wrap(err, "error getting backup")
return err
}
if err != nil {
return errors.Wrap(err, "error getting backup")
}
// Don't allow deleting backups in read-only storage locations
location := &velerov1api.BackupStorageLocation{}
if err := r.Get(context.Background(), client.ObjectKey{
if err := c.kbClient.Get(context.Background(), client.ObjectKey{
Namespace: backup.Namespace,
Name: backup.Spec.StorageLocation,
}, location); err != nil {
if apierrors.IsNotFound(err) {
_, err := r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
_, err := c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = append(r.Status.Errors, fmt.Sprintf("backup storage location %s not found", backup.Spec.StorageLocation))
})
return ctrl.Result{}, err
return err
}
return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location")
return errors.Wrap(err, "error getting backup storage location")
}
if location.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly {
_, err := r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
_, err := c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = append(r.Status.Errors, fmt.Sprintf("cannot delete backup because backup storage location %s is currently in read-only mode", location.Name))
})
return ctrl.Result{}, err
return err
}
// if the request object has no labels defined, initialise an empty map since
// we will be updating labels
if dbr.Labels == nil {
dbr.Labels = map[string]string{}
if req.Labels == nil {
req.Labels = map[string]string{}
}
// Update status to InProgress and set backup-name and backup-uid label if needed
dbr, err := r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
// Update status to InProgress and set backup-name label if needed
req, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseInProgress
if r.Labels[velerov1api.BackupNameLabel] == "" {
r.Labels[velerov1api.BackupNameLabel] = label.GetValidName(dbr.Spec.BackupName)
}
if r.Labels[velerov1api.BackupUIDLabel] == "" {
r.Labels[velerov1api.BackupUIDLabel] = string(backup.UID)
if req.Labels[velerov1api.BackupNameLabel] == "" {
req.Labels[velerov1api.BackupNameLabel] = label.GetValidName(req.Spec.BackupName)
}
})
if err != nil {
return ctrl.Result{}, err
return err
}
// Set backup-uid label if needed
if req.Labels[velerov1api.BackupUIDLabel] == "" {
req, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
req.Labels[velerov1api.BackupUIDLabel] = string(backup.UID)
})
if err != nil {
return err
}
}
// Set backup status to Deleting
backup, err = r.patchBackup(ctx, backup, func(b *velerov1api.Backup) {
backup, err = c.patchBackup(backup, func(b *velerov1api.Backup) {
b.Status.Phase = velerov1api.BackupPhaseDeleting
})
if err != nil {
log.WithError(errors.WithStack(err)).Error("Error setting backup phase to deleting")
return ctrl.Result{}, err
return err
}
backupScheduleName := backup.GetLabels()[velerov1api.ScheduleNameLabel]
r.metrics.RegisterBackupDeletionAttempt(backupScheduleName)
c.metrics.RegisterBackupDeletionAttempt(backupScheduleName)
pluginManager := r.newPluginManager(log)
var errs []string
pluginManager := c.newPluginManager(log)
defer pluginManager.CleanupClients()
backupStore, err := r.backupStoreGetter.Get(location, pluginManager, log)
backupStore, err := c.backupStoreGetter.Get(location, pluginManager, log)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "error getting the backup store")
return errors.Wrap(err, "error getting the backup store")
}
actions, err := pluginManager.GetDeleteItemActions()
log.Debugf("%d actions before invoking actions", len(actions))
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "error getting delete item actions")
return errors.Wrap(err, "error getting delete item actions")
}
// don't defer CleanupClients here, since it was already called above.
@@ -253,13 +308,13 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if err != nil {
log.WithError(err).Errorf("Unable to download tarball for backup %s, skipping associated DeleteItemAction plugins", backup.Name)
} else {
defer closeAndRemoveFile(backupFile, r.logger)
defer closeAndRemoveFile(backupFile, c.logger)
ctx := &delete.Context{
Backup: backup,
BackupReader: backupFile,
Actions: actions,
Log: r.logger,
DiscoveryHelper: r.discoveryHelper,
Log: c.logger,
DiscoveryHelper: c.helper,
Filesystem: filesystem.NewFileSystem(),
}
@@ -267,13 +322,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// but what do we do with the error returned? We can't just swallow it as that may lead to dangling resources.
err = delete.InvokeDeleteActions(ctx)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "error invoking delete item actions")
return errors.Wrap(err, "error invoking delete item actions")
}
}
}
var errs []string
if backupStore != nil {
log.Info("Removing PV snapshots")
@@ -287,7 +340,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
volumeSnapshotter, ok := volumeSnapshotters[snapshot.Spec.Location]
if !ok {
if volumeSnapshotter, err = volumeSnapshottersForVSL(ctx, backup.Namespace, snapshot.Spec.Location, r.Client, pluginManager); err != nil {
if volumeSnapshotter, err = volumeSnapshotterForSnapshotLocation(backup.Namespace, snapshot.Spec.Location, c.snapshotLocationLister, pluginManager); err != nil {
errs = append(errs, err.Error())
continue
}
@@ -300,8 +353,9 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
}
}
log.Info("Removing restic snapshots")
if deleteErrs := r.deleteResticSnapshots(ctx, backup); len(deleteErrs) > 0 {
if deleteErrs := c.deleteResticSnapshots(backup); len(deleteErrs) > 0 {
for _, err := range deleteErrs {
errs = append(errs, err.Error())
}
@@ -315,19 +369,15 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
log.Info("Removing restores")
restoreList := &velerov1api.RestoreList{}
selector := labels.Everything()
if err := r.List(ctx, restoreList, &client.ListOptions{
Namespace: backup.Namespace,
LabelSelector: selector,
}); err != nil {
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
for _, restore := range restoreList.Items {
for _, restore := range restores {
if restore.Spec.BackupName != backup.Name {
continue
}
restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore))
restoreLog := log.WithField("restore", kube.NamespaceAndName(restore))
restoreLog.Info("Deleting restore log/results from backup storage")
if err := backupStore.DeleteRestore(restore.Name); err != nil {
@@ -337,156 +387,202 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
restoreLog.Info("Deleting restore referencing backup")
if err := r.Delete(ctx, &restore); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error())
if err := c.restoreClient.Restores(restore.Namespace).Delete(context.TODO(), restore.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(restore)).Error())
}
}
}
if len(errs) == 0 {
// Only try to delete the backup object from kube if everything preceding went smoothly
if err := r.Delete(ctx, backup); err != nil {
err = c.backupClient.Backups(backup.Namespace).Delete(context.TODO(), backup.Name, metav1.DeleteOptions{})
if err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting backup %s", kube.NamespaceAndName(backup)).Error())
}
}
if len(errs) == 0 {
r.metrics.RegisterBackupDeletionSuccess(backupScheduleName)
c.metrics.RegisterBackupDeletionSuccess(backupScheduleName)
} else {
r.metrics.RegisterBackupDeletionFailed(backupScheduleName)
c.metrics.RegisterBackupDeletionFailed(backupScheduleName)
}
// Update status to processed and record errors
if _, err := r.patchDeleteBackupRequest(ctx, dbr, func(r *velerov1api.DeleteBackupRequest) {
req, err = c.patchDeleteBackupRequest(req, func(r *velerov1api.DeleteBackupRequest) {
r.Status.Phase = velerov1api.DeleteBackupRequestPhaseProcessed
r.Status.Errors = errs
}); err != nil {
return ctrl.Result{}, err
})
if err != nil {
return err
}
// Everything deleted correctly, so we can delete all DeleteBackupRequests for this backup
if len(errs) == 0 {
labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s,%s=%s", velerov1api.BackupNameLabel, label.GetValidName(backup.Name), velerov1api.BackupUIDLabel, backup.UID))
if err != nil {
// Should not be here
r.logger.WithError(err).WithField("backup", kube.NamespaceAndName(backup)).Error("error creating label selector for deleting the backup's DeleteBackupRequests")
return ctrl.Result{}, nil
}
alldbr := &velerov1api.DeleteBackupRequest{}
err = r.DeleteAllOf(ctx, alldbr, client.MatchingLabelsSelector{
Selector: labelSelector,
}, client.InNamespace(dbr.Namespace))
listOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
err = c.deleteBackupRequestClient.DeleteBackupRequests(req.Namespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
if err != nil {
// If this errors, all we can do is log it.
r.logger.WithError(err).WithField("backup", kube.NamespaceAndName(backup)).Error("error deleting all associated DeleteBackupRequests after successfully deleting the backup")
c.logger.WithField("backup", kube.NamespaceAndName(backup)).Error("error deleting all associated DeleteBackupRequests after successfully deleting the backup")
}
}
log.Infof("Reconciliation done")
return ctrl.Result{}, nil
return nil
}
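The bulk cleanup at the end of Reconcile pairs labels.Parse with the controller-runtime client's DeleteAllOf, so the API server deletes every matching request in one call. A minimal sketch of that pattern in isolation (the function name and parameters are illustrative):

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/label"
)

// deleteRequestsForBackup removes every DeleteBackupRequest carrying the
// given backup's name and UID labels in a single server-side call.
func deleteRequestsForBackup(ctx context.Context, c client.Client, ns, name, uid string) error {
	selector, err := labels.Parse(fmt.Sprintf("%s=%s,%s=%s",
		velerov1api.BackupNameLabel, label.GetValidName(name),
		velerov1api.BackupUIDLabel, uid))
	if err != nil {
		return err
	}
	return c.DeleteAllOf(ctx, &velerov1api.DeleteBackupRequest{},
		client.InNamespace(ns),
		client.MatchingLabelsSelector{Selector: selector})
}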
func volumeSnapshottersForVSL(
ctx context.Context,
namespace, vslName string,
client client.Client,
func volumeSnapshotterForSnapshotLocation(
namespace, snapshotLocationName string,
snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister,
pluginManager clientmgmt.Manager,
) (velero.VolumeSnapshotter, error) {
vsl := &velerov1api.VolumeSnapshotLocation{}
if err := client.Get(ctx, types.NamespacedName{
Namespace: namespace,
Name: vslName,
}, vsl); err != nil {
return nil, errors.Wrapf(err, "error getting volume snapshot location %s", vslName)
}
volumeSnapshotter, err := pluginManager.GetVolumeSnapshotter(vsl.Spec.Provider)
snapshotLocation, err := snapshotLocationLister.VolumeSnapshotLocations(namespace).Get(snapshotLocationName)
if err != nil {
return nil, errors.Wrapf(err, "error getting volume snapshotter for provider %s", vsl.Spec.Provider)
return nil, errors.Wrapf(err, "error getting volume snapshot location %s", snapshotLocationName)
}
if err = volumeSnapshotter.Init(vsl.Spec.Config); err != nil {
return nil, errors.Wrapf(err, "error initializing volume snapshotter for volume snapshot location %s", vslName)
volumeSnapshotter, err := pluginManager.GetVolumeSnapshotter(snapshotLocation.Spec.Provider)
if err != nil {
return nil, errors.Wrapf(err, "error getting volume snapshotter for provider %s", snapshotLocation.Spec.Provider)
}
if err = volumeSnapshotter.Init(snapshotLocation.Spec.Config); err != nil {
return nil, errors.Wrapf(err, "error initializing volume snapshotter for volume snapshot location %s", snapshotLocationName)
}
return volumeSnapshotter, nil
}
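Both halves of this hunk follow the same two-step plugin handshake: resolve the VolumeSnapshotter by the location's provider, then Init it with the location's config map. Condensed into a sketch (reusing the velerov1api, clientmgmt, velero, and errors imports already present in this file):

// snapshotterFor resolves and initializes the VolumeSnapshotter plugin
// for a given VolumeSnapshotLocation.
func snapshotterFor(vsl *velerov1api.VolumeSnapshotLocation, pm clientmgmt.Manager) (velero.VolumeSnapshotter, error) {
	vs, err := pm.GetVolumeSnapshotter(vsl.Spec.Provider)
	if err != nil {
		return nil, errors.Wrapf(err, "error getting volume snapshotter for provider %s", vsl.Spec.Provider)
	}
	// Init hands the provider-specific key/value config from the spec to the plugin.
	if err := vs.Init(vsl.Spec.Config); err != nil {
		return nil, errors.Wrapf(err, "error initializing volume snapshotter for location %s", vsl.Name)
	}
	return vs, nil
}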
func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Context, req *velerov1api.DeleteBackupRequest, log logrus.FieldLogger) []error {
func (c *backupDeletionController) deleteExistingDeletionRequests(req *velerov1api.DeleteBackupRequest, log logrus.FieldLogger) []error {
log.Info("Removing existing deletion requests for backup")
dbrList := &velerov1api.DeleteBackupRequestList{}
selector := label.NewSelectorForBackup(req.Spec.BackupName)
if err := r.List(ctx, dbrList, &client.ListOptions{
Namespace: req.Namespace,
LabelSelector: selector,
}); err != nil {
dbrs, err := c.deleteBackupRequestLister.DeleteBackupRequests(req.Namespace).List(selector)
if err != nil {
return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
}
var errs []error
for _, dbr := range dbrList.Items {
for _, dbr := range dbrs {
if dbr.Name == req.Name {
continue
}
if err := r.Delete(ctx, &dbr); err != nil {
if err := c.deleteBackupRequestClient.DeleteBackupRequests(req.Namespace).Delete(context.TODO(), dbr.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(err))
} else {
log.Infof("deletion request '%s' removed.", dbr.Name)
}
}
return errs
}
func (r *backupDeletionReconciler) deleteResticSnapshots(ctx context.Context, backup *velerov1api.Backup) []error {
if r.resticMgr == nil {
func (c *backupDeletionController) deleteResticSnapshots(backup *velerov1api.Backup) []error {
if c.resticMgr == nil {
return nil
}
snapshots, err := restic.GetSnapshotsInBackup(ctx, backup, r.Client)
snapshots, err := restic.GetSnapshotsInBackup(backup, c.podvolumeBackupLister)
if err != nil {
return []error{err}
}
ctx2, cancelFunc := context.WithTimeout(ctx, resticTimeout)
ctx, cancelFunc := context.WithTimeout(context.Background(), resticTimeout)
defer cancelFunc()
var errs []error
for _, snapshot := range snapshots {
if err := r.resticMgr.Forget(ctx2, snapshot); err != nil {
if err := c.resticMgr.Forget(ctx, snapshot); err != nil {
errs = append(errs, err)
}
}
return errs
}
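Both variants wrap the whole Forget loop in one deadline derived from resticTimeout, so the time budget is shared across snapshots rather than granted per call. The pattern in isolation (a sketch; resticTimeout and the restic types come from the surrounding file):

// forgetAll forgets a batch of restic snapshots under a single shared deadline.
func forgetAll(parent context.Context, mgr restic.RepositoryManager, snapshots []restic.SnapshotIdentifier) []error {
	ctx, cancel := context.WithTimeout(parent, resticTimeout)
	defer cancel()
	var errs []error
	for _, s := range snapshots {
		// Once the deadline passes, the remaining Forget calls fail fast.
		if err := mgr.Forget(ctx, s); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}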
func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context, req *velerov1api.DeleteBackupRequest, mutate func(*velerov1api.DeleteBackupRequest)) (*velerov1api.DeleteBackupRequest, error) {
original := req.DeepCopy()
mutate(req)
if err := r.Patch(ctx, req, client.MergeFrom(original)); err != nil {
return nil, errors.Wrap(err, "error patching the DeleteBackupRequest")
const deleteBackupRequestMaxAge = 24 * time.Hour
func (c *backupDeletionController) deleteExpiredRequests() {
c.logger.Info("Checking for expired DeleteBackupRequests")
defer c.logger.Info("Done checking for expired DeleteBackupRequests")
// Our shared informer factory filters on a single namespace, so asking for all is ok here.
requests, err := c.deleteBackupRequestLister.List(labels.Everything())
if err != nil {
c.logger.WithError(err).Error("unable to check for expired DeleteBackupRequests")
return
}
now := c.clock.Now()
for _, req := range requests {
if req.Status.Phase != velerov1api.DeleteBackupRequestPhaseProcessed {
continue
}
age := now.Sub(req.CreationTimestamp.Time)
if age >= deleteBackupRequestMaxAge {
reqLog := c.logger.WithFields(logrus.Fields{"namespace": req.Namespace, "name": req.Name})
reqLog.Info("Deleting expired DeleteBackupRequest")
err = c.deleteBackupRequestClient.DeleteBackupRequests(req.Namespace).Delete(context.TODO(), req.Name, metav1.DeleteOptions{})
if err != nil {
reqLog.WithError(err).Error("Error deleting DeleteBackupRequest")
}
}
}
}
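The expiry sweep reduces to an age comparison against the injected clock, which is what keeps it unit-testable. Extracted as a sketch:

// isExpired reports whether a processed DeleteBackupRequest has outlived
// its retention window relative to the given time.
func isExpired(req *velerov1api.DeleteBackupRequest, now time.Time) bool {
	if req.Status.Phase != velerov1api.DeleteBackupRequestPhaseProcessed {
		// Unprocessed requests are never garbage-collected here.
		return false
	}
	return now.Sub(req.CreationTimestamp.Time) >= deleteBackupRequestMaxAge
}

A caller would pass c.clock.Now() rather than time.Now() so tests can pin the clock.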
func (c *backupDeletionController) patchDeleteBackupRequest(req *velerov1api.DeleteBackupRequest, mutate func(*velerov1api.DeleteBackupRequest)) (*velerov1api.DeleteBackupRequest, error) {
// Record original json
oldData, err := json.Marshal(req)
if err != nil {
return nil, errors.Wrap(err, "error marshalling original DeleteBackupRequest")
}
// Mutate
mutate(req)
// Record new json
newData, err := json.Marshal(req)
if err != nil {
return nil, errors.Wrap(err, "error marshalling updated DeleteBackupRequest")
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {
return nil, errors.Wrap(err, "error creating json merge patch for DeleteBackupRequest")
}
req, err = c.deleteBackupRequestClient.DeleteBackupRequests(req.Namespace).Patch(context.TODO(), req.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrap(err, "error patching DeleteBackupRequest")
}
return req, nil
}
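The controller-side patch helper is the classic marshal/mutate/marshal sequence from github.com/evanphx/json-patch, which emits an RFC 7386 merge patch containing only the changed fields. Its core, as a sketch:

// mergePatchFor computes a JSON merge patch between two object states.
func mergePatchFor(old, updated interface{}) ([]byte, error) {
	oldData, err := json.Marshal(old)
	if err != nil {
		return nil, err
	}
	newData, err := json.Marshal(updated)
	if err != nil {
		return nil, err
	}
	// Only fields that differ between the two documents end up in the patch.
	return jsonpatch.CreateMergePatch(oldData, newData)
}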
func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) {
//TODO: The patchHelper can't be used here because the `backup/xxx/status` subresource does not exist until the backup resource is refactored
func (c *backupDeletionController) patchBackup(backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) {
// Record original json
oldData, err := json.Marshal(backup)
if err != nil {
return nil, errors.Wrap(err, "error marshalling original Backup")
}
newBackup := backup.DeepCopy()
mutate(newBackup)
newData, err := json.Marshal(newBackup)
// Mutate
mutate(backup)
// Record new json
newData, err := json.Marshal(backup)
if err != nil {
return nil, errors.Wrap(err, "error marshalling updated Backup")
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {
return nil, errors.Wrap(err, "error creating json merge patch for Backup")
}
if err := r.Client.Patch(ctx, backup, client.RawPatch(types.MergePatchType, patchBytes)); err != nil {
backup, err = c.backupClient.Backups(backup.Namespace).Patch(context.TODO(), backup.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrap(err, "error patching Backup")
}
return backup, nil
}
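On the reconciler side the same effect is achieved with controller-runtime's MergeFrom, which diffs the mutated object against a snapshot taken before mutation. A hedged sketch of the new shape:

// setBackupPhase patches only the phase change onto the server-side object.
func setBackupPhase(ctx context.Context, c client.Client, backup *velerov1api.Backup, phase velerov1api.BackupPhase) error {
	original := backup.DeepCopy() // snapshot before mutation
	backup.Status.Phase = phase
	return c.Patch(ctx, backup, client.MergeFrom(original))
}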

File diff suppressed because it is too large

@@ -23,24 +23,17 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/internal/storage"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/persistence"
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
const (
// Keep the enqueue period small to make sure each BSL is validated as expected.
// The BSL validation frequency is 1 minute by default; if the enqueue period were
// also 1 minute, the actual validation interval for a BSL could stretch to 2 minutes.
bslValidationEnqueuePeriod = 10 * time.Second
)
// BackupStorageLocationReconciler reconciles a BackupStorageLocation object
@@ -60,88 +53,88 @@ type BackupStorageLocationReconciler struct {
// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch
func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var unavailableErrors []string
var location velerov1api.BackupStorageLocation
log := r.Log.WithField("controller", BackupStorageLocation)
log := r.Log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String())
log.Debug("Validating availability of BackupStorageLocation")
log.Debug("Validating availability of backup storage locations.")
locationList, err := storage.ListBackupStorageLocations(r.Ctx, r.Client, req.Namespace)
if err != nil {
log.WithError(err).Error("No BackupStorageLocations found, at least one is required")
return ctrl.Result{}, nil
log.WithError(err).Error("No backup storage locations found, at least one is required")
return ctrl.Result{}, err
}
pluginManager := r.NewPluginManager(log)
defer pluginManager.CleanupClients()
var defaultFound bool
for _, bsl := range locationList.Items {
if bsl.Spec.Default {
for _, location := range locationList.Items {
if location.Spec.Default {
defaultFound = true
break
}
}
var unavailableErrors []string
var anyVerified bool
for i := range locationList.Items {
location := &locationList.Items[i]
isDefault := location.Spec.Default
log := r.Log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, location.Name)
// TODO(2.0) remove this check since the server default will be deprecated
if !defaultFound && location.Name == r.DefaultBackupLocationInfo.StorageLocation {
// For backward compatibility, treat the backup storage location as the default if
// none of the BSLs is marked as the default and the BSL name matches the
// "velero server --default-backup-storage-location" value.
isDefault = true
defaultFound = true
}
if bsl.Name == req.Name && bsl.Namespace == req.Namespace {
location = bsl
if !storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.DefaultBackupLocationInfo.ServerValidationFrequency, log) {
log.Debug("Validation not required, skipping...")
continue
}
}
if location.Name == "" || location.Namespace == "" {
log.WithError(err).Error("BackupStorageLocation is not found")
return ctrl.Result{}, nil
}
isDefault := location.Spec.Default
// TODO(2.0) remove this check since the server default will be deprecated
if !defaultFound && location.Name == r.DefaultBackupLocationInfo.StorageLocation {
// For backward compatibility, treat the backup storage location as the default if
// none of the BSLs is marked as the default and the BSL name matches the
// "velero server --default-backup-storage-location" value.
isDefault = true
defaultFound = true
}
func() {
var err error
original := location.DeepCopy()
defer func() {
location.Status.LastValidationTime = &metav1.Time{Time: time.Now().UTC()}
if err != nil {
log.Info("BackupStorageLocation is invalid, marking as unavailable")
err = errors.Wrapf(err, "BackupStorageLocation %q is unavailable", location.Name)
unavailableErrors = append(unavailableErrors, err.Error())
location.Status.Phase = velerov1api.BackupStorageLocationPhaseUnavailable
location.Status.Message = err.Error()
} else {
log.Info("BackupStorageLocations is valid, marking as available")
location.Status.Phase = velerov1api.BackupStorageLocationPhaseAvailable
location.Status.Message = ""
}
if err := r.Client.Patch(r.Ctx, &location, client.MergeFrom(original)); err != nil {
log.WithError(err).Error("Error updating BackupStorageLocation phase")
}
}()
backupStore, err := r.BackupStoreGetter.Get(&location, pluginManager, log)
backupStore, err := r.BackupStoreGetter.Get(location, pluginManager, log)
if err != nil {
log.WithError(err).Error("Error getting a backup store")
return
continue
}
log.Info("Validating BackupStorageLocation")
err = backupStore.IsValid()
// Initialize the patch helper.
patchHelper, err := patch.NewHelper(location, r.Client)
if err != nil {
log.WithError(err).Error("fail to validate backup store")
return
log.WithError(err).Error("Error getting a patch helper to update this resource")
continue
}
// updates the default backup location
location.Spec.Default = isDefault
}()
log.Info("Validating backup storage location")
anyVerified = true
if err := backupStore.IsValid(); err != nil {
log.Info("Backup storage location is invalid, marking as unavailable")
unavailableErrors = append(unavailableErrors, errors.Wrapf(err, "Backup storage location %q is unavailable", location.Name).Error())
location.Status.Phase = velerov1api.BackupStorageLocationPhaseUnavailable
} else {
log.Info("Backup storage location valid, marking as available")
location.Status.Phase = velerov1api.BackupStorageLocationPhaseAvailable
}
location.Status.LastValidationTime = &metav1.Time{Time: time.Now().UTC()}
if err := patchHelper.Patch(r.Ctx, location); err != nil {
log.WithError(err).Error("Error updating backup storage location phase")
}
}
if !anyVerified {
log.Debug("No backup storage locations needed to be validated")
}
r.logReconciledPhase(defaultFound, locationList, unavailableErrors)
return ctrl.Result{}, nil
return ctrl.Result{Requeue: true}, nil
}
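storage.IsReadyToValidate is not shown in this hunk; based on how it is exercised here and in the tests below, a plausible shape is the following sketch (an assumption, not Velero's actual helper): a location is due when the configured frequency has elapsed since the last validation, the server default applies when no frequency is set, and an explicit zero disables validation entirely.

// readyToValidate is a hypothetical reconstruction of the frequency check.
func readyToValidate(freq *metav1.Duration, last *metav1.Time, serverDefault time.Duration, now time.Time) bool {
	interval := serverDefault
	if freq != nil {
		interval = freq.Duration // explicit per-location setting wins
	}
	if interval == 0 {
		return false // zero frequency disables validation for this location
	}
	if last == nil {
		return true // never validated before
	}
	return now.Sub(last.Time) >= interval
}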
func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool, locationList velerov1api.BackupStorageLocationList, errs []string) {
@@ -168,34 +161,21 @@ func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool,
if numUnavailable+numUnknown == len(locationList.Items) { // no available BSL
if len(errs) > 0 {
log.Errorf("Current BackupStorageLocations available/unavailable/unknown: %v/%v/%v, %s)", numAvailable, numUnavailable, numUnknown, strings.Join(errs, "; "))
log.Errorf("Current backup storage locations available/unavailable/unknown: %v/%v/%v, %s)", numAvailable, numUnavailable, numUnknown, strings.Join(errs, "; "))
} else {
log.Errorf("Current BackupStorageLocations available/unavailable/unknown: %v/%v/%v)", numAvailable, numUnavailable, numUnknown)
log.Errorf("Current backup storage locations available/unavailable/unknown: %v/%v/%v)", numAvailable, numUnavailable, numUnknown)
}
} else if numUnavailable > 0 { // some but not all BSL unavailable
log.Warnf("Unavailable BackupStorageLocations detected: available/unavailable/unknown: %v/%v/%v, %s)", numAvailable, numUnavailable, numUnknown, strings.Join(errs, "; "))
log.Warnf("Unavailable backup storage locations detected: available/unavailable/unknown: %v/%v/%v, %s)", numAvailable, numUnavailable, numUnknown, strings.Join(errs, "; "))
}
if !defaultFound {
log.Warn("There is no existing BackupStorageLocation set as default. Please see `velero backup-location -h` for options.")
log.Warn("There is no existing backup storage location set as default. Please see `velero backup-location -h` for options.")
}
}
func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error {
g := kube.NewPeriodicalEnqueueSource(
r.Log,
mgr.GetClient(),
&velerov1api.BackupStorageLocationList{},
bslValidationEnqueuePeriod,
// Add filter function to enqueue BSL per ValidationFrequency setting.
func(object client.Object) bool {
location := object.(*velerov1api.BackupStorageLocation)
return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.DefaultBackupLocationInfo.ServerValidationFrequency, r.Log.WithField("controller", BackupStorageLocation))
},
)
return ctrl.NewControllerManagedBy(mgr).
// As the "status.LastValidationTime" field is always updated, this triggers new reconciling process, skip the update event that include no spec change to avoid the reconcile loop
For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})).
Watches(g, nil).
For(&velerov1api.BackupStorageLocation{}).
Complete(r)
}
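kube.SpecChangePredicate is Velero's own filter and its implementation is not part of this diff. For orientation, a typical generation-based predicate in controller-runtime looks like this sketch (an assumption, not the actual Velero helper):

// specChangeOnly drops update events whose spec did not change. The API
// server bumps metadata.generation only on spec mutations, so comparing
// generations is a cheap proxy for "spec changed".
var specChangeOnly = predicate.Funcs{
	UpdateFunc: func(e event.UpdateEvent) bool {
		return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
	},
}

It would be registered the same way as the builder.WithPredicates call above.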


@@ -81,7 +81,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() {
Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed())
r := BackupStorageLocationReconciler{
Ctx: ctx,
Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
Client: fake.NewFakeClientWithScheme(scheme.Scheme, locations),
DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
StorageLocation: "location-1",
ServerValidationFrequency: 0,
@@ -91,17 +91,18 @@ var _ = Describe("Backup Storage Location Reconciler", func() {
Log: velerotest.NewLogger(),
}
actualResult, err := r.Reconcile(ctx, ctrl.Request{
NamespacedName: types.NamespacedName{Namespace: "ns-1"},
})
Expect(actualResult).To(BeEquivalentTo(ctrl.Result{Requeue: true}))
Expect(err).To(BeNil())
// Assertions
for i, location := range locations.Items {
actualResult, err := r.Reconcile(ctx, ctrl.Request{
NamespacedName: types.NamespacedName{Namespace: location.Namespace, Name: location.Name},
})
Expect(actualResult).To(BeEquivalentTo(ctrl.Result{}))
Expect(err).To(BeNil())
key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace}
instance := &velerov1api.BackupStorageLocation{}
err = r.Client.Get(ctx, key, instance)
err := r.Client.Get(ctx, key, instance)
Expect(err).To(BeNil())
Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault))
Expect(instance.Status.Phase).To(BeIdenticalTo(tests[i].expectedPhase))
@@ -146,7 +147,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() {
Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed())
r := BackupStorageLocationReconciler{
Ctx: ctx,
Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
Client: fake.NewFakeClientWithScheme(scheme.Scheme, locations),
DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
StorageLocation: "default",
ServerValidationFrequency: 0,
@@ -156,19 +157,91 @@ var _ = Describe("Backup Storage Location Reconciler", func() {
Log: velerotest.NewLogger(),
}
actualResult, err := r.Reconcile(ctx, ctrl.Request{
NamespacedName: types.NamespacedName{Namespace: "ns-1"},
})
Expect(actualResult).To(BeEquivalentTo(ctrl.Result{Requeue: true}))
Expect(err).To(BeNil())
// Assertions
for i, location := range locations.Items {
actualResult, err := r.Reconcile(ctx, ctrl.Request{
NamespacedName: types.NamespacedName{Namespace: location.Namespace, Name: location.Name},
})
Expect(actualResult).To(BeEquivalentTo(ctrl.Result{}))
Expect(err).To(BeNil())
key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace}
instance := &velerov1api.BackupStorageLocation{}
err = r.Client.Get(ctx, key, instance)
err := r.Client.Get(ctx, key, instance)
Expect(err).To(BeNil())
Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault))
}
})
It("Should not patch a backup storage location object status phase if the location's validation frequency is specifically set to zero", func() {
tests := []struct {
backupLocation *velerov1api.BackupStorageLocation
isValidError error
expectedIsDefault bool
expectedPhase velerov1api.BackupStorageLocationPhase
wantErr bool
}{
{
backupLocation: builder.ForBackupStorageLocation("ns-1", "location-1").ValidationFrequency(0).LastValidationTime(time.Now()).Result(),
isValidError: nil,
expectedIsDefault: false,
expectedPhase: "",
wantErr: false,
},
{
backupLocation: builder.ForBackupStorageLocation("ns-1", "location-2").ValidationFrequency(0).LastValidationTime(time.Now()).Result(),
isValidError: nil,
expectedIsDefault: false,
expectedPhase: "",
wantErr: false,
},
}
// Setup
var (
pluginManager = &pluginmocks.Manager{}
backupStores = make(map[string]*persistencemocks.BackupStore)
)
pluginManager.On("CleanupClients").Return(nil)
locations := new(velerov1api.BackupStorageLocationList)
for i, test := range tests {
location := test.backupLocation
locations.Items = append(locations.Items, *location)
backupStores[location.Name] = &persistencemocks.BackupStore{}
backupStores[location.Name].On("IsValid").Return(tests[i].isValidError)
}
// Setup reconciler
Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed())
r := BackupStorageLocationReconciler{
Ctx: ctx,
Client: fake.NewFakeClientWithScheme(scheme.Scheme, locations),
DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
StorageLocation: "default",
ServerValidationFrequency: 0,
},
NewPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
Log: velerotest.NewLogger(),
}
actualResult, err := r.Reconcile(ctx, ctrl.Request{
NamespacedName: types.NamespacedName{Namespace: "ns-1"},
})
Expect(actualResult).To(BeEquivalentTo(ctrl.Result{Requeue: true}))
Expect(err).To(BeNil())
// Assertions
for i, location := range locations.Items {
key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace}
instance := &velerov1api.BackupStorageLocation{}
err := r.Client.Get(ctx, key, instance)
Expect(err).To(BeNil())
Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault))
Expect(instance.Status.Phase).To(BeIdenticalTo(tests[i].expectedPhase))
}
})
})


@@ -20,9 +20,7 @@ import (
"context"
"time"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
kuberrs "k8s.io/apimachinery/pkg/api/errors"
@@ -31,8 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/internal/storage"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/features"
@@ -52,7 +48,6 @@ type backupSyncController struct {
kbClient client.Client
podVolumeBackupClient velerov1client.PodVolumeBackupsGetter
backupLister velerov1listers.BackupLister
csiVSLister snapshotv1listers.VolumeSnapshotLister
csiSnapshotClient *snapshotterClientSet.Clientset
kubeClient kubernetes.Interface
namespace string
@@ -67,7 +62,6 @@ func NewBackupSyncController(
kbClient client.Client,
podVolumeBackupClient velerov1client.PodVolumeBackupsGetter,
backupLister velerov1listers.BackupLister,
csiVSLister snapshotv1listers.VolumeSnapshotLister,
syncPeriod time.Duration,
namespace string,
csiSnapshotClient *snapshotterClientSet.Clientset,
@@ -91,7 +85,6 @@ func NewBackupSyncController(
defaultBackupLocation: defaultBackupLocation,
defaultBackupSyncPeriod: syncPeriod,
backupLister: backupLister,
csiVSLister: csiVSLister,
csiSnapshotClient: csiSnapshotClient,
kubeClient: kubeClient,
@@ -290,22 +283,6 @@ func (c *backupSyncController) run() {
if features.IsEnabled(velerov1api.CSIFeatureFlag) {
// we are syncing these objects only to ensure that the storage snapshots are cleaned up
// on backup deletion or expiry.
log.Info("Syncing CSI volumesnapshotclasses in backup")
vsClasses, err := backupStore.GetCSIVolumeSnapshotClasses(backupName)
if err != nil {
log.WithError(errors.WithStack(err)).Error("Error getting CSI volumesnapclasses for this backup from backup store")
continue
}
for _, vsClass := range vsClasses {
vsClass.ResourceVersion = ""
created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotClasses().Create(context.TODO(), vsClass, metav1.CreateOptions{})
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotclass %s into cluster", vsClass.Name)
continue
}
log.Infof("Created CSI volumesnapshotclass %s", created.Name)
}
log.Info("Syncing CSI volumesnapshotcontents in backup")
snapConts, err := backupStore.GetCSIVolumeSnapshotContents(backupName)
if err != nil {
@@ -317,7 +294,7 @@ func (c *backupSyncController) run() {
for _, snapCont := range snapConts {
// TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents
snapCont.ResourceVersion = ""
created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotContents().Create(context.TODO(), snapCont, metav1.CreateOptions{})
created, err := c.csiSnapshotClient.SnapshotV1beta1().VolumeSnapshotContents().Create(context.TODO(), snapCont, metav1.CreateOptions{})
switch {
case err != nil && kuberrs.IsAlreadyExists(err):
log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name)
@@ -337,7 +314,7 @@ func (c *backupSyncController) run() {
// update the location's last-synced time field
statusPatch := client.MergeFrom(location.DeepCopy())
location.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()}
if err := c.kbClient.Patch(context.Background(), &location, statusPatch); err != nil {
if err := c.kbClient.Status().Patch(context.Background(), &location, statusPatch); err != nil {
log.WithError(errors.WithStack(err)).Error("Error patching backup location's last-synced time")
continue
}
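The two sides differ in whether the last-synced patch goes through the status subresource client; when the BackupStorageLocation CRD has the status subresource enabled, status fields can only be persisted via Status(). A sketch of the subresource form:

// markSynced records the sync time through the status subresource.
func markSynced(ctx context.Context, c client.Client, loc *velerov1api.BackupStorageLocation) error {
	statusPatch := client.MergeFrom(loc.DeepCopy())
	loc.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()}
	return c.Status().Patch(ctx, loc, statusPatch)
}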
@@ -365,34 +342,11 @@ func (c *backupSyncController) deleteOrphanedBackups(locationName string, backup
if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
continue
}
if err := c.backupClient.Backups(backup.Namespace).Delete(context.TODO(), backup.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
} else {
log.Debug("Deleted orphaned backup from cluster")
c.deleteCSISnapshotsByBackup(backup.Name, log)
}
}
}
func (c *backupSyncController) deleteCSISnapshotsByBackup(backupName string, log logrus.FieldLogger) {
if !features.IsEnabled(velerov1api.CSIFeatureFlag) {
return
}
m := client.MatchingLabels{velerov1api.BackupNameLabel: label.GetValidName(backupName)}
if vsList, err := c.csiVSLister.List(label.NewSelectorForBackup(label.GetValidName(backupName))); err != nil {
log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
} else {
for _, vs := range vsList {
name := kube.NamespaceAndName(vs.GetObjectMeta())
log.Debugf("Deleting volumesnapshot %s", name)
if err := c.kbClient.Delete(context.TODO(), vs); err != nil {
log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
}
}
}
vsc := &snapshotv1api.VolumeSnapshotContent{}
log.Debugf("Deleting volumesnapshotcontents for backup: %s", backupName)
if err := c.kbClient.DeleteAllOf(context.TODO(), vsc, m); err != nil {
log.WithError(err).Warnf("Failed to delete volumesnapshotcontents for backup: %s", backupName)
}
}


@@ -344,7 +344,6 @@ func TestBackupSyncControllerRun(t *testing.T) {
fakeClient,
client.VeleroV1(),
sharedInformers.Velero().V1().Backups().Lister(),
nil, // csiVSLister
time.Duration(0),
test.namespace,
nil, // csiSnapshotClient
@@ -566,7 +565,6 @@ func TestDeleteOrphanedBackups(t *testing.T) {
fakeClient,
client.VeleroV1(),
sharedInformers.Velero().V1().Backups().Lister(),
nil, // csiVSLister
time.Duration(0),
test.namespace,
nil, // csiSnapshotClient
@@ -661,7 +659,6 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) {
fakeClient,
client.VeleroV1(),
sharedInformers.Velero().V1().Backups().Lister(),
nil, // csiVSLister
time.Duration(0),
test.namespace,
nil, // csiSnapshotClient


@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -67,10 +68,16 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, errors.WithStack(err)
}
original := downloadRequest.DeepCopy()
// Initialize the patch helper.
patchHelper, err := patch.NewHelper(downloadRequest, r.Client)
if err != nil {
log.WithError(err).Error("Error getting a patch helper to update this resource")
return ctrl.Result{}, errors.WithStack(err)
}
defer func() {
// Always attempt to Patch the downloadRequest object and status after each reconciliation.
if err := r.Client.Patch(ctx, downloadRequest, kbclient.MergeFrom(original)); err != nil {
if err := patchHelper.Patch(ctx, downloadRequest); err != nil {
log.WithError(err).Error("Error updating download request")
return
}
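Both variants of this hunk implement the same reconcile idiom: capture the object's state on entry, then patch unconditionally on exit so whatever status transitions Reconcile performed are persisted even on early returns. Stripped to its skeleton (a sketch; obj stands for any client.Object):

// At the top of Reconcile:
original := obj.DeepCopy()
defer func() {
	// Runs on every return path, diffing obj against the entry snapshot.
	if err := r.Client.Patch(ctx, obj, kbclient.MergeFrom(original)); err != nil {
		log.WithError(err).Error("error persisting object on reconcile exit")
	}
}()

The cluster-api patch.NewHelper used on the other side of the diff captures the same snapshot internally and adds status-subresource awareness.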

Some files were not shown because too many files have changed in this diff