mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-01-29 08:02:07 +00:00
Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4cccd57cdd | ||
|
|
eccaa81af5 |
1
.github/ISSUE_TEMPLATE/bug_report.md
vendored
1
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -28,7 +28,6 @@ about: Tell us about a problem you are experiencing
|
||||
**Environment:**
|
||||
|
||||
- Velero version (use `velero version`):
|
||||
- Velero features (use `velero client config get features`):
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- Kubernetes installer & version:
|
||||
- Cloud provider or hardware configuration:
|
||||
|
||||
14
.github/workflows/pr.yml
vendored
14
.github/workflows/pr.yml
vendored
@@ -1,14 +0,0 @@
|
||||
name: Pull Request CI Check
|
||||
on: [pull_request]
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Run CI
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Make ci
|
||||
run: make ci
|
||||
34
.github/workflows/push.yml
vendored
34
.github/workflows/push.yml
vendored
@@ -1,34 +0,0 @@
|
||||
name: Master CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Set up Go 1.14
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.14
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Build
|
||||
run: make local
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Publish container image
|
||||
run: |
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
./hack/docker-push.sh
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -28,7 +28,6 @@ debug
|
||||
|
||||
/velero
|
||||
.idea/
|
||||
Tiltfile
|
||||
|
||||
.container-*
|
||||
.vimrc
|
||||
|
||||
@@ -41,7 +41,7 @@ builds:
|
||||
- goos: windows
|
||||
goarch: ppc64le
|
||||
ldflags:
|
||||
- -X "github.com/vmware-tanzu/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
|
||||
- -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
|
||||
archives:
|
||||
- name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}"
|
||||
wrap_in_directory: true
|
||||
@@ -52,7 +52,7 @@ checksum:
|
||||
name_template: 'CHECKSUM'
|
||||
release:
|
||||
github:
|
||||
owner: vmware-tanzu
|
||||
owner: heptio
|
||||
name: velero
|
||||
draft: true
|
||||
prerelease: auto
|
||||
|
||||
11
.travis.yml
Normal file
11
.travis.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
|
||||
sudo: required
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
script: hack/ci-check.sh
|
||||
104
ADOPTERS.md
104
ADOPTERS.md
@@ -1,104 +0,0 @@
|
||||
# Velero Adopters
|
||||
|
||||
If you're using Velero and want to add your organization to this list,
|
||||
[follow these directions][1]!
|
||||
|
||||
<a href="https://www.bitgo.com" border="0" target="_blank"><img alt="bitgo.com" src="site/img/adopters/BitGo.svg" height="50"></a>
|
||||
<a href="https://www.nirmata.com" border="0" target="_blank"><img alt="nirmata.com" src="site/img/adopters/nirmata.svg" height="50"></a>
|
||||
<a href="https://kyma-project.io/" border="0" target="_blank"><img alt="kyma-project.io" src="site/img/adopters/kyma.svg" height="50"></a>
|
||||
<a href="https://redhat.com/" border="0" target="_blank"><img alt="redhat.com" src="site/img/adopters/redhat.svg" height="50"></a>
|
||||
<a href="https://dellemc.com/" border="0" target="_blank"><img alt="dellemc.com" src="site/img/adopters/DellEMC.png" height="50"></a>
|
||||
<a href="https://bugsnag.com/" border="0" target="_blank"><img alt="bugsnag.com" src="site/img/adopters/bugsnag.svg" height="50"></a>
|
||||
<a href="https://okteto.com/" border="0" target="_blank"><img alt="okteto.com" src="site/img/adopters/okteto.svg" height="50"></a>
|
||||
<a href="https://banzaicloud.com/" border="0" target="_blank"><img alt="banzaicloud.com" src="site/img/adopters/banzaicloud.svg" height="50"></a>
|
||||
<a href="https://sighup.io/" border="0" target="_blank"><img alt="sighup.io" src="site/img/adopters/sighup.svg" height="50"></a>
|
||||
<a href="https://mayadata.io/" border="0" target="_blank"><img alt="mayadata.io" src="site/img/adopters/mayadata.svg" height="50"></a>
|
||||
|
||||
## Success Stories
|
||||
|
||||
Below is a list of adopters of Velero in **production environments** that have
|
||||
publicly shared the details of how they use it.
|
||||
|
||||
**[BitGo][20]**
|
||||
BitGo uses Velero backup and restore capabilities to seamlessly provision and scale fullnode statefulsets on the fly as well as having it serve an integral piece for our kubernetes disaster-recovery story.
|
||||
|
||||
**[Bugsnag][30]**
|
||||
We use Velero for managing backups of an internal instance of our on-premise clustered solution. We also recommend our users of [on-premise Bugsnag installations][31] use Velero for [managing their own backups][32].
|
||||
|
||||
**[Banzai Cloud][60]**
|
||||
[Banzai Cloud Pipeline][61] is a Kubernetes-based microservices platform that integrates services needed for Day-1 and Day-2 operations along with first-class support both for on-prem and hybrid multi-cloud deployments. We use Velero to periodically [backup and restore these clusters in case of disasters][62].
|
||||
|
||||
## Solutions built with Velero
|
||||
|
||||
Below is a list of solutions where Velero is being used as a component.
|
||||
|
||||
**[Nirmata][10]**
|
||||
We have integrated our [solution with Velero][11] to provide our customers with out of box backup/DR.
|
||||
|
||||
**[Kyma][40]**
|
||||
Kyma [integrates with Velero][41] to effortlessly back up and restore Kyma clusters with all its resources. Velero capabilities allow Kyma users to define and run manual and scheduled backups in order to successfully handle a disaster-recovery scenario.
|
||||
|
||||
**[Red Hat][50]**
|
||||
Red Hat has developed the [Cluster Application Migration Tool][51] which uses [Velero and Restic][52] to drive the migration of applications between OpenShift clusters.
|
||||
|
||||
**[Dell EMC][70]**
|
||||
For Kubernetes environments, [PowerProtect Data Manager][71] leverages the Container Storage Interface (CSI) framework to take snapshots to back up the persistent data or the data that the application creates e.g. databases. [Dell EMC leverages Velero][72] to backup the namespace configuration files (also known as Namespace meta data) for enterprise grade data protection.
|
||||
|
||||
**[SIGHUP][80]**
|
||||
SIGHUP integrates Velero in its [Fury Kubernetes Distribution][81] providing predefined schedules and configurations to ensure an optimized disaster recovery experience.
|
||||
[Fury Kubernetes Disaster Recovery Module][82] is ready to be deployed into any Kubernetes cluster running anywhere.
|
||||
|
||||
**[MayaData][90]**
|
||||
MayaData is a large user of Velero as well as a contributor. MayaData offers a Data Agility platform called [OpenEBS Director][91], that helps customers confidently and easily manage stateful workloads in Kubernetes. Velero is one of the core software building block of the OpenEBS Director's [DMaaS or data migration as a service offering][92] used to enable data protection strategies.
|
||||
|
||||
**[Okteto][93]**
|
||||
Okteto integrates Velero in [Okteto Cloud][94] and [Okteto Enterprise][95] to periodically backup and restore our clusters for disaster recovery. Velero is also a core software building block to provide namespace cloning capabilities, a feature that allows our users cloning staging environments into their personal development namespace for providing production-like development environments.
|
||||
|
||||
## Adding your organization to the list of Velero Adopters
|
||||
|
||||
If you are using Velero and would like to be included in the list of `Velero Adopters`, add an SVG version of your logo to the `site/img/adopters` directory in this repo and submit a [pull request][3] with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). See this for an example [PR][4].
|
||||
|
||||
### Adding a logo to velero.io
|
||||
|
||||
If you would like to add your logo to a future `Adopters of Velero` section on [velero.io][2], follow the steps above to add your organization to the list of Velero Adopters. Our community will follow up and publish it to the [velero.io][2] website.
|
||||
|
||||
[1]: #adding-a-logo-to-veleroio
|
||||
[2]: https://velero.io
|
||||
[3]: https://github.com/vmware-tanzu/velero/pulls
|
||||
[4]: https://github.com/vmware-tanzu/velero/pull/2242
|
||||
|
||||
[10]: https://www.nirmata.com/2019/08/14/kubernetes-disaster-recovery-using-velero-and-nirmata/
|
||||
[11]: https://nirmata.com
|
||||
|
||||
[20]: https://bitgo.com
|
||||
|
||||
[30]: https://bugsnag.com
|
||||
[31]: https://www.bugsnag.com/on-premise
|
||||
[32]: https://docs.bugsnag.com/on-premise/clustered/backup-restore/
|
||||
|
||||
[40]: https://kyma-project.io
|
||||
[41]: https://kyma-project.io/docs/components/backup/#overview-overview
|
||||
|
||||
[50]: https://redhat.com
|
||||
[51]: https://github.com/fusor/mig-operator
|
||||
[52]: https://github.com/fusor/mig-operator/blob/master/docs/usage/2.md
|
||||
|
||||
[60]: https://banzaicloud.com
|
||||
[61]: https://banzaicloud.com/products/pipeline/
|
||||
[62]: https://banzaicloud.com/blog/vault-backup-velero/
|
||||
|
||||
[70]: https://dellemc.com
|
||||
[71]: https://dellemc.com/dataprotection
|
||||
[72]: https://www.dellemc.com/resources/en-us/asset/briefs-handouts/solutions/h18141-dellemc-dpd-kubernetes.pdf
|
||||
|
||||
[80]: https://sighup.io
|
||||
[81]: https://github.com/sighupio/fury-distribution
|
||||
[82]: https://github.com/sighupio/fury-kubernetes-dr
|
||||
|
||||
[90]: https://mayadata.io
|
||||
[91]: https://director.mayadata.io/
|
||||
[92]: https://help.mayadata.io/hc/en-us/articles/360033401591-DMaaS
|
||||
|
||||
[93]: https://okteto.com
|
||||
[94]: https://cloud.okteto.com
|
||||
[95]: https://okteto.com/enterprise/
|
||||
32
CHANGELOG.md
32
CHANGELOG.md
@@ -1,13 +1,10 @@
|
||||
## Current release:
|
||||
* [CHANGELOG-1.4.md][14]
|
||||
* [CHANGELOG-1.1.md][11]
|
||||
|
||||
## Development release:
|
||||
* [Unreleased Changes][0]
|
||||
|
||||
## Older releases:
|
||||
* [CHANGELOG-1.3.md][13]
|
||||
* [CHANGELOG-1.2.md][12]
|
||||
* [CHANGELOG-1.1.md][11]
|
||||
* [CHANGELOG-1.0.md][10]
|
||||
* [CHANGELOG-0.11.md][9]
|
||||
* [CHANGELOG-0.10.md][8]
|
||||
@@ -20,18 +17,15 @@
|
||||
* [CHANGELOG-0.3.md][1]
|
||||
|
||||
|
||||
[14]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.4.md
|
||||
[13]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.3.md
|
||||
[12]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.2.md
|
||||
[11]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.1.md
|
||||
[10]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.0.md
|
||||
[9]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.11.md
|
||||
[8]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.10.md
|
||||
[7]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.9.md
|
||||
[6]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.8.md
|
||||
[5]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.7.md
|
||||
[4]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.6.md
|
||||
[3]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.5.md
|
||||
[2]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.4.md
|
||||
[1]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-0.3.md
|
||||
[0]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/unreleased
|
||||
[11]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-1.1.md
|
||||
[10]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-1.0.md
|
||||
[9]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.11.md
|
||||
[8]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.10.md
|
||||
[7]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.9.md
|
||||
[6]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.8.md
|
||||
[5]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.7.md
|
||||
[4]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.6.md
|
||||
[3]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.5.md
|
||||
[2]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.4.md
|
||||
[1]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.3.md
|
||||
[0]: https://github.com/heptio/velero/blob/master/changelogs/unreleased
|
||||
|
||||
@@ -1,84 +1,37 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
# Velero Community Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
## Contributor Code of Conduct
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
## Our Standards
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
## Enforcement Responsibilities
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer(s).
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [oss-coc@vmware.com](mailto:oss-coc@vmware.com). All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0,
|
||||
available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
|
||||
This Code of Conduct is adapted from the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and [Contributor Covenant](http://contributor-covenant.org/version/1/2/0/), version 1.2.0.
|
||||
|
||||
@@ -1,3 +1,70 @@
|
||||
# Contributing
|
||||
|
||||
Authors are expected to follow some guidelines when submitting PRs. Please see [our documentation](https://velero.io/docs/master/code-standards/) for details.
|
||||
## CHANGELOG
|
||||
|
||||
Authors are expected to include a changelog file with their pull requests. The changelog file
|
||||
should be a new file created in the `changelogs/unreleased` folder. The file should follow the
|
||||
naming convention of `pr-username` and the contents of the file should be your text for the
|
||||
changelog.
|
||||
|
||||
velero/changelogs/unreleased <- folder
|
||||
000-username <- file
|
||||
|
||||
|
||||
## DCO Sign off
|
||||
|
||||
All authors to the project retain copyright to their work. However, to ensure
|
||||
that they are only submitting work that they have rights to, we are requiring
|
||||
everyone to acknowledge this by signing their work.
|
||||
|
||||
Any copyright notices in this repo should specify the authors as "the Velero contributors".
|
||||
|
||||
To sign your work, just add a line like this at the end of your commit message:
|
||||
|
||||
```
|
||||
Signed-off-by: Joe Beda <joe@heptio.com>
|
||||
```
|
||||
|
||||
This can easily be done with the `--signoff` option to `git commit`.
|
||||
|
||||
By doing this you state that you can certify the following (from https://developercertificate.org/):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
19
Dockerfile-fsfreeze-pause
Normal file
19
Dockerfile-fsfreeze-pause
Normal file
@@ -0,0 +1,19 @@
|
||||
# Copyright 2018, 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"]
|
||||
19
Dockerfile-fsfreeze-pause-ppc64le
Normal file
19
Dockerfile-fsfreeze-pause-ppc64le
Normal file
@@ -0,0 +1,19 @@
|
||||
# Copyright 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"]
|
||||
@@ -12,15 +12,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:focal
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ca-certificates wget bzip2 && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.6/restic_0.9.6_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.6_linux_amd64.bz2 && \
|
||||
mv restic_0.9.6_linux_amd64 /usr/bin/restic && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.4_linux_amd64.bz2 && \
|
||||
mv restic_0.9.4_linux_amd64 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic && \
|
||||
apt-get remove -y wget bzip2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
@@ -28,6 +28,6 @@ RUN apt-get update && \
|
||||
|
||||
ADD /bin/linux/amd64/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm32v7/ubuntu:focal
|
||||
|
||||
ADD /bin/linux/arm/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/arm/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
@@ -1,23 +0,0 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm64v8/ubuntu:focal
|
||||
|
||||
ADD /bin/linux/arm64/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/arm64/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
@@ -12,14 +12,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ppc64le/ubuntu:focal
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Prajyot Parab <prajyot.parab@ibm.com>"
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ca-certificates wget && \
|
||||
wget --quiet https://oplab9.parqtec.unicamp.br/pub/ppc64el/restic/restic-0.9.4 && \
|
||||
mv restic-0.9.4 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic && \
|
||||
apt-get remove -y wget && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD /bin/linux/ppc64le/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/ppc64le/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
|
||||
@@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:focal
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ADD /bin/linux/amd64/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nogroup
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm32v7/ubuntu:focal
|
||||
|
||||
ADD /bin/linux/arm/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
@@ -1,21 +0,0 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm64v8/ubuntu:focal
|
||||
|
||||
ADD /bin/linux/arm64/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
@@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ppc64le/ubuntu:focal
|
||||
FROM ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Prajyot Parab <prajyot.parab@ibm.com>"
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ADD /bin/linux/ppc64le/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nogroup
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
|
||||
1146
Gopkg.lock
generated
Normal file
1146
Gopkg.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
134
Gopkg.toml
Normal file
134
Gopkg.toml
Normal file
@@ -0,0 +1,134 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
[prune]
|
||||
unused-packages = true
|
||||
non-go = true
|
||||
go-tests = true
|
||||
|
||||
#
|
||||
# Kubernetes packages
|
||||
#
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "~1.14"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "kubernetes-1.14.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.14.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.14.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
version = "kubernetes-1.14.0"
|
||||
|
||||
# k8s.io/client-go kubernetes-1.14.0 uses v1.1.4
|
||||
[[override]]
|
||||
name = "github.com/json-iterator/go"
|
||||
version = "=1.1.4"
|
||||
|
||||
#
|
||||
# Cloud provider packages
|
||||
#
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.13.12"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
version = "~21.4.0"
|
||||
|
||||
# k8s.io/client-go kubernetes-1.14.0 uses v11.1.2
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
version = "11.1.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
version = "0.11.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/api"
|
||||
version = "~v0.3.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/oauth2"
|
||||
branch = "master"
|
||||
|
||||
#
|
||||
# Third party packages
|
||||
#
|
||||
[[constraint]]
|
||||
name = "github.com/robfig/cron"
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "~1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/afero"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/cobra"
|
||||
version = "0.0.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/pflag"
|
||||
version = "1.0.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "~1.2.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
revision = "a1bc61569a26c0f65865932c0d55743b0567c494"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "~v1.3.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
version = "~v1.19.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/joho/godotenv"
|
||||
version = "~v1.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gobwas/glob"
|
||||
version = "~v0.2.3"
|
||||
|
||||
[[override]]
|
||||
name = "golang.org/x/sys"
|
||||
branch = "master"
|
||||
159
Makefile
159
Makefile
@@ -18,10 +18,10 @@
|
||||
BIN ?= velero
|
||||
|
||||
# This repo's root import path (under GOPATH).
|
||||
PKG := github.com/vmware-tanzu/velero
|
||||
PKG := github.com/heptio/velero
|
||||
|
||||
# Where to push the docker image.
|
||||
REGISTRY ?= velero
|
||||
REGISTRY ?= gcr.io/heptio-images
|
||||
|
||||
# Which architecture to build - see $(ALL_ARCH) for options.
|
||||
# if the 'local' rule is being run, detect the ARCH from 'go env'
|
||||
@@ -33,57 +33,36 @@ VERSION ?= master
|
||||
|
||||
TAG_LATEST ?= false
|
||||
|
||||
# The version of restic binary to be downloaded for power architecture
|
||||
RESTIC_VERSION ?= 0.9.6
|
||||
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 windows-amd64 linux-ppc64le
|
||||
CONTAINER_PLATFORMS ?= linux-amd64 linux-ppc64le linux-arm linux-arm64
|
||||
MANIFEST_PLATFORMS ?= amd64 ppc64le arm arm64
|
||||
|
||||
# set git sha and tree state
|
||||
GIT_SHA = $(shell git rev-parse HEAD)
|
||||
GIT_DIRTY = $(shell git status --porcelain 2> /dev/null)
|
||||
|
||||
###
|
||||
### These variables should not need tweaking.
|
||||
###
|
||||
|
||||
CLI_PLATFORMS := linux-amd64 linux-arm linux-arm64 darwin-amd64 windows-amd64 linux-ppc64le
|
||||
CONTAINER_PLATFORMS := linux-amd64 linux-arm linux-arm64 linux-ppc64le
|
||||
|
||||
platform_temp = $(subst -, ,$(ARCH))
|
||||
GOOS = $(word 1, $(platform_temp))
|
||||
GOARCH = $(word 2, $(platform_temp))
|
||||
|
||||
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
|
||||
# Set default base image dynamically for each arch
|
||||
ifeq ($(GOARCH),amd64)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)
|
||||
local-arch:
|
||||
@echo "local environment for amd64 is up-to-date"
|
||||
endif
|
||||
ifeq ($(GOARCH),arm)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-arm
|
||||
local-arch:
|
||||
@mkdir -p _output/bin/linux/arm/
|
||||
@wget -q -O - https://github.com/restic/restic/releases/download/v$(RESTIC_VERSION)/restic_$(RESTIC_VERSION)_linux_arm.bz2 | bunzip2 > _output/bin/linux/arm/restic
|
||||
@chmod a+x _output/bin/linux/arm/restic
|
||||
endif
|
||||
ifeq ($(GOARCH),arm64)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-arm64
|
||||
local-arch:
|
||||
@mkdir -p _output/bin/linux/arm64/
|
||||
@wget -q -O - https://github.com/restic/restic/releases/download/v$(RESTIC_VERSION)/restic_$(RESTIC_VERSION)_linux_arm64.bz2 | bunzip2 > _output/bin/linux/arm64/restic
|
||||
@chmod a+x _output/bin/linux/arm64/restic
|
||||
endif
|
||||
#ifeq ($(GOARCH),arm)
|
||||
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
|
||||
#endif
|
||||
#ifeq ($(GOARCH),arm64)
|
||||
# DOCKERFILE ?= Dockerfile.arm64 #aarch64/busybox
|
||||
#endif
|
||||
ifeq ($(GOARCH),ppc64le)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-ppc64le
|
||||
local-arch:
|
||||
RESTIC_VERSION=$(RESTIC_VERSION) \
|
||||
./hack/get-restic-ppc64le.sh
|
||||
endif
|
||||
|
||||
MULTIARCH_IMAGE = $(REGISTRY)/$(BIN)
|
||||
IMAGE ?= $(REGISTRY)/$(BIN)-$(GOARCH)
|
||||
IMAGE = $(REGISTRY)/$(BIN)
|
||||
|
||||
# If you want to build all binaries, see the 'all-build' rule.
|
||||
# If you want to build all containers, see the 'all-containers' rule.
|
||||
# If you want to build all containers, see the 'all-container' rule.
|
||||
# If you want to build AND push all containers, see the 'all-push' rule.
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@@ -91,25 +70,18 @@ all:
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restic-restore-helper
|
||||
|
||||
container-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* container
|
||||
@$(MAKE) --no-print-directory ARCH=$* container BIN=velero-restic-restore-helper
|
||||
#container-%:
|
||||
# @$(MAKE) --no-print-directory ARCH=$* container
|
||||
|
||||
push-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* push
|
||||
@$(MAKE) --no-print-directory ARCH=$* push BIN=velero-restic-restore-helper
|
||||
#push-%:
|
||||
# @$(MAKE) --no-print-directory ARCH=$* push
|
||||
|
||||
all-build: $(addprefix build-, $(CLI_PLATFORMS))
|
||||
|
||||
all-containers: $(addprefix container-, $(CONTAINER_PLATFORMS))
|
||||
#all-container: $(addprefix container-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
all-push: $(addprefix push-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
all-manifests:
|
||||
@$(MAKE) manifest
|
||||
@$(MAKE) manifest BIN=velero-restic-restore-helper
|
||||
#all-push: $(addprefix push-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
local: build-dirs
|
||||
GOOS=$(GOOS) \
|
||||
@@ -117,8 +89,6 @@ local: build-dirs
|
||||
VERSION=$(VERSION) \
|
||||
PKG=$(PKG) \
|
||||
BIN=$(BIN) \
|
||||
GIT_SHA=$(GIT_SHA) \
|
||||
GIT_DIRTY="$(GIT_DIRTY)" \
|
||||
OUTPUT_DIR=$$(pwd)/_output/bin/$(GOOS)/$(GOARCH) \
|
||||
./hack/build.sh
|
||||
|
||||
@@ -132,8 +102,6 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
|
||||
VERSION=$(VERSION) \
|
||||
PKG=$(PKG) \
|
||||
BIN=$(BIN) \
|
||||
GIT_SHA=$(GIT_SHA) \
|
||||
GIT_DIRTY=\"$(GIT_DIRTY)\" \
|
||||
OUTPUT_DIR=/output/$(GOOS)/$(GOARCH) \
|
||||
./hack/build.sh'"
|
||||
|
||||
@@ -143,32 +111,48 @@ BUILDER_IMAGE := velero-builder
|
||||
|
||||
# Example: make shell CMD="date > datefile"
|
||||
shell: build-dirs build-image
|
||||
@# bind-mount the Velero root dir in at /github.com/vmware-tanzu/velero
|
||||
@# because the Kubernetes code-generator tools require the project to
|
||||
@# exist in a directory hierarchy ending like this (but *NOT* necessarily
|
||||
@# under $GOPATH).
|
||||
@# the volume bind-mount of $PWD/vendor/k8s.io/api is needed for code-gen to
|
||||
@# function correctly (ref. https://github.com/kubernetes/kubernetes/pull/64567)
|
||||
@docker run \
|
||||
-e GOFLAGS \
|
||||
-i $(TTY) \
|
||||
--rm \
|
||||
-u $$(id -u):$$(id -g) \
|
||||
-v "$$(pwd):/github.com/vmware-tanzu/velero:delegated" \
|
||||
-v "$$(pwd)/_output/bin:/output:delegated" \
|
||||
-v "$$(pwd)/vendor/k8s.io/api:/go/src/k8s.io/api:delegated" \
|
||||
-v "$$(pwd)/.go/pkg:/go/pkg:delegated" \
|
||||
-v "$$(pwd)/.go/std:/go/std:delegated" \
|
||||
-v "$$(pwd):/go/src/$(PKG):delegated" \
|
||||
-v "$$(pwd)/_output/bin:/output:delegated" \
|
||||
-v "$$(pwd)/.go/std/$(GOOS)/$(GOARCH):/usr/local/go/pkg/$(GOOS)_$(GOARCH)_static:delegated" \
|
||||
-v "$$(pwd)/.go/go-build:/.cache/go-build:delegated" \
|
||||
-w /github.com/vmware-tanzu/velero \
|
||||
-w /go/src/$(PKG) \
|
||||
$(BUILDER_IMAGE) \
|
||||
/bin/sh $(CMD)
|
||||
|
||||
DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
|
||||
|
||||
# Use a slightly customized build/push targets since we don't have a Go binary to build for the fsfreeze image
|
||||
build-fsfreeze: BIN = fsfreeze-pause
|
||||
build-fsfreeze:
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN).alpine
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN).alpine _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
push-fsfreeze: BIN = fsfreeze-pause
|
||||
push-fsfreeze:
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest
|
||||
docker push $(IMAGE):latest
|
||||
endif
|
||||
@docker images -q $(REGISTRY)/fsfreeze-pause:$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
all-containers:
|
||||
$(MAKE) container
|
||||
$(MAKE) container BIN=velero-restic-restore-helper
|
||||
$(MAKE) build-fsfreeze
|
||||
|
||||
container: local-arch .container-$(DOTFILE_IMAGE) container-name
|
||||
container: verify test .container-$(DOTFILE_IMAGE) container-name
|
||||
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
|
||||
@@ -177,6 +161,12 @@ container: local-arch .container-$(DOTFILE_IMAGE) container-name
|
||||
container-name:
|
||||
@echo "container: $(IMAGE):$(VERSION)"
|
||||
|
||||
all-push:
|
||||
$(MAKE) push
|
||||
$(MAKE) push BIN=velero-restic-restore-helper
|
||||
$(MAKE) push-fsfreeze
|
||||
|
||||
|
||||
push: .push-$(DOTFILE_IMAGE) push-name
|
||||
.push-$(DOTFILE_IMAGE): .container-$(DOTFILE_IMAGE)
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
@@ -189,20 +179,6 @@ endif
|
||||
push-name:
|
||||
@echo "pushed: $(IMAGE):$(VERSION)"
|
||||
|
||||
manifest: .manifest-$(MULTIARCH_IMAGE) manifest-name
|
||||
.manifest-$(MULTIARCH_IMAGE):
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(MULTIARCH_IMAGE):$(VERSION) \
|
||||
$(foreach arch, $(MANIFEST_PLATFORMS), $(MULTIARCH_IMAGE)-$(arch):$(VERSION))
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(MULTIARCH_IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(MULTIARCH_IMAGE):latest \
|
||||
$(foreach arch, $(MANIFEST_PLATFORMS), $(MULTIARCH_IMAGE)-$(arch):latest)
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(MULTIARCH_IMAGE):latest
|
||||
endif
|
||||
|
||||
manifest-name:
|
||||
@echo "pushed: $(MULTIARCH_IMAGE):$(VERSION)"
|
||||
|
||||
SKIP_TESTS ?=
|
||||
test: build-dirs
|
||||
ifneq ($(SKIP_TESTS), 1)
|
||||
@@ -234,42 +210,13 @@ clean:
|
||||
rm -rf .go _output
|
||||
docker rmi $(BUILDER_IMAGE)
|
||||
|
||||
.PHONY: modules
|
||||
modules:
|
||||
go mod tidy
|
||||
|
||||
.PHONY: verify-modules
|
||||
verify-modules: modules
|
||||
@if !(git diff --quiet HEAD -- go.sum go.mod); then \
|
||||
echo "go module files are out of date, please commit the changes to go.mod and go.sum"; exit 1; \
|
||||
fi
|
||||
|
||||
ci: verify-modules verify all test
|
||||
ci: all verify test
|
||||
|
||||
changelog:
|
||||
hack/changelog.sh
|
||||
|
||||
# release builds a GitHub release using goreleaser within the build container.
|
||||
#
|
||||
# To dry-run the release, which will build the binaries/artifacts locally but
|
||||
# will *not* create a GitHub release:
|
||||
# GITHUB_TOKEN=an-invalid-token-so-you-dont-accidentally-push-release \
|
||||
# RELEASE_NOTES_FILE=changelogs/CHANGELOG-1.2.md \
|
||||
# PUBLISH=false \
|
||||
# make release
|
||||
#
|
||||
# To run the release, which will publish a *DRAFT* GitHub release in github.com/vmware-tanzu/velero
|
||||
# (you still need to review/publish the GitHub release manually):
|
||||
# GITHUB_TOKEN=your-github-token \
|
||||
# RELEASE_NOTES_FILE=changelogs/CHANGELOG-1.2.md \
|
||||
# PUBLISH=true \
|
||||
# make release
|
||||
release:
|
||||
$(MAKE) shell CMD="-c '\
|
||||
GITHUB_TOKEN=$(GITHUB_TOKEN) \
|
||||
RELEASE_NOTES_FILE=$(RELEASE_NOTES_FILE) \
|
||||
PUBLISH=$(PUBLISH) \
|
||||
./hack/goreleaser.sh'"
|
||||
hack/goreleaser.sh
|
||||
|
||||
serve-docs:
|
||||
docker run \
|
||||
@@ -298,7 +245,7 @@ serve-docs:
|
||||
# tagged version. Once the unstaged changes are ready, they can be added to the
|
||||
# staging area using 'git add' and then committed.
|
||||
#
|
||||
# To run gen-docs: "NEW_DOCS_VERSION=v1.4 VELERO_VERSION=v1.4.0 make gen-docs"
|
||||
# To run gen-docs: "NEW_DOCS_VERSION=v1.1.0 make gen-docs"
|
||||
#
|
||||
# **NOTE**: there are additional manual steps required to finalize the process of generating
|
||||
# a new versioned docs site. The full process is documented in site/README-JEKYLL.md.
|
||||
|
||||
50
README.md
50
README.md
@@ -4,7 +4,7 @@
|
||||
|
||||
## Overview
|
||||
|
||||
Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. You can run Velero with a public cloud platform or on-premises. Velero lets you:
|
||||
Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Velero lets you:
|
||||
|
||||
* Take backups of your cluster and restore in case of loss.
|
||||
* Migrate cluster resources to other clusters.
|
||||
@@ -15,9 +15,18 @@ Velero consists of:
|
||||
* A server that runs on your cluster
|
||||
* A command-line client that runs locally
|
||||
|
||||
## Documentation
|
||||
You can run Velero in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].
|
||||
|
||||
[The documentation][29] provides a getting started guide and information about building from source, architecture, extending Velero, and more.
|
||||
## Installation
|
||||
|
||||
We strongly recommend that you use an [official release][6] of Velero. The tarballs for each release contain the
|
||||
`velero` command-line client. Follow the [installation instructions][28] to get started.
|
||||
|
||||
_The code and sample YAML files in the master branch of the Velero repository are under active development and are not guaranteed to be stable. Use them at your own risk!_
|
||||
|
||||
## More information
|
||||
|
||||
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Velero, and more.
|
||||
|
||||
Please use the version selector at the top of the site to ensure you are using the appropriate documentation for your version of Velero.
|
||||
|
||||
@@ -27,24 +36,45 @@ If you encounter issues, review the [troubleshooting docs][30], [file an issue][
|
||||
|
||||
## Contributing
|
||||
|
||||
If you are ready to jump in and test, add code, or help with documentation, follow the instructions on our [Start contributing][31] documentation for guidance on how to setup Velero for development.
|
||||
Thanks for taking the time to join our community and start contributing!
|
||||
|
||||
Feedback and discussion are available on [the mailing list][24].
|
||||
|
||||
### Before you start
|
||||
|
||||
* Please familiarize yourself with the [Code of Conduct][8] before contributing.
|
||||
* See [CONTRIBUTING.md][5] for instructions on the developer certificate of origin that we require.
|
||||
* Read how [we're using ZenHub][26] for project and roadmap planning
|
||||
|
||||
### Pull requests
|
||||
|
||||
* We welcome pull requests. Feel free to dig through the [issues][4] and jump in.
|
||||
|
||||
## Changelog
|
||||
|
||||
See [the list of releases][6] to find out about feature changes.
|
||||
|
||||
[1]: https://github.com/vmware-tanzu/velero/workflows/Master%20CI/badge.svg
|
||||
[2]: https://github.com/vmware-tanzu/velero/actions?query=workflow%3A"Master+CI"
|
||||
[4]: https://github.com/vmware-tanzu/velero/issues
|
||||
[6]: https://github.com/vmware-tanzu/velero/releases
|
||||
[1]: https://travis-ci.org/heptio/velero.svg?branch=master
|
||||
[2]: https://travis-ci.org/heptio/velero
|
||||
|
||||
[4]: https://github.com/heptio/velero/issues
|
||||
[5]: https://github.com/heptio/velero/blob/master/CONTRIBUTING.md
|
||||
[6]: https://github.com/heptio/velero/releases
|
||||
|
||||
[8]: https://github.com/heptio/velero/blob/master/CODE_OF_CONDUCT.md
|
||||
[9]: https://kubernetes.io/docs/setup/
|
||||
[10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos
|
||||
[11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1
|
||||
[12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md
|
||||
[14]: https://github.com/kubernetes/kubernetes
|
||||
|
||||
[24]: https://groups.google.com/forum/#!forum/projectvelero
|
||||
[25]: https://kubernetes.slack.com/messages/velero
|
||||
[26]: https://velero.io/docs/zenhub
|
||||
|
||||
[28]: https://velero.io/docs/install-overview
|
||||
[29]: https://velero.io/docs/
|
||||
[30]: https://velero.io/docs/troubleshooting
|
||||
[31]: https://velero.io/docs/start-contributing
|
||||
[100]: https://velero.io/docs/master/img/velero.png
|
||||
|
||||
[99]: https://velero.io/docs/support-matrix
|
||||
[100]: /site/docs/master/img/velero.png
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
# Velero Support
|
||||
|
||||
Thanks for trying out Velero! We welcome all feedback, find all the ways to connect with us on our Community page:
|
||||
Thanks for trying out Velero! We welcome all feedback, please consider joining our mailing list:
|
||||
|
||||
- [Velero Community](https://velero.io/community/)
|
||||
|
||||
You can find details on the Velero maintainers' support process [here](https://velero.io/docs/master/support-process/).
|
||||
- [Mailing List](https://groups.google.com/forum/#!forum/projectvelero)
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
## v1.2.0
|
||||
#### 2019-11-07
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.2.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.2.0`
|
||||
|
||||
Please note that as of this release we are no longer publishing new container images to `gcr.io/heptio-images`. The existing ones will remain there for the foreseeable future.
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.2.0/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.2.0/upgrade-to-1.2/
|
||||
|
||||
### Highlights
|
||||
## Moving Cloud Provider Plugins Out of Tree
|
||||
|
||||
Velero has had built-in support for AWS, Microsoft Azure, and Google Cloud Platform (GCP) since day 1. When Velero moved to a plugin architecture for object store providers and volume snapshotters in version 0.6, the code for these three providers was converted to use the plugin interface provided by this new architecture, but the cloud provider code still remained inside the Velero codebase. This put the AWS, Azure, and GCP plugins in a different position compared with other providers’ plugins, since they automatically shipped with the Velero binary and could include documentation in-tree.
|
||||
|
||||
With version 1.2, we’ve extracted the AWS, Azure, and GCP plugins into their own repositories, one per provider. We now also publish one plugin image per provider. This change brings these providers to parity with other providers’ plugin implementations, reduces the size of the core Velero binary by not requiring each provider’s SDK to be included, and opens the door for the plugins to be maintained and released independently of core Velero.
|
||||
|
||||
## Restic Integration Improvements
|
||||
|
||||
We’ve continued to work on improving Velero’s restic integration. With this release, we’ve made the following enhancements:
|
||||
|
||||
- Restic backup and restore progress is now captured during execution and visible to the user through the `velero backup/restore describe --details` command. The details are updated every 10 seconds. This provides a new level of visibility into restic operations for users.
|
||||
- Restic backups of persistent volume claims (PVCs) now remain incremental across the rescheduling of a pod. Previously, if the pod using a PVC was rescheduled, the next restic backup would require a full rescan of the volume’s contents. This improvement potentially makes such backups significantly faster.
|
||||
- Read-write-many volumes are no longer backed up once for every pod using the volume, but instead just once per Velero backup. This improvement speeds up backups and prevents potential restore issues due to multiple copies of the backup being processed simultaneously.
|
||||
|
||||
|
||||
## Clone PVs When Cloning a Namespace
|
||||
|
||||
Before version 1.2, you could clone a Kubernetes namespace by backing it up and then restoring it to a different namespace in the same cluster by using the `--namespace-mappings` flag with the `velero restore create` command. However, in this scenario, Velero was unable to clone persistent volumes used by the namespace, leading to errors for users.
|
||||
|
||||
In version 1.2, Velero automatically detects when you are trying to clone an existing namespace, and clones the persistent volumes used by the namespace as well. This doesn’t require the user to specify any additional flags for the `velero restore create` command. This change lets you fully achieve your goal of cloning namespaces using persistent storage within a cluster.
|
||||
|
||||
## Improved Server-Side Encryption Support
|
||||
|
||||
To help you secure your important backup data, we’ve added support for more forms of server-side encryption of backup data on both AWS and GCP. Specifically:
|
||||
- On AWS, Velero now supports Amazon S3-managed encryption keys (SSE-S3), which uses AES256 encryption, by specifying `serverSideEncryption: AES256` in a backup storage location’s config.
|
||||
- On GCP, Velero now supports using a specific Cloud KMS key for server-side encryption by specifying `kmsKeyName: <key name>` in a backup storage location’s config.
|
||||
|
||||
## CRD Structural Schema
|
||||
|
||||
In Kubernetes 1.16, custom resource definitions (CRDs) reached general availability. Structural schemas are required for CRDs created in the `apiextensions.k8s.io/v1` API group. Velero now defines a structural schema for each of its CRDs and automatically applies it the user runs the `velero install` command. The structural schemas enable the user to get quicker feedback when their backup, restore, or schedule request is invalid, so they can immediately remediate their request.
|
||||
|
||||
### All Changes
|
||||
* Ensure object store plugin processes are cleaned up after restore and after BSL validation during server start up (#2041, @betta1)
|
||||
* bug fix: don't try to restore pod volume backups that don't have a snapshot ID (#2031, @skriss)
|
||||
* Restore Documentation: Updated Restore Documentation with Clarification implications of removing restore object. (#1957, @nainav)
|
||||
* add `--allow-partially-failed` flag to `velero restore create` for use with `--from-schedule` to allow partially-failed backups to be restored (#1994, @skriss)
|
||||
* Allow backup storage locations to specify backup sync period or toggle off sync (#1936, @betta1)
|
||||
* Remove cloud provider code (#1985, @carlisia)
|
||||
* Restore action for cluster/namespace role bindings (#1974, @alexander-demichev)
|
||||
* Add `--no-default-backup-location` flag to `velero install` (#1931, @Frank51)
|
||||
* If includeClusterResources is nil/auto, pull in necessary CRDs in backupResource (#1831, @sseago)
|
||||
* Azure: add support for Azure China/German clouds (#1938, @andyzhangx)
|
||||
* Add a new required `--plugins` flag for `velero install` command. `--plugins` takes a list of container images to add as initcontainers. (#1930, @nrb)
|
||||
* restic: only backup read-write-many PVCs at most once, even if they're annotated for backup from multiple pods. (#1896, @skriss)
|
||||
* Azure: add support for cross-subscription backups (#1895, @boxcee)
|
||||
* adds `insecureSkipTLSVerify` server config for AWS storage and `--insecure-skip-tls-verify` flag on client for self-signed certs (#1793, @s12chung)
|
||||
* Add check to update resource field during backupItem (#1904, @spiffcs)
|
||||
* Add `LD_LIBRARY_PATH` (=/plugins) to the env variables of velero deployment. (#1893, @lintongj)
|
||||
* backup sync controller: stop using `metadata/revision` file, do a full diff of bucket contents vs. cluster contents each sync interval (#1892, @skriss)
|
||||
* bug fix: during restore, check item's original namespace, not the remapped one, for inclusion/exclusion (#1909, @skriss)
|
||||
* adds structural schema to Velero CRDs created on Velero install, enabling validation of Velero API fields (#1898, @prydonius)
|
||||
* GCP: add support for specifying a Cloud KMS key name to use for encrypting backups in a storage location. (#1879, @skriss)
|
||||
* AWS: add support for SSE-S3 AES256 encryption via `serverSideEncryption` config field in BackupStorageLocation (#1869, @skriss)
|
||||
* change default `restic prune` interval to 7 days, add `velero server/install` flags for specifying an alternate default value. (#1864, @skriss)
|
||||
* velero install: if `--use-restic` and `--wait` are specified, wait up to a minute for restic daemonset to be ready (#1859, @skriss)
|
||||
* report restore progress in PodVolumeRestores and expose progress in the velero restore describe --details command (#1854, @prydonius)
|
||||
* Jekyll Site updates - modifies documentation to use a wider layout; adds better markdown table formatting (#1848, @ccbayer)
|
||||
* fix excluding additional items with the velero.io/exclude-from-backup=true label (#1843, @prydonius)
|
||||
* report backup progress in PodVolumeBackups and expose progress in the velero backup describe --details command. Also upgrades restic to v0.9.5 (#1821, @prydonius)
|
||||
* Add `--features` argument to all velero commands to provide feature flags that can control enablement of pre-release features. (#1798, @nrb)
|
||||
* when backing up PVCs with restic, specify `--parent` flag to prevent full volume rescans after pod reschedules (#1807, @skriss)
|
||||
* remove 'restic check' calls from before/after 'restic prune' since they're redundant (#1794, @skriss)
|
||||
* fix error formatting due interpreting % as printf formatted strings (#1781, @s12chung)
|
||||
* when using `velero restore create --namespace-mappings ...` to create a second copy of a namespace in a cluster, create copies of the PVs used (#1779, @skriss)
|
||||
* adds --from-schedule flag to the `velero create backup` command to create a Backup from an existing Schedule (#1734, @prydonius)
|
||||
@@ -1,116 +0,0 @@
|
||||
## v1.3.2
|
||||
### 2020-04-03
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.3.2
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.3.2`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.3.2/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.3.2/upgrade-to-1.3/
|
||||
|
||||
### All Changes
|
||||
* Allow `plugins/` as a valid top-level directory within backup storage locations. This directory is a place for plugin authors to store arbitrary data as needed. It is recommended to create an additional subdirectory under `plugins/` specifically for your plugin, e.g. `plugins/my-plugin-data/`. (#2350, @skriss)
|
||||
* bug fix: don't panic in `velero restic repo get` when last maintenance time is `nil` (#2315, @skriss)
|
||||
|
||||
## v1.3.1
|
||||
### 2020-03-10
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.3.1
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.3.1`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.3.1/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.3.1/upgrade-to-1.3/
|
||||
|
||||
### Highlights
|
||||
|
||||
Fixed a bug that caused failures when backing up CustomResourceDefinitions with whole numbers in numeric fields.
|
||||
|
||||
### All Changes
|
||||
* Fix CRD backup failures when fields contained a whole number. (#2322, @nrb)
|
||||
|
||||
|
||||
## v1.3.0
|
||||
#### 2020-03-02
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.3.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.3.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.3.0/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.3.0/upgrade-to-1.3/
|
||||
|
||||
### Highlights
|
||||
|
||||
#### Custom Resource Definition Backup and Restore Improvements
|
||||
|
||||
This release includes a number of related bug fixes and improvements to how Velero backs up and restores custom resource definitions (CRDs) and instances of those CRDs.
|
||||
|
||||
We found and fixed three issues around restoring CRDs that were originally created via the `v1beta1` CRD API. The first issue affected CRDs that had the `PreserveUnknownFields` field set to `true`. These CRDs could not be restored into 1.16+ Kubernetes clusters, because the `v1` CRD API does not allow this field to be set to `true`. We added code to the restore process to check for this scenario, to set the `PreserveUnknownFields` field to `false`, and to instead set `x-kubernetes-preserve-unknown-fields` to `true` in the OpenAPIv3 structural schema, per Kubernetes guidance. For more information on this, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields). The second issue affected CRDs without structural schemas. These CRDs need to be backed up/restored through the `v1beta1` API, since all CRDs created through the `v1` API must have structural schemas. We added code to detect these CRDs and always back them up/restore them through the `v1beta1` API. Finally, related to the previous issue, we found that our restore code was unable to handle backups with multiple API versions for a given resource type, and we’ve remediated this as well.
|
||||
|
||||
We also improved the CRD restore process to enable users to properly restore CRDs and instances of those CRDs in a single restore operation. Previously, users found that they needed to run two separate restores: one to restore the CRD(s), and another to restore instances of the CRD(s). This was due to two deficiencies in the Velero code. First, Velero did not wait for a CRD to be fully accepted by the Kubernetes API server and ready for serving before moving on; and second, Velero did not refresh its cached list of available APIs in the target cluster after restoring CRDs, so it was not aware that it could restore instances of those CRDs.
|
||||
|
||||
We fixed both of these issues by (1) adding code to wait for CRDs to be “ready” after restore before moving on, and (2) refreshing the cached list of APIs after restoring CRDs, so any instances of newly-restored CRDs could subsequently be restored.
|
||||
|
||||
With all of these fixes and improvements in place, we hope that the CRD backup and restore experience is now seamless across all supported versions of Kubernetes.
|
||||
|
||||
|
||||
#### Multi-Arch Docker Images
|
||||
|
||||
Thanks to community members [@Prajyot-Parab](https://github.com/Prajyot-Parab) and [@shaneutt](https://github.com/shaneutt), Velero now provides multi-arch container images by using Docker manifest lists. We are currently publishing images for `linux/amd64`, `linux/arm64`, `linux/arm`, and `linux/ppc64le` in [our Docker repository](https://hub.docker.com/r/velero/velero/tags?page=1&name=v1.3&ordering=last_updated).
|
||||
|
||||
Users don’t need to change anything other than updating their version tag - the v1.3 image is `velero/velero:v1.3.0`, and Docker will automatically pull the proper architecture for the host.
|
||||
|
||||
For more information on manifest lists, see [Docker’s documentation](https://docs.docker.com/registry/spec/manifest-v2-2/).
|
||||
|
||||
|
||||
#### Bug Fixes, Usability Enhancements, and More
|
||||
|
||||
We fixed a large number of bugs and made some smaller usability improvements in this release. Here are a few highlights:
|
||||
|
||||
- Support private registries with custom ports for the restic restore helper image ([PR #1999](https://github.com/vmware-tanzu/velero/pull/1999), [@cognoz](https://github.com/cognoz))
|
||||
- Use AWS profile from BackupStorageLocation when invoking restic ([PR #2096](https://github.com/vmware-tanzu/velero/pull/2096), [@dinesh](https://github.com/dinesh))
|
||||
- Allow restores from schedules in other clusters ([PR #2218](https://github.com/vmware-tanzu/velero/pull/2218), [@cpanato](https://github.com/cpanato))
|
||||
- Fix memory leak & race condition in restore code ([PR #2201](https://github.com/vmware-tanzu/velero/pull/2201), [@skriss](https://github.com/skriss))
|
||||
|
||||
|
||||
### All Changes
|
||||
* Corrected the selfLink for Backup CR in site/docs/master/output-file-format.md (#2292, @RushinthJohn)
|
||||
* Back up schema-less CustomResourceDefinitions as v1beta1, even if they are retrieved via the v1 endpoint. (#2264, @nrb)
|
||||
* Bug fix: restic backup volume snapshot to the second location failed (#2244, @jenting)
|
||||
* Added support of using PV name from volumesnapshotter('SetVolumeID') in case of PV renaming during the restore (#2216, @mynktl)
|
||||
* Replaced the deprecated Helm repo URL in all of its appearances in the docs. (#2209, @markrity)
|
||||
* added support for arm and arm64 images (#2227, @shaneutt)
|
||||
* when restoring from a schedule, validate by checking for backup(s) labeled with the schedule name rather than existence of the schedule itself, to allow for restoring from deleted schedules and schedules in other clusters (#2218, @cpanato)
|
||||
* bug fix: back up server-preferred version of CRDs rather than always the `v1beta1` version (#2230, @skriss)
|
||||
* Wait for CustomResourceDefinitions to be ready before restoring CustomResources. Also refresh the resource list from the Kubernetes API server after restoring CRDs in order to properly restore CRs. (#1937, @nrb)
|
||||
* When restoring a v1 CRD with PreserveUnknownFields = True, make sure that the preservation behavior is maintained by copying the flag into the Open API V3 schema, but update the flag so as to allow the Kubernetes API server to accept the CRD without error. (#2197, @nrb)
|
||||
* Enable pruning unknown CRD fields (#2187, @jenting)
|
||||
* bump restic to 0.9.6 to fix some issues with non AWS standard regions (#2210, @Sh4d1)
|
||||
* bug fix: fix race condition resulting in restores sometimes succeeding despite restic restore failures (#2201, @skriss)
|
||||
* Bug fix: Check for nil LastMaintenanceTime in ResticRepository dueForMaintenance (#2200, @sseago)
|
||||
* repopulate backup_last_successful_timestamp metrics for each schedule after server restart (#2196, @skriss)
|
||||
* added support for ppc64le images and manifest lists (#1768, @prajyot)
|
||||
* bug fix: only prioritize restoring `replicasets.apps`, not `replicasets.extensions` (#2157, @skriss)
|
||||
* bug fix: restore both `replicasets.apps` *and* `replicasets.extensions` before `deployments` (#2120, @skriss)
|
||||
* bug fix: don't restore cluster-scoped resources when restoring specific namespaces and IncludeClusterResources is nil (#2118, @skriss)
|
||||
* Enabling Velero to switch credentials (`AWS_PROFILE`) if multiple s3-compatible backupLocations are present (#2096, @dinesh)
|
||||
* bug fix: deep-copy backup's labels when constructing snapshot tags, so the PV name isn't added as a label to the backup (#2075, @skriss)
|
||||
* remove the `fsfreeze-pause` image being published from this repo; replace it with `ubuntu:bionic` in the nginx example app (#2068, @skriss)
|
||||
* add support for a private registry with a custom port in a restic-helper image (#1999, @cognoz)
|
||||
* return better error message to user when cluster config can't be found via `--kubeconfig`, `$KUBECONFIG`, or in-cluster config (#2057, @skriss)
|
||||
@@ -1,79 +0,0 @@
|
||||
## v1.4.2
|
||||
### 2020-07-13
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.4.2
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.4.2`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.4/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.4/upgrade-to-1.4/
|
||||
|
||||
### All Changes
|
||||
* log a warning instead of erroring if an additional item returned from a plugin can't be found in the Kubernetes API (#2595, @skriss)
|
||||
* Adjust restic default time out to 4 hours and base pod resource requests to 500m CPU/512Mi memory. (#2696, @nrb)
|
||||
* capture the version of the CRD prior to invoking the remap_crd_version backup item action (#2683, @ashish-amarnath)
|
||||
|
||||
|
||||
## v1.4.1
|
||||
|
||||
This tag was created in code, but has no associated docker image due to misconfigured building infrastructure. v1.4.2 fixes this.
|
||||
|
||||
## v1.4.0
|
||||
### 2020-05-26
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.4.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.4.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.4/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.4/upgrade-to-1.4/
|
||||
|
||||
### Highlights
|
||||
|
||||
* Added beta-level CSI support!
|
||||
* Added custom CA certificate support
|
||||
* Backup progress reporting
|
||||
* Changed backup tarball format to support all versions of a given resource
|
||||
|
||||
### All Changes
|
||||
* increment restic volumesnapshot count after successful pvb create (#2542, @ashish-amarnath)
|
||||
* Add details of CSI volumesnapshotcontents associated with a backup to `velero backup describe` when the `EnableCSI` feature flag is given on the velero client. (#2448, @nrb)
|
||||
* Allow users the option to retrieve all versions of a given resource (instead of just the preferred version) from the API server with the `EnableAPIGroupVersions` feature flag. (#2373, @brito-rafa)
|
||||
* Changed backup tarball format to store all versions of a given resource, updated backup tarball format to 1.1.0. (#2373, @brito-rafa)
|
||||
* allow feature flags to be passed from install CLI (#2503, @ashish-amarnath)
|
||||
* sync backups' CSI API objects into the cluster as part of the backup sync controller (#2496, @ashish-amarnath)
|
||||
* bug fix: in error location logging hook, if the item logged under the `error` key doesn't implement the `error` interface, don't return an error since this is a valid scenario (#2487, @skriss)
|
||||
* bug fix: in CRD restore plugin, don't use runtime.DefaultUnstructuredConverter.FromUnstructured(...) to avoid conversion issues when float64 fields contain int values (#2484, @skriss)
|
||||
* during backup deletion also delete CSI volumesnapshotcontents that were created as a part of the backup but the associated volumesnapshot object does not exist (#2480, @ashish-amarnath)
|
||||
* If plugins don't support the `--features` flag, don't pass it to them. Also, update the standard plugin server to ignore unknown flags. (#2479, @skriss)
|
||||
* At backup time, if a CustomResourceDefinition appears to have been created via the v1beta1 endpoint, retrieve it from the v1beta1 endpoint instead of simply changing the APIVersion. (#2478, @nrb)
|
||||
* update container base images from ubuntu:bionic to ubuntu:focal (#2471, @skriss)
|
||||
* bug fix: when a resource includes/excludes list contains unresolvable items, don't remove them from the list, so that the list doesn't inadvertently end up matching *all* resources. (#2462, @skriss)
|
||||
* Azure: add support for getting storage account key for restic directly from an environment variable (#2455, @jaygridley)
|
||||
* Support to skip VSL validation for the backup having SnapshotVolumes set to false or created with `--snapshot-volumes=false` (#2450, @mynktl)
|
||||
* report backup progress (number of items backed up so far out of an estimated total number of items) during backup in the logs and as status fields on the Backup custom resource (#2440, @skriss)
|
||||
* bug fix: populate namespace in logs for backup errors (#2438, @skriss)
|
||||
* during backup deletion also delete CSI volumesnapshots that were created as a part of the backup (#2411, @ashish-amarnath)
|
||||
* bump Kubernetes module dependencies to v0.17.4 to get fix for https://github.com/kubernetes/kubernetes/issues/86149 (#2407, @skriss)
|
||||
* bug fix: save PodVolumeBackup manifests to object storage even if the volume was empty, so that on restore, the PV is dynamically reprovisioned if applicable (#2390, @skriss)
|
||||
* Adding new restoreItemAction for PVC to update the selected-node annotation (#2377, @mynktl)
|
||||
* Added a --cacert flag to the install command to provide the CA bundle to use when verifying TLS connections to object storage (#2368, @mansam)
|
||||
* Added a `--cacert` flag to the velero client describe, download, and logs commands to allow passing a path to a certificate to use when verifying TLS connections to object storage. Also added a corresponding client config option called `cacert` which takes a path to a certificate bundle to use as a default when `--cacert` is not specified. (#2364, @mansam)
|
||||
* support setting a custom CA certificate on a BSL to use when verifying TLS connections (#2353, @mansam)
|
||||
* adding annotations on backup CRD for k8s major, minor and git versions (#2346, @brito-rafa)
|
||||
* When the EnableCSI feature flag is provided, upload CSI VolumeSnapshots and VolumeSnapshotContents to object storage as gzipped JSON. (#2323, @nrb)
|
||||
* add CSI snapshot API types into default restore priorities (#2318, @ashish-amarnath)
|
||||
* refactoring: wait for all informer caches to sync before running controllers (#2299, @skriss)
|
||||
* refactor restore code to lazily resolve resources via discovery and eliminate second restore loop for instances of restored CRDs (#2248, @skriss)
|
||||
* upgrade to go 1.14 and migrate from `dep` to go modules (#2214, @skriss)
|
||||
* clarify the wording for restore describe for namespaces included
|
||||
0
changelogs/unreleased/.keep
Normal file
0
changelogs/unreleased/.keep
Normal file
1
changelogs/unreleased/1781-s12chung
Normal file
1
changelogs/unreleased/1781-s12chung
Normal file
@@ -0,0 +1 @@
|
||||
fix error formatting due to interpreting % as printf formatted strings
|
||||
@@ -22,8 +22,8 @@ import (
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd"
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd/velero"
|
||||
"github.com/heptio/velero/pkg/cmd"
|
||||
"github.com/heptio/velero/pkg/cmd/velero"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,131 +0,0 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (unknown)
|
||||
labels:
|
||||
component: velero
|
||||
name: backupstoragelocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
names:
|
||||
kind: BackupStorageLocation
|
||||
listKind: BackupStorageLocationList
|
||||
plural: backupstoragelocations
|
||||
singular: backupstoragelocation
|
||||
scope: ""
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: BackupStorageLocation is a location where Velero stores backup
|
||||
objects.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: BackupStorageLocationSpec defines the specification for a
|
||||
Velero BackupStorageLocation.
|
||||
properties:
|
||||
accessMode:
|
||||
description: AccessMode defines the permissions for the backup storage
|
||||
location.
|
||||
enum:
|
||||
- ReadOnly
|
||||
- ReadWrite
|
||||
type: string
|
||||
backupSyncPeriod:
|
||||
description: BackupSyncPeriod defines how frequently to sync backup
|
||||
API objects from object storage. A value of 0 disables sync.
|
||||
nullable: true
|
||||
type: string
|
||||
config:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Config is for provider-specific configuration fields.
|
||||
type: object
|
||||
objectStorage:
|
||||
description: ObjectStorageLocation specifies the settings necessary
|
||||
to connect to a provider's object storage.
|
||||
properties:
|
||||
bucket:
|
||||
description: Bucket is the bucket to use for object storage.
|
||||
type: string
|
||||
prefix:
|
||||
description: Prefix is the path inside a bucket to use for Velero
|
||||
storage. Optional.
|
||||
type: string
|
||||
required:
|
||||
- bucket
|
||||
type: object
|
||||
provider:
|
||||
description: Provider is the provider of the backup storage.
|
||||
type: string
|
||||
required:
|
||||
- objectStorage
|
||||
- provider
|
||||
type: object
|
||||
status:
|
||||
description: BackupStorageLocationStatus describes the current status
|
||||
of a Velero BackupStorageLocation.
|
||||
properties:
|
||||
accessMode:
|
||||
description: "AccessMode is an unused field. \n Deprecated: there
|
||||
is now an AccessMode field on the Spec and this field will be removed
|
||||
entirely as of v2.0."
|
||||
enum:
|
||||
- ReadOnly
|
||||
- ReadWrite
|
||||
type: string
|
||||
lastSyncedRevision:
|
||||
description: "LastSyncedRevision is the value of the `metadata/revision`
|
||||
file in the backup storage location the last time the BSL's contents
|
||||
were synced into the cluster. \n Deprecated: this field is no longer
|
||||
updated or used for detecting changes to the location's contents
|
||||
and will be removed entirely in v2.0."
|
||||
type: string
|
||||
lastSyncedTime:
|
||||
description: LastSyncedTime is the last time the contents of the location
|
||||
were synced into the cluster.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
phase:
|
||||
description: Phase is the current state of the BackupStorageLocation.
|
||||
enum:
|
||||
- Available
|
||||
- Unavailable
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: default
|
||||
namespace: velero
|
||||
spec:
|
||||
config:
|
||||
region: minio
|
||||
s3ForcePathStyle: "true"
|
||||
s3Url: http://minio.velero.svc:9000
|
||||
objectStorage:
|
||||
bucket: velero
|
||||
provider: aws
|
||||
@@ -1,89 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
name: velero
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
deploy: velero
|
||||
strategy: {}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/path: /metrics
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
component: velero
|
||||
deploy: velero
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
command:
|
||||
- /velero
|
||||
env:
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: LD_LIBRARY_PATH
|
||||
value: /plugins
|
||||
name: velero
|
||||
image: velero/velero:latest
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8085
|
||||
name: metrics
|
||||
resources:
|
||||
limits:
|
||||
cpu: "1"
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
volumeMounts:
|
||||
- mountPath: /scratch
|
||||
name: scratch
|
||||
restartPolicy: Always
|
||||
serviceAccountName: velero
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: scratch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
name: velero
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: velero
|
||||
namespace: velero
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
name: velero
|
||||
namespace: velero
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
name: velero
|
||||
spec: {}
|
||||
@@ -1,12 +0,0 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- CRDs.yaml
|
||||
- backupstoragelocations.yaml
|
||||
- volumesnapshotlocations.yaml # including so the velero server can run
|
||||
- resticrepository.yaml # including so the velero server can run
|
||||
- podvolumes.yaml # including so the velero server can run
|
||||
- minio.yaml
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
# Copyright 2017 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: velero
|
||||
name: minio
|
||||
labels:
|
||||
component: minio
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
component: minio
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: minio
|
||||
spec:
|
||||
volumes:
|
||||
- name: storage
|
||||
emptyDir: {}
|
||||
- name: config
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: minio
|
||||
image: minio/minio:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- server
|
||||
- /storage
|
||||
- --config-dir=/config
|
||||
env:
|
||||
- name: MINIO_ACCESS_KEY
|
||||
value: "minio"
|
||||
- name: MINIO_SECRET_KEY
|
||||
value: "minio123"
|
||||
ports:
|
||||
- containerPort: 9000
|
||||
volumeMounts:
|
||||
- name: storage
|
||||
mountPath: "/storage"
|
||||
- name: config
|
||||
mountPath: "/config"
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
namespace: velero
|
||||
name: minio
|
||||
labels:
|
||||
component: minio
|
||||
spec:
|
||||
# ClusterIP is recommended for production environments.
|
||||
# Change to NodePort if needed per documentation,
|
||||
# but only if you run Minio in a test/trial environment, for example with Minikube.
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9000
|
||||
targetPort: 9000
|
||||
protocol: TCP
|
||||
selector:
|
||||
component: minio
|
||||
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
namespace: velero
|
||||
name: minio-setup
|
||||
labels:
|
||||
component: minio
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: minio-setup
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
volumes:
|
||||
- name: config
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: mc
|
||||
image: minio/mc:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "mc --config-dir=/config config host add velero http://minio:9000 minio minio123 && mc --config-dir=/config mb -p velero/velero"
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/config"
|
||||
@@ -1,297 +0,0 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (unknown)
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: podvolumebackups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
names:
|
||||
kind: PodVolumeBackup
|
||||
listKind: PodVolumeBackupList
|
||||
plural: podvolumebackups
|
||||
singular: podvolumebackup
|
||||
scope: ""
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: PodVolumeBackupSpec is the specification for a PodVolumeBackup.
|
||||
properties:
|
||||
backupStorageLocation:
|
||||
description: BackupStorageLocation is the name of the backup storage
|
||||
location where the restic repository is stored.
|
||||
type: string
|
||||
node:
|
||||
description: Node is the name of the node that the Pod is running
|
||||
on.
|
||||
type: string
|
||||
pod:
|
||||
description: Pod is a reference to the pod containing the volume to
|
||||
be backed up.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent.
|
||||
type: string
|
||||
fieldPath:
|
||||
description: 'If referring to a piece of an object instead of
|
||||
an entire object, this string should contain a valid JSON/Go
|
||||
field access statement, such as desiredState.manifest.containers[2].
|
||||
For example, if the object reference is to a container within
|
||||
a pod, this would take on a value like: "spec.containers{name}"
|
||||
(where "name" refers to the name of the container that triggered
|
||||
the event) or if no container name is specified "spec.containers[2]"
|
||||
(container with index 2 in this pod). This syntax is chosen
|
||||
only to have some well-defined way of referencing a part of
|
||||
an object. TODO: this design is not final and this field is
|
||||
subject to change in the future.'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
|
||||
type: string
|
||||
namespace:
|
||||
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
|
||||
type: string
|
||||
resourceVersion:
|
||||
description: 'Specific resourceVersion to which this reference
|
||||
is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
|
||||
type: string
|
||||
uid:
|
||||
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
|
||||
type: string
|
||||
type: object
|
||||
repoIdentifier:
|
||||
description: RepoIdentifier is the restic repository identifier.
|
||||
type: string
|
||||
tags:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Tags are a map of key-value pairs that should be applied
|
||||
to the volume backup as tags.
|
||||
type: object
|
||||
volume:
|
||||
description: Volume is the name of the volume within the Pod to be
|
||||
backed up.
|
||||
type: string
|
||||
required:
|
||||
- backupStorageLocation
|
||||
- node
|
||||
- pod
|
||||
- repoIdentifier
|
||||
- volume
|
||||
type: object
|
||||
status:
|
||||
description: PodVolumeBackupStatus is the current status of a PodVolumeBackup.
|
||||
properties:
|
||||
completionTimestamp:
|
||||
description: CompletionTimestamp records the time a backup was completed.
|
||||
Completion time is recorded even on failed backups. Completion time
|
||||
is recorded before uploading the backup object. The server's time
|
||||
is used for CompletionTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the pod volume backup's status.
|
||||
type: string
|
||||
path:
|
||||
description: Path is the full path within the controller pod being
|
||||
backed up.
|
||||
type: string
|
||||
phase:
|
||||
description: Phase is the current state of the PodVolumeBackup.
|
||||
enum:
|
||||
- New
|
||||
- InProgress
|
||||
- Completed
|
||||
- Failed
|
||||
type: string
|
||||
progress:
|
||||
description: Progress holds the total number of bytes of the volume
|
||||
and the current number of backed up bytes. This can be used to display
|
||||
progress information about the backup operation.
|
||||
properties:
|
||||
bytesDone:
|
||||
format: int64
|
||||
type: integer
|
||||
totalBytes:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
snapshotID:
|
||||
description: SnapshotID is the identifier for the snapshot of the
|
||||
pod volume.
|
||||
type: string
|
||||
startTimestamp:
|
||||
description: StartTimestamp records the time a backup was started.
|
||||
Separate from CreationTimestamp, since that value changes on restores.
|
||||
The server's time is used for StartTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (unknown)
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: podvolumerestores.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
names:
|
||||
kind: PodVolumeRestore
|
||||
listKind: PodVolumeRestoreList
|
||||
plural: podvolumerestores
|
||||
singular: podvolumerestore
|
||||
scope: ""
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
|
||||
properties:
|
||||
backupStorageLocation:
|
||||
description: BackupStorageLocation is the name of the backup storage
|
||||
location where the restic repository is stored.
|
||||
type: string
|
||||
pod:
|
||||
description: Pod is a reference to the pod containing the volume to
|
||||
be restored.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent.
|
||||
type: string
|
||||
fieldPath:
|
||||
description: 'If referring to a piece of an object instead of
|
||||
an entire object, this string should contain a valid JSON/Go
|
||||
field access statement, such as desiredState.manifest.containers[2].
|
||||
For example, if the object reference is to a container within
|
||||
a pod, this would take on a value like: "spec.containers{name}"
|
||||
(where "name" refers to the name of the container that triggered
|
||||
the event) or if no container name is specified "spec.containers[2]"
|
||||
(container with index 2 in this pod). This syntax is chosen
|
||||
only to have some well-defined way of referencing a part of
|
||||
an object. TODO: this design is not final and this field is
|
||||
subject to change in the future.'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
|
||||
type: string
|
||||
namespace:
|
||||
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
|
||||
type: string
|
||||
resourceVersion:
|
||||
description: 'Specific resourceVersion to which this reference
|
||||
is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
|
||||
type: string
|
||||
uid:
|
||||
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
|
||||
type: string
|
||||
type: object
|
||||
repoIdentifier:
|
||||
description: RepoIdentifier is the restic repository identifier.
|
||||
type: string
|
||||
snapshotID:
|
||||
description: SnapshotID is the ID of the volume snapshot to be restored.
|
||||
type: string
|
||||
volume:
|
||||
description: Volume is the name of the volume within the Pod to be
|
||||
restored.
|
||||
type: string
|
||||
required:
|
||||
- backupStorageLocation
|
||||
- pod
|
||||
- repoIdentifier
|
||||
- snapshotID
|
||||
- volume
|
||||
type: object
|
||||
status:
|
||||
description: PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
|
||||
properties:
|
||||
completionTimestamp:
|
||||
description: CompletionTimestamp records the time a restore was completed.
|
||||
Completion time is recorded even on failed restores. The server's
|
||||
time is used for CompletionTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the pod volume restore's status.
|
||||
type: string
|
||||
phase:
|
||||
description: Phase is the current state of the PodVolumeRestore.
|
||||
enum:
|
||||
- New
|
||||
- InProgress
|
||||
- Completed
|
||||
- Failed
|
||||
type: string
|
||||
progress:
|
||||
description: Progress holds the total number of bytes of the snapshot
|
||||
and the current number of restored bytes. This can be used to display
|
||||
progress information about the restore operation.
|
||||
properties:
|
||||
bytesDone:
|
||||
format: int64
|
||||
type: integer
|
||||
totalBytes:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
startTimestamp:
|
||||
description: StartTimestamp records the time a restore was started.
|
||||
The server's time is used for StartTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
||||
@@ -1,85 +0,0 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (unknown)
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: resticrepositories.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
names:
|
||||
kind: ResticRepository
|
||||
listKind: ResticRepositoryList
|
||||
plural: resticrepositories
|
||||
singular: resticrepository
|
||||
scope: ""
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: ResticRepositorySpec is the specification for a ResticRepository.
|
||||
properties:
|
||||
backupStorageLocation:
|
||||
description: BackupStorageLocation is the name of the BackupStorageLocation
|
||||
that should contain this repository.
|
||||
type: string
|
||||
maintenanceFrequency:
|
||||
description: MaintenanceFrequency is how often maintenance should
|
||||
be run.
|
||||
type: string
|
||||
resticIdentifier:
|
||||
description: ResticIdentifier is the full restic-compatible string
|
||||
for identifying this repository.
|
||||
type: string
|
||||
volumeNamespace:
|
||||
description: VolumeNamespace is the namespace this restic repository
|
||||
contains pod volume backups for.
|
||||
type: string
|
||||
required:
|
||||
- backupStorageLocation
|
||||
- maintenanceFrequency
|
||||
- resticIdentifier
|
||||
- volumeNamespace
|
||||
type: object
|
||||
status:
|
||||
description: ResticRepositoryStatus is the current status of a ResticRepository.
|
||||
properties:
|
||||
lastMaintenanceTime:
|
||||
description: LastMaintenanceTime is the last time maintenance was
|
||||
run.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the current status of the
|
||||
ResticRepository.
|
||||
type: string
|
||||
phase:
|
||||
description: Phase is the current state of the ResticRepository.
|
||||
enum:
|
||||
- New
|
||||
- Ready
|
||||
- NotReady
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
||||
@@ -1,80 +0,0 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (unknown)
|
||||
labels:
|
||||
component: velero
|
||||
name: volumesnapshotlocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
names:
|
||||
kind: VolumeSnapshotLocation
|
||||
listKind: VolumeSnapshotLocationList
|
||||
plural: volumesnapshotlocations
|
||||
singular: volumesnapshotlocation
|
||||
scope: ""
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: VolumeSnapshotLocation is a location where Velero stores volume
|
||||
snapshots.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: VolumeSnapshotLocationSpec defines the specification for
|
||||
a Velero VolumeSnapshotLocation.
|
||||
properties:
|
||||
config:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Config is for provider-specific configuration fields.
|
||||
type: object
|
||||
provider:
|
||||
description: Provider is the provider of the volume storage.
|
||||
type: string
|
||||
required:
|
||||
- provider
|
||||
type: object
|
||||
status:
|
||||
description: VolumeSnapshotLocationStatus describes the current status
|
||||
of a Velero VolumeSnapshotLocation.
|
||||
properties:
|
||||
phase:
|
||||
description: VolumeSnapshotLocationPhase is the lifecyle phase of
|
||||
a Velero VolumeSnapshotLocation.
|
||||
enum:
|
||||
- Available
|
||||
- Unavailable
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1
|
||||
versions:
|
||||
- name: v1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: velero.io/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: default
|
||||
namespace: velero
|
||||
spec:
|
||||
config:
|
||||
region: us-east-2
|
||||
provider: aws
|
||||
@@ -1,40 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
deploy: velero
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
deploy: velero
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
name: velero
|
||||
env:
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
volumeMounts:
|
||||
- mountPath: /plugins
|
||||
name: plugins
|
||||
- mountPath: /credentials
|
||||
name: cloud-credential-aws
|
||||
initContainers:
|
||||
- image: velero/velero-plugin-for-aws:v1.0.1
|
||||
imagePullPolicy: Always
|
||||
name: velero-plugin-for-aws
|
||||
volumeMounts:
|
||||
- mountPath: /target
|
||||
name: plugins
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: plugins
|
||||
- name: cloud-credential-aws
|
||||
secret:
|
||||
secretName: cloud-credential-aws
|
||||
@@ -1,40 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
deploy: velero
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: velero
|
||||
deploy: velero
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
name: velero
|
||||
env:
|
||||
- name: AZURE_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
volumeMounts:
|
||||
- mountPath: /plugins
|
||||
name: plugins
|
||||
- mountPath: /credentials
|
||||
name: cloud-credential-azure
|
||||
initContainers:
|
||||
- image: velero/velero-plugin-for-microsoft-azure:v1.0.1
|
||||
imagePullPolicy: Always
|
||||
name: velero-plugin-for-microsoft-azure
|
||||
volumeMounts:
|
||||
- mountPath: /target
|
||||
name: plugins
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: plugins
|
||||
- name: cloud-credential-azure
|
||||
secret:
|
||||
secretName: cloud-credential-azure
|
||||
@@ -1,3 +0,0 @@
|
||||
[default]
|
||||
aws_access_key_id = minio
|
||||
aws_secret_access_key = minio123
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
bases:
|
||||
- ../../base
|
||||
|
||||
patchesStrategicMerge:
|
||||
- aws-plugin.yaml # this patches the Velero deployment
|
||||
# - azure-plugin.yaml # this patches the Velero deployment
|
||||
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
labels:
|
||||
component: velero
|
||||
|
||||
secretGenerator:
|
||||
- name: cloud-credentials
|
||||
files:
|
||||
- "cloud"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: restic
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: restic
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
component: velero
|
||||
name: restic
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- restic
|
||||
- server
|
||||
command:
|
||||
- /velero
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: GOOGLE_APPLICATION_CREDENTIALS
|
||||
value: /credentials/cloud
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: AZURE_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
image: velero/velero:latest
|
||||
imagePullPolicy: Always
|
||||
name: restic
|
||||
resources: {}
|
||||
volumeMounts:
|
||||
- mountPath: /host_pods
|
||||
mountPropagation: HostToContainer
|
||||
name: host-pods
|
||||
- mountPath: /scratch
|
||||
name: scratch
|
||||
- mountPath: /credentials
|
||||
name: cloud-credentials
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
serviceAccountName: velero
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
name: host-pods
|
||||
- emptyDir: {}
|
||||
name: scratch
|
||||
- name: cloud-credentials
|
||||
secret:
|
||||
secretName: cloud-credentials
|
||||
updateStrategy: {}
|
||||
@@ -1,5 +1,7 @@
|
||||
# Design proposal template (replace with your proposal's title)
|
||||
|
||||
Status: {Draft,Accepted,Declined}
|
||||
|
||||
One to two sentences that describes the goal of this proposal.
|
||||
The reader should be able to tell by the title, and the opening paragraph, if this document is relevant to them.
|
||||
|
||||
|
||||
@@ -1,373 +0,0 @@
|
||||
# Proposal for a more intuitive CLI to install and configure Velero
|
||||
|
||||
Currently, the Velero CLI tool has a `install` command that configures numerous major and minor aspects of Velero. As a result, the combined set of flags for this `install` command makes it hard to intuit and reason about the different Velero components. This document proposes changes to improve the UX for installation and configuration in a way that would make it easier for the user to discover what needs to be configured by looking at what is available in the CLI rather then having to rely heavily on our documentation for the usage. At the same time, it is expected that the documentation update to reflect these changes will also make the documentation flow easier to follow.
|
||||
|
||||
This proposal prioritizes discoverability and self-documentation over minimizing length or number of commands and flags.
|
||||
|
||||
## Goals
|
||||
|
||||
- Split flags currently under the `velero install` command into multiple commands, and group flags under commands in a way that allows a good level of discovery and self-documentation
|
||||
- Maintain compatibility with gitops practices (i.e. ability to generate a full set of yaml for install that can be stored in source control)
|
||||
- Have a clear path for deprecating commands
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Introduce new CLI features
|
||||
- Propose changes to the CLI that go beyond the functionality of install and configure
|
||||
- Optimize for shorter length or number of commands/flags
|
||||
|
||||
## Background
|
||||
|
||||
This document proposes users could benefit from a more intuitive and self-documenting CLI setup as compared to our existing CLI UX. Ultimately, it is proposed that a recipe-style CLI flow for installation, configuration and use would greatly contribute to this purpose.
|
||||
|
||||
Also, the `install` command currently can be reused to update Velero deployment configurations. For server and restic related install and configurations, settings will be moved to under `velero config`.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
The naming and organization of the proposed new CLI commands below have been inspired on the `kubectl` commands, particularly `kubectl set` and `kubectl config`.
|
||||
|
||||
#### General CLI improvements
|
||||
|
||||
These are improvements that are part of this proposal:
|
||||
- Go over all flags and document what is optional, what is required, and default values.
|
||||
- Capitalize all help messages
|
||||
|
||||
#### Commands
|
||||
|
||||
The organization of the commands follows this format:
|
||||
|
||||
```
|
||||
velero [resource] [operation] [flags]
|
||||
```
|
||||
|
||||
To conform with Velero's current practice:
|
||||
- commands will also work by swapping the operation/resource.
|
||||
- the "object" of a command is an argument, and flags are strictly for modifiers (example: `backup get my-backup` and not `backup get --name my-backup`)
|
||||
|
||||
All commands will include the `--dry-run` flag, which can be used to output yaml files containing the commands' configuration for resource creation or patching.
|
||||
|
||||
`--dry-run generate resources, but don't send them to the cluster. Use with -o. Optional.`
|
||||
|
||||
The `--help` and `--output` flags will also be included for all commands, omitted below for brevity.
|
||||
|
||||
Below is the proposed set of new commands to setup and configure Velero.
|
||||
|
||||
1) `velero config`
|
||||
|
||||
```
|
||||
server Configure up the namespace, RBAC, deployment, etc., but does not add any external plugins, BSL/VSL definitions. This would be the minimum set of commands to get the Velero server up and running and ready to accept other configurations.
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--show-labels show labels in the last column
|
||||
--image string image to use for the Velero and restic server pods. Optional. (default "velero/velero:latest")
|
||||
--pod-annotations mapStringString annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2
|
||||
--restore-only run the server in restore-only mode. Optional.
|
||||
--pod-cpu-limit string CPU limit for Velero pod. A value of "0" is treated as unbounded. Optional. (default "1000m")
|
||||
--pod-cpu-request string CPU request for Velero pod. A value of "0" is treated as unbounded. Optional. (default "500m")
|
||||
--pod-mem-limit string memory limit for Velero pod. A value of "0" is treated as unbounded. Optional. (default "256Mi")
|
||||
--pod-mem-request string memory request for Velero pod. A value of "0" is treated as unbounded. Optional. (default "128Mi")
|
||||
--client-burst int maximum number of requests by the server to the Kubernetes API in a short period of time (default 30)
|
||||
--client-qps float32 maximum number of requests per second by the server to the Kubernetes API once the burst limit has been reached (default 20)
|
||||
--default-backup-ttl duration how long to wait by default before backups can be garbage collected (default 720h0m0s)
|
||||
--disable-controllers strings list of controllers to disable on startup. Valid values are backup,backup-sync,schedule,gc,backup-deletion,restore,download-request,restic-repo,server-status-request
|
||||
--log-format the format for log output. Valid values are text, json. (default text)
|
||||
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
|
||||
--metrics-address string the address to expose prometheus metrics (default ":8085")
|
||||
--plugin-dir string directory containing Velero plugins (default "/plugins")
|
||||
--profiler-address string the address to expose the pprof profiler (default "localhost:6060")
|
||||
--restore-only run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled. DEPRECATED: this flag will be removed in v2.0. Use read-only backup storage locations instead.
|
||||
--restore-resource-priorities strings desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources (default [namespaces,storageclasses,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods,replicaset,customresourcedefinitions])
|
||||
--terminating-resource-timeout duration how long to wait on persistent volumes and namespaces to terminate during a restore before timing out (default 10m0s)
|
||||
|
||||
restic Configuration for restic operations.
|
||||
--default-prune-frequency duration how often 'restic prune' is run for restic repositories by default. Optional.
|
||||
--pod-annotations mapStringString annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2
|
||||
--pod-cpu-limit string CPU limit for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-cpu-request string CPU request for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-mem-limit string memory limit for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-mem-request string memory request for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--timeout duration how long backups/restores of pod volumes should be allowed to run before timing out (default 1h0m0s)
|
||||
repo
|
||||
get Get restic repositories
|
||||
```
|
||||
The `velero config server` command will create the following resources:
|
||||
|
||||
```
|
||||
Namespace
|
||||
Deployment
|
||||
backups.velero.io
|
||||
backupstoragelocations.velero.io
|
||||
deletebackuprequests.velero.io
|
||||
downloadrequests.velero.io
|
||||
podvolumebackups.velero.io
|
||||
podvolumerestores.velero.io
|
||||
resticrepositories.velero.io
|
||||
restores.velero.io
|
||||
schedules.velero.io
|
||||
serverstatusrequests.velero.io
|
||||
volumesnapshotlocations.velero.io
|
||||
```
|
||||
|
||||
Note: Velero will maintain the `velero server` command run by the Velero pod, which starts the Velero server deployment.
|
||||
|
||||
2) `velero backup-location`
|
||||
Commands/flags for backup locations.
|
||||
|
||||
```
|
||||
set
|
||||
--default string sets the default backup storage location (default "default") (NEW, -- was `server --default-backup-storage-location; could be set as an annotation on the BSL)
|
||||
--credentials mapStringString sets the name of the corresponding credentials secret for a provider. Format is provider:credentials-secret-name. (NEW)
|
||||
--cacert-file mapStringString configuration to use for creating a secret containing a custom certificate for an S3 location of a plugin provider. Format is provider:path-to-file. (NEW)
|
||||
|
||||
create NAME [flags]
|
||||
--default Sets this new location to be the new default backup location. Default is false. (NEW)
|
||||
--access-mode access mode for the backup storage location. Valid values are ReadWrite,ReadOnly (default ReadWrite)
|
||||
--backup-sync-period 0s how often to ensure all Velero backups in object storage exist as Backup API objects in the cluster. Optional. Set this to 0s to disable sync
|
||||
--bucket string name of the object storage bucket where backups should be stored. Required.
|
||||
--config mapStringString configuration to use for creating a backup storage location. Format is key1=value1,key2=value2 (was also in `velero install --backup-location-config`). Required for Azure.
|
||||
--provider string provider name for backup storage. Required.
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the backup storage location
|
||||
--prefix string prefix under which all Velero data should be stored within the bucket. Optional.
|
||||
--provider string name of the backup storage provider (e.g. aws, azure, gcp)
|
||||
--show-labels show labels in the last column
|
||||
--credentials mapStringString sets the name of the corresponding credentials secret for a provider. Format is provider:credentials-secret-name. (NEW)
|
||||
--cacert-file mapStringString configuration to use for creating a secret containing a custom certificate for an S3 location of a plugin provider. Format is provider:path-to-file. (NEW)
|
||||
|
||||
get Display backup storage locations
|
||||
--default displays the current default backup storage location (NEW)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
|
||||
```
|
||||
|
||||
3) `velero snapshot-location`
|
||||
Commands/flags for snapshot locations.
|
||||
|
||||
```
|
||||
set
|
||||
--default mapStringString sets the list of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...) (NEW, -- was `server --default-volume-snapshot-locations; could be set as an annotation on the VSL)
|
||||
--credentials mapStringString sets the list of name of the corresponding credentials secret for providers. Format is (provider1:credentials-secret-name1,provider2:credentials-secret-name2,...) (NEW)
|
||||
|
||||
create NAME [flags]
|
||||
--default Sets these new locations to be the new default snapshot locations. Default is false. (NEW)
|
||||
--config mapStringString configuration to use for creating a volume snapshot location. Format is key1=value1,key2=value2 (was also in `velero install --`snapshot-location-config`). Required.
|
||||
--provider string provider name for volume storage. Required.
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the volume snapshot location
|
||||
--provider string name of the volume snapshot provider (e.g. aws, azure, gcp)
|
||||
--show-labels show labels in the last column
|
||||
--credentials mapStringString sets the list of name of the corresponding credentials secret for providers. Format is (provider1:credentials-secret-name1,provider2:credentials-secret-name2,...) (NEW)
|
||||
|
||||
get Display snapshot locations
|
||||
--default list of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...) (NEW -- was `server --default-volume-snapshot-locations`))
|
||||
```
|
||||
|
||||
4) `velero plugin`
|
||||
Configuration for plugins.
|
||||
|
||||
```
|
||||
add stringArray IMAGES [flags] - add plugin container images to install into the Velero Deployment
|
||||
|
||||
get get information for all plugins on the velero server (was `get`)
|
||||
--timeout duration maximum time to wait for plugin information to be reported (default 5s)
|
||||
|
||||
remove Remove a plugin [NAME | IMAGE]
|
||||
|
||||
set
|
||||
--credentials-file mapStringString configuration to use for creating a secret containing the AIM credentials for a plugin provider. Format is provider:path-to-file. (was `secret-file`)
|
||||
--no-secret flag indicating if a secret should be created. Must be used as confirmation if create --secret-file is not provided. Optional. (MOVED FROM install -- not sure we need it?)
|
||||
--sa-annotations mapStringString annotations to add to the Velero ServiceAccount for GKE. Add iam.gke.io/gcp-service-account=[GSA_NAME]@[PROJECT_NAME].iam.gserviceaccount.com for workload identity. Optional. Format is key1=value1,key2=value2
|
||||
```
|
||||
|
||||
#### Example
|
||||
|
||||
Considering this proposal, let's consider what a high-level documentation for getting Velero ready to do backups could look like for Velero users:
|
||||
|
||||
After installing the Velero CLI:
|
||||
```
|
||||
velero config server [flags] (required)
|
||||
velero config restic [flags]
|
||||
velero plugin add IMAGES [flags] (add/config provider plugins)
|
||||
velero backup-location/snapshot-location create NAME [flags] (run `velero plugin --get` to see what kind of plugins are available; create locations)
|
||||
velero backup/restore/schedule create/get/delete NAME [flags]
|
||||
```
|
||||
|
||||
The above recipe-style documentation should highlight 1) the main components of Velero, and, 2) the relationship/dependency between the main components
|
||||
|
||||
### Deprecation
|
||||
|
||||
#### Timeline
|
||||
|
||||
In order to maintain compatibility with the current Velero version for a sufficient amount of time, and give users a chance to upgrade any install scripts they might have, we will keep the current `velero install` command in parallel with the new commands until the next major Velero version, which will be Velero 2.0. In the mean time, ia deprecation warning will be added to the `velero install` command.
|
||||
|
||||
#### Commands/flags deprecated or moved
|
||||
|
||||
##### Velero Install
|
||||
`velero install (DEPRECATED)`
|
||||
|
||||
Flags moved to...
|
||||
|
||||
...`velero config server`:
|
||||
```
|
||||
--image string image to use for the Velero and restic server pods. Optional. (default "velero/velero:latest")
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--pod-annotations mapStringString annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2
|
||||
--show-labels show labels in the last column
|
||||
--pod-cpu-limit string CPU limit for Velero pod. A value of "0" is treated as unbounded. Optional. (default "1000m")
|
||||
--pod-cpu-request string CPU request for Velero pod. A value of "0" is treated as unbounded. Optional. (default "500m")
|
||||
--pod-mem-limit string memory limit for Velero pod. A value of "0" is treated as unbounded. Optional. (default "256Mi")
|
||||
--pod-mem-request string memory request for Velero pod. A value of "0" is treated as unbounded. Optional. (default "128Mi")
|
||||
```
|
||||
|
||||
...`velero config restic`
|
||||
```
|
||||
--default-prune-frequency duration how often 'restic prune' is run for restic repositories by default. Optional.
|
||||
--pod-cpu-limit string CPU limit for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-cpu-request string CPU request for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-mem-limit string memory limit for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
--pod-mem-request string memory request for restic pod. A value of "0" is treated as unbounded. Optional. (default "0")
|
||||
```
|
||||
|
||||
...`backup-location create`
|
||||
```
|
||||
--backup-location-config mapStringString configuration to use for the backup storage location. Format is key1=value1,key2=value2
|
||||
--bucket string name of the object storage bucket where backups should be stored
|
||||
--prefix string prefix under which all Velero data should be stored within the bucket. Optional.
|
||||
```
|
||||
|
||||
...`snapshot-location create`
|
||||
```
|
||||
--snapshot-location-config mapStringString configuration to use for the volume snapshot location. Format is key1=value1,key2=value2
|
||||
```
|
||||
|
||||
...both `backup-location create` and `snapshot-location create`
|
||||
```
|
||||
--provider string provider name for backup and volume storage
|
||||
```
|
||||
|
||||
...`plugin`
|
||||
```
|
||||
--plugins stringArray Plugin container images to install into the Velero Deployment
|
||||
--sa-annotations mapStringString annotations to add to the Velero ServiceAccount. Add iam.gke.io/gcp-service-account=[GSA_NAME]@[PROJECT_NAME].iam.gserviceaccount.com for workload identity. Optional. Format is key1=value1,key2=value2
|
||||
--no-secret flag indicating if a secret should be created. Must be used as confirmation if --secret-file is not provided. Optional.
|
||||
--secret-file string (renamed `credentials-file`) file containing credentials for backup and volume provider. If not specified, --no-secret must be used for confirmation. Optional.
|
||||
```
|
||||
|
||||
Flags to deprecate:
|
||||
```
|
||||
--no-default-backup-location flag indicating if a default backup location should be created. Must be used as confirmation if --bucket or --provider are not provided. Optional.
|
||||
--use-volume-snapshots whether or not to create snapshot location automatically. Set to false if you do not plan to create volume snapshots via a storage provider. (default true)
|
||||
--wait wait for Velero deployment to be ready. Optional.
|
||||
--use-restic (obsolete since now we have `velero config restic`)
|
||||
```
|
||||
|
||||
##### Velero Server
|
||||
|
||||
These flags will be moved to under `velero config server`:
|
||||
|
||||
`velero server --default-backup-storage-location (DEPRECATED)` changed to `velero backup-location set --default`
|
||||
|
||||
`velero server --default-volume-snapshot-locations (DEPRECATED)` changed to `velero snapshot-location set --default`
|
||||
|
||||
The value for these flags will be stored as annotations.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
#### Handling CA certs
|
||||
|
||||
In anticipation of a new configuration implementation to handle custom CA certs (as per design doc https://github.com/vmware-tanzu/velero/blob/master/design/custom-ca-support.md), a new flag `velero storage-location create/set --cacert-file mapStringString` is proposed. It sets the configuration to use for creating a secret containing a custom certificate for an S3 location of a plugin provider. Format is provider:path-to-file.
|
||||
|
||||
See discussion https://github.com/vmware-tanzu/velero/pull/2259#discussion_r384700723 for more clarification.
|
||||
|
||||
#### Renaming "provider" to "location-plugin"
|
||||
|
||||
As part of this change, we should change to use the term `location-plugin` instead of `provider`. The reasoning: in practice, we usually have 1 plugin per provider, and if there is an implementation for both object store and volume snapshotter for that provider, it will all be contained in the same plugin. When we handle plugins, we follow this logic. In other words, there's a plugin name (ex: `velero.io/aws`) and it can contain implementations of kind `ObjectStore` and/or `VolumeSnapshotter`.
|
||||
|
||||
But when we handle BSL or VSL (and the CLI commands/flags that configure them), we use the term `provider`, which can cause ambiguity as if that is a kind of thing different from a plugin. If the plugin is the "thing" that contains the implementation for the desired provider, we should make it easier for the user to guess that and change BackupStorageLocation/VolumeSnapshotLocation `Spec.Provider` field to be called `Spec.Location-Plugin` and all related CLI command flags to `location-plugin`, and update the docs accordingly.
|
||||
|
||||
This change will require a CRD version bump and deprecation cycle.
|
||||
|
||||
#### GitOps Compatibility
|
||||
|
||||
To maintain compatibility with gitops practices, each of the new commands will generate `yaml` output that can be stored in source control.
|
||||
|
||||
For content examples, please refer to the files here:
|
||||
|
||||
https://github.com/carlisia/velero/tree/c-cli-design/design/CLI/PoC
|
||||
|
||||
Note: actual `yaml` file names are defined by the user.
|
||||
|
||||
`velero config server` - base/deployment.yaml
|
||||
|
||||
`velero config restic` - overlays/plugins/restic.yaml
|
||||
|
||||
`velero backup-location create` - base/backupstoragelocations.yaml
|
||||
|
||||
`velero snapshot-location create` - base/volumasnapshotlocations.yaml
|
||||
|
||||
`velero plugin add velero/velero-plugin-for-aws:v1.0.1` - overlays/plugins/aws-plugin.yaml
|
||||
|
||||
`velero plugin add velero/velero-plugin-for-microsoft-azure:v1.0.1` - overlay/plugins/azure-plugin.yaml
|
||||
|
||||
These resources can be deployed/deleted using the included kustomize setup and running:
|
||||
|
||||
```
|
||||
kubectl apply -k design/CLI/PoC/overlays/plugins/
|
||||
|
||||
kubectl delete -k design/CLI/PoC/overlays/plugins/
|
||||
```
|
||||
|
||||
Note: All CRDs, including the `ResticRepository`, may continue to be deployed at startup as it is now, or together with their respective instantiation.
|
||||
|
||||
|
||||
#### Changes to startup behavior
|
||||
|
||||
To recap, this proposal redesigns the Velero CLI to make `velero install` obsolete, and instead breaks down the installation and configuration into separate commands. These are the major highlights:
|
||||
|
||||
- Plugins will only be installed separately via `velero plugin add`
|
||||
- BSL/VSL will continue to be configured separately, and now each will have an associated secret
|
||||
|
||||
Since each BSL/VSL will have its own association with a secret, the user will no longer need to upload a new secret whenever changing to, or adding, a BSL/VSL for a provider that is different from the one in use. This will be done at setup time. This will make it easier to support any number of BSL/VSL combinations, with different providers each.
|
||||
|
||||
The user will start up the Velero server on a cluster by using the command `velero config server`. This will create the Velero deployment resource with default values or values overwritten with flags, create the Velero CRDs, and anything else that is not specific to plugins or BSL/VSL.
|
||||
|
||||
The Velero server will start up, verify that the deployment is running, that all CRDs were found, and log a message that it is waiting for a BSL to be configured. At this point, other operations, such as configuring restic, will be allowed. Velero should keep track of its status, i.e., if it is ready to create backups or not. This could be a field `ServerStatus` added to `ServerStatusRequest`. Possible values could be [ready|waiting]. "ready" would mean there is at least 1 valid BSL, and "waiting" would be anything but that.
|
||||
|
||||
When adding/configuring a BSL or VSL, we will allow creating locations, and continuously verify if there is a corresponding, valid plugin. When a valid match is found, mark the BSL/VSL as "ready". This would require adding a field to the BSL/VSL, or using the existing `Phase` field, and keep track of its status, possibly: [ready|waiting].
|
||||
|
||||
With the first approach: the server would transition into "ready" (to create backups) as soon as there is one BSL. It would require a set sequence of actions, ie, first install the plugin, only then the user can successfully configure a BSL.
|
||||
|
||||
With the second approach, the Velero server would continue looping and checking all existing BSLs for at least 1 with a "ready" status. Once it found that, it would set itself to "ready" also.
|
||||
|
||||
Another new behavior that must be added: the server needs to identify when there no longer exists a valid BSL. At this point, it should change its status from "ready" to one that indicates it is not ready, maybe "waiting". With the first approach above, this would mean checking if there is still at least one BSL. With the second approach, it would require checking the status of all BSLs to find at least one with the status of "ready".
|
||||
|
||||
As it is today, a valid VSL would not be required to create backups, unless the backup included a PV.
|
||||
|
||||
To make it easier for the user to identify if their Velero server is ready to create backups or not, a `velero status` command should be added. This issue has been created some time ago for this purpose: https://github.com/vmware-tanzu/velero/issues/1094.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
It seems that the vast majority of tools document their usage with `kubectl` and `yaml` files to install and configure their Kubernetes resources. Many of them also make use of Helm, and to a lesser extent some of them have their own CLI tools.
|
||||
|
||||
Amongst the tools that have their own CLI, not enough examples were found to establish a clear pattern of usage. It seems the most relevant priority should be to have output in `yaml` format.
|
||||
|
||||
Any set of `yaml` files can also be arranged to use with Kustomize by creating/updating resources, and patching them using Kustomize functionalities.
|
||||
|
||||
The way the Velero commands were arranged in this proposal with the ability to output corresponding `yaml` files, and the included Kustomize examples, makes it in line with the widely used practices for installation and configuration.
|
||||
|
||||
Some CLI tools do not document their usage with Kustomize, one could assume it is because anyone with knowledge of Kustomize and `yaml` files would know how to use it.
|
||||
|
||||
Here are some examples:
|
||||
|
||||
https://github.com/jetstack/kustomize-cert-manager-demo
|
||||
|
||||
https://github.com/istio/installer/tree/master/kustomize
|
||||
|
||||
https://github.com/weaveworks/flagger/tree/master/kustomize
|
||||
|
||||
https://github.com/jpeach/contour/tree/1c575c772e9fd747fba72ae41ab99bdae7a01864/kustomize (RFC)
|
||||
|
||||
## Security Considerations
|
||||
|
||||
N/A
|
||||
@@ -1,324 +0,0 @@
|
||||
# CSI Snapshot Support
|
||||
|
||||
The Container Storage Interface (CSI) [introduced an alpha snapshot API in Kubernetes v1.12][1].
|
||||
It will reach beta support in Kubernetes v1.17, scheduled for release in December 2019.
|
||||
This proposal documents an approach for integrating support for this snapshot API within Velero, augmenting its existing capabilities.
|
||||
|
||||
## Goals
|
||||
|
||||
- Enable Velero to backup and restore CSI-backed volumes using the Kubernetes CSI CustomResourceDefinition API
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Replacing Velero's existing [VolumeSnapshotter][7] API
|
||||
- Replacing Velero's Restic support
|
||||
|
||||
## Background
|
||||
|
||||
Velero has had support for performing persistent volume snapshots since its inception.
|
||||
However, support has been limited to a handful of providers.
|
||||
The plugin API introduced in Velero v0.7 enabled the community to expand the number of supported providers.
|
||||
In the meantime, the Kubernetes sig-storage advanced the CSI spec to allow for a generic storage interface, opening up the possibility of moving storage code out of the core Kubernetes code base.
|
||||
The CSI working group has also developed a generic snapshotting API that any CSI driver developer may implement, giving users the ability to snapshot volumes from a standard interface.
|
||||
|
||||
By supporting the CSI snapshot API, Velero can extend its support to any CSI driver, without requiring a Velero-specific plugin be written, easing the development burden on providers while also reaching more end users.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
In order to support CSI's snapshot API, Velero must interact with the [`VolumeSnapshot`][2] and [`VolumeSnapshotContent`][3] CRDs.
|
||||
These act as requests to the CSI driver to perform a snapshot on the underlying provider's volume.
|
||||
This can largely be accomplished with Velero `BackupItemAction` and `RestoreItemAction` plugins that operate on these CRDs.
|
||||
|
||||
Additionally, changes to the Velero server and client code are necessary to track `VolumeSnapshot`s that are associated with a given backup, similarly to how Velero tracks its own [`volume.Snapshot`][4] type.
|
||||
Tracking these is important for allowing users to see what is in their backup, and provides parity for the existing `volume.Snapshot` and [`PodVolumeBackup`][5] types.
|
||||
This is also done to retain the object store as Velero's source of truth, without having to query the Kubernetes API server for associated `VolumeSnapshot`s.
|
||||
|
||||
`velero backup describe --details` will use the stored VolumeSnapshots to list CSI snapshots included in the backup to the user.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Resource Plugins
|
||||
|
||||
A set of [prototype][6] plugins was developed that informed this design.
|
||||
|
||||
The plugins will be as follows:
|
||||
|
||||
|
||||
#### A `BackupItemAction` for `PersistentVolumeClaim`s, named `velero.io/csi-pvc`
|
||||
|
||||
This plugin will act directly on PVCs, since an implementation of Velero's VolumeSnapshotter does not have enough information about the StorageClass to properly create the `VolumeSnapshot` objects.
|
||||
|
||||
The associated PV will be queried and checked for the presence of `PersistentVolume.Spec.PersistentVolumeSource.CSI`. (See the "Snapshot selection mechanism" section below).
|
||||
If this field is `nil`, then the plugin will return early without taking action.
|
||||
If the `Backup.Spec.SnapshotVolumes` value is `false`, the plugin will return early without taking action.
|
||||
|
||||
Additionally, to prevent creating CSI snapshots for volumes backed up by restic, the plugin will query for all pods in the `PersistentVolumeClaim`'s namespace.
|
||||
It will then filter out the pods that have the PVC mounted, and inspect the `backup.velero.io/backup-volumes` annotation for the associated volume's name.
|
||||
If the name is found in the list, then the plugin will return early without taking further action.
|
||||
|
||||
Create a `VolumeSnapshot.snapshot.storage.k8s.io` object from the PVC.
|
||||
Label the `VolumeSnapshot` object with the [`velero.io/backup-name`][10] label for ease of lookup later.
|
||||
Also set an ownerRef on the `VolumeSnapshot` so that cascading deletion of the Velero `Backup` will delete associated `VolumeSnapshots`.
|
||||
|
||||
The CSI controllers will create a `VolumeSnapshotContent.snapshot.storage.k8s.io` object associated with the `VolumeSnapshot`.
|
||||
|
||||
Associated `VolumeSnapshotContent` objects will be retrieved and updated with the [`velero.io/backup-name`][10] label for ease of lookup later.
|
||||
`velero.io/volume-snapshot-name` will be applied as a label to the PVC so that the `VolumeSnapshot` can be found easily for restore.
|
||||
|
||||
`VolumeSnapshot`, `VolumeSnapshotContent`, and `VolumeSnapshotClass` objects would be returned as additional items to be backed up. GitHub issue [1566][18] represents this work.
|
||||
|
||||
The `VolumeSnapshotContent.Spec.VolumeSnapshotSource.SnapshotHandle` field is the link to the underlying platform's on-disk snapshot, and must be preserved for restoration.
|
||||
|
||||
The plugin will _not_ wait for the `VolumeSnapshot.Status.readyToUse` field to be `true` before returning.
|
||||
This field indicates that the snapshot is ready to use for restoration, and for different vendors can indicate that the snapshot has been made durable.
|
||||
However, the applications can proceed as soon as `VolumeSnapshot.Status.CreationTime` is set.
|
||||
This also maintains current Velero behavior, which allows applications to quiesce and resume quickly, with minimal interruption.
|
||||
|
||||
Any sort of monitoring or waiting for durable snapshots, either Velero-native or CSI snapshots, are not covered by this proposal.
|
||||
|
||||
```
|
||||
K8s object relationships inside of the backup tarball
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------+-----------+ +-----------+-----------+
|
||||
^ ^
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
+-----------+-----------+ +-----------+-----------+
|
||||
| VolumeSnapshot +<------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `VolumeSnapshotContent` objects, named `velero.io/csi-vsc`
|
||||
|
||||
On restore, `VolumeSnapshotContent` objects are cleaned so that they may be properly associated with IDs assigned by the target cluster.
|
||||
|
||||
Only `VolumeSnapshotContent` objects with the `velero.io/backup-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
|
||||
The metadata (excluding labels), `PersistentVolumeClaim.UUID`, and `VolumeSnapshotRef.UUID` fields will be cleared.
|
||||
The reference fields are cleared because the associated objects will get new UUIDs in the cluster.
|
||||
This also maps to the "import" case of [the snapshot API][1].
|
||||
|
||||
This means the relationship between the `VolumeSnapshot` and `VolumeSnapshotContent` is
|
||||
one way until the CSI controllers rebind them.
|
||||
|
||||
|
||||
```
|
||||
K8s objects after the velero.io/csi-vsc plugin has run
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------------------+ +-----------------------+
|
||||
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `VolumeSnapshot` objects, named `velero.io/csi-vs`
|
||||
|
||||
`VolumeSnapshot` objects must be prepared for importing into the target cluster by removing IDs and metadata associated with their origin cluster.
|
||||
|
||||
Only `VolumeSnapshot` objects with the `velero.io/backup-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
|
||||
Metadata (excluding labels) and `Source` (that is, the pointer to the `PersistentVolumeClaim`) fields on the object will be cleared.
|
||||
The `VolumeSnapshot.Spec.SnapshotContentName` is the link back to the `VolumeSnapshotContent` object, and thus the actual snapshot.
|
||||
The `Source` field indicates that a new CSI snapshot operation should be performed, which isn't relevant on restore.
|
||||
This follows the "import" case of [the snapshot API][1].
|
||||
|
||||
The `Backup` associated with the `VolumeSnapshot` will be queried, and set as an ownerRef on the `VolumeSnapshot` so that deletion can cascade.
|
||||
|
||||
```
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------------------+ +-----------------------+
|
||||
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `PersistentVolumeClaim`s named `velero.io/csi-pvc`
|
||||
|
||||
On restore, `PersistentVolumeClaims` will need to be created from the snapshot, and thus will require editing before submission.
|
||||
|
||||
Only `PersistentVolumeClaim` objects with the `velero.io/volume-snapshot-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
Metadata (excluding labels) will be cleared, and the `velero.io/volume-snapshot-name` label will be used to find the relevant `VolumeSnapshot`.
|
||||
A reference to the `VolumeSnapshot` will be added to the `PersistentVolumeClaim.DataSource` field.
|
||||
|
||||
```
|
||||
+-----------------------+
|
||||
| PersistentVolumeClaim |
|
||||
+-----------------------+
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### VolumeSnapshotClasses
|
||||
|
||||
No special logic is required to restore `VolumeSnapshotClass` objects.
|
||||
|
||||
These plugins should be provided with Velero, as there will also be some changes to core Velero code to enable association of a `Backup` to the included `VolumeSnapshot`s.
|
||||
|
||||
|
||||
|
||||
### Velero server changes
|
||||
|
||||
Any non-plugin code changes must be behind a `EnableCSI` feature flag and the behavior will be opt-in until it's exited beta status.
|
||||
This will allow the development to continue on the feature while it's in pre-production state, while also reducing the need for long-lived feature branches.
|
||||
|
||||
[`persistBackup`][8] will be extended to query for all `VolumeSnapshot`s associated with the backup, and persist the list to JSON.
|
||||
|
||||
[`BackupStore.PutBackup`][9] will receive an additional argument, `volumeSnapshots io.Reader`, that contains the JSON representation of `VolumeSnapshots`.
|
||||
This will be written to a file named `csi-snapshots.json.gz`.
|
||||
|
||||
[`defaultRestorePriorities`][11] should be rewritten to the following to accommodate proper association between the CSI objects and PVCs. `CustomResourceDefinition`s are moved up because they're necessary for creating the CSI CRDs. The CSI CRDs are created before `PersistentVolume`s and `PersistentVolumeClaim`s so that they may be used as data sources.
|
||||
GitHub issue [1565][17] represents this work.
|
||||
|
||||
```go
|
||||
var defaultRestorePriorities = []string{
|
||||
"namespaces",
|
||||
"storageclasses",
|
||||
"customresourcedefinitions",
|
||||
"volumesnapshotclass.snapshot.storage.k8s.io",
|
||||
"volumesnapshotcontents.snapshot.storage.k8s.io",
|
||||
"volumesnapshots.snapshot.storage.k8s.io",
|
||||
"persistentvolumes",
|
||||
"persistentvolumeclaims",
|
||||
"secrets",
|
||||
"configmaps",
|
||||
"serviceaccounts",
|
||||
"limitranges",
|
||||
"pods",
|
||||
"replicaset",
|
||||
}
|
||||
```
|
||||
### Restic and CSI interaction
|
||||
|
||||
Volumes found in a `Pod`'s `backup.velero.io/backup-volumes` list will use Velero's current Restic code path.
|
||||
This also means Velero will continue to offer Restic as an option for CSI volumes.
|
||||
|
||||
The `velero.io/csi-pvc` BackupItemAction plugin will inspect pods in the namespace to ensure that it does not act on PVCs already being backed up by restic.
|
||||
|
||||
This is preferred to modifying the PVC due to the fact that Velero's current backup process backs up PVCs and PVs mounted to pods at the same time as the pod.
|
||||
|
||||
A drawback to this approach is that we're querying all pods in the namespace per PVC, which could be a large number.
|
||||
In the future, the plugin interface could be improved to have some sort of context argument, so that additional data such as our existing `resticSnapshotTracker` could be passed to plugins and reduce work.
|
||||
|
||||
### Garbage collection and deletion
|
||||
|
||||
To ensure that all created resources are deleted when a backup expires or is deleted, `VolumeSnapshot`s will have an `ownerRef` defined pointing to the Velero backup that created them.
|
||||
|
||||
In order to fully delete these objects, each `VolumeSnapshotContent`s object will need to be edited to ensure the associated provider snapshot is deleted.
|
||||
This will be done by editing the object and setting `VolumeSnapshotContent.Spec.DeletionPolicy` to `Delete`, regardless of whether or not the default policy for the class is `Retain`.
|
||||
See the Deletion Policies section below.
|
||||
The edit will happen before making Kubernetes API deletion calls to ensure that the cascade works as expected.
|
||||
|
||||
Deleting a Velero `Backup` or any associated CSI object via `kubectl` is unsupported; data will be lost or orphaned if this is done.
|
||||
|
||||
### Other snapshots included in the backup
|
||||
|
||||
Since `VolumeSnapshot` and `VolumeSnapshotContent` objects are contained within a Velero backup tarball, it is possible that all CRDs and on-disk provider snapshots have been deleted, yet the CRDs are still within other Velero backup tarballs.
|
||||
Thus, when a Velero backup that contains these CRDs is restored, the `VolumeSnapshot` and `VolumeSnapshotContent` objects are restored into the cluster, the CSI controllers will attempt to reconcile their state, and there are two possible states when the on-disk snapshot has been deleted:
|
||||
|
||||
1) If the driver _does not_ support the `ListSnapshots` gRPC method, then the CSI controllers have no way of knowing how to find it, and sets the `VolumeSnapshot.Status.readyToUse` field to `true`.
|
||||
2) If the driver _does_ support the `ListSnapshots` gRPC method, then the CSI controllers will query the state of the on-disk snapshot, see it is missing, and set `VolumeSnapshot.Status.readyToUse` and `VolumeSnapshotContent.Status.readyToUse` fields to `false`.
|
||||
|
||||
## Velero client changes
|
||||
|
||||
To use CSI features, the Velero client must use the `EnableCSI` feature flag.
|
||||
|
||||
[`DescribeBackupStatus`][13] will be extended to download the `csi-snapshots.json.gz` file for processing. GitHub Issue [1568][19] captures this work.
|
||||
|
||||
A new `describeCSIVolumeSnapshots` function should be added to the [output][12] package that knows how to render the included `VolumeSnapshot` names referenced in the `csi-snapshots.json.gz` file.
|
||||
|
||||
### Snapshot selection mechanism
|
||||
|
||||
The most accurate, reliable way to detect if a PersistentVolume is a CSI volume is to check for a non-`nil` [`PersistentVolume.Spec.PersistentVolumeSource.CSI`][16] field.
|
||||
Using the [`volume.beta.kubernetes.io/storage-provisioner`][14] is not viable, since the usage is for any PVC that should be dynamically provisioned, and is _not_ limited to CSI implementations.
|
||||
It was [introduced with dynamic provisioning support][15] in 2016, predating CSI.
|
||||
|
||||
In the `BackupItemAction` for PVCs, the associated PV will be queried and checked for the presence of `PersistentVolume.Spec.PersistentVolumeSource.CSI`.
|
||||
Volumes with any other `PersistentVolumeSource` set will use Velero's current VolumeSnapshotter plugin code path.
|
||||
|
||||
### VolumeSnapshotLocations and VolumeSnapshotClasses
|
||||
|
||||
Velero uses its own `VolumeSnapshotLocation` CRDs to specify configuration options for a given storage system.
|
||||
In Velero, this often includes topology information such as regions or availability zones, as well as credential information.
|
||||
|
||||
CSI volume snapshotting has a `VolumeSnapshotClass` CRD which also contains configuration options for a given storage system, but these options are not the same as those that Velero would use.
|
||||
Since CSI volume snapshotting is operating within the same storage system that manages the volumes already, it does not need the same topology or credential information that Velero does.
|
||||
|
||||
As such, when used with CSI volumes, Velero's `VolumeSnapshotLocation` CRDs are not relevant, and could be omitted.
|
||||
|
||||
This will create a separate path in our documentation for the time being, and should be called out explicitly.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
* Implementing similar logic in a Velero VolumeSnapshotter plugin was considered.
|
||||
However, this is inappropriate given CSI's data model, which requires a PVC/PV's StorageClass.
|
||||
Given the arguments to the VolumeSnapshotter interface, the plugin would have to instantiate its own client and do queries against the Kubernetes API server to get the necessary information.
|
||||
|
||||
This is unnecessary given the fact that the `BackupItemAction` and `RestoreItemAction` APIs can act directly on the appropriate objects.
|
||||
|
||||
Additionally, the VolumeSnapshotter plugins and CSI volume snapshot drivers overlap - both produce a snapshot on backup and a PersistentVolume on restore.
|
||||
Thus, there's not a logical place to fit the creation of VolumeSnapshot creation in the VolumeSnapshotter interface.
|
||||
|
||||
* Implement CSI logic directly in Velero core code.
|
||||
The plugins could be packaged separately, but that doesn't necessarily make sense with server and client changes being made to accommodate CSI snapshot lookup.
|
||||
|
||||
* Implementing the CSI logic entirely in external plugins.
|
||||
As mentioned above, the necessary plugins for `PersistentVolumeClaim`, `VolumeSnapshot`, and `VolumeSnapshotContent` could be hosted out-of-tree from Velero.
|
||||
In fact, much of the logic for creating the CSI objects will be driven entirely inside of the plugin implementation.
|
||||
|
||||
However, Velero currently has no way for plugins to communicate that some arbitrary data should be stored in or retrieved from object storage, such as list of all `VolumeSnapshot` objects associated with a given `Backup`.
|
||||
This is important, because to display snapshots included in a backup, whether as native snapshots or Restic backups, separate JSON-encoded lists are stored within the backup on object storage.
|
||||
Snapshots are not listed directly on the `Backup` to fit within the etcd size limitations.
|
||||
Additionally, there are no client-side Velero plugin mechanisms, which means that the `velero describe backup --details` command would have no way of displaying the objects to the user, even if they were stored.
|
||||
|
||||
## Deletion Policies
|
||||
|
||||
In order for underlying, provider-level snapshots to be retained similarly to Velero's current functionality, the `VolumeSnapshotContent.Spec.DeletionPolicy` field must be set to `Retain`.
|
||||
|
||||
This is most easily accomplished by setting the `VolumeSnapshotClass.DeletionPolicy` field to `Retain`, which will be inherited by all `VolumeSnapshotContent` objects associated with the `VolumeSnapshotClass`.
|
||||
|
||||
The current default for dynamically provisioned `VolumeSnapshotContent` objects is `Delete`, which will delete the provider-level snapshot when the `VolumeSnapshotContent` object representing it is deleted.
|
||||
Additionally, the `Delete` policy will cascade a deletion of a `VolumeSnapshot`, removing the associated `VolumeSnapshotContent` object.
|
||||
|
||||
It is not currently possible to define a deletion policy on a `VolumeSnapshot` that gets passed to a `VolumeSnapshotContent` object on an individual basis.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
This proposal does not significantly change Velero's security implications within a cluster.
|
||||
|
||||
If a deployment is using solely CSI volumes, Velero will no longer need privileges to interact with volumes or snapshots, as these will be handled by the CSI driver.
|
||||
This reduces the provider permissions footprint of Velero.
|
||||
|
||||
Velero must still be able to access cluster-scoped resources in order to back up `VolumeSnapshotContent` objects.
|
||||
Without these objects, the provider-level snapshots cannot be located in order to re-associate them with volumes in the event of a restore.
|
||||
|
||||
|
||||
|
||||
[1]: https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/
|
||||
[2]: https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/apis/volumesnapshot/v1alpha1/types.go#L41
|
||||
[3]: https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/apis/volumesnapshot/v1alpha1/types.go#L161
|
||||
[4]: https://github.com/heptio/velero/blob/master/pkg/volume/snapshot.go#L21
|
||||
[5]: https://github.com/heptio/velero/blob/master/pkg/apis/velero/v1/pod_volume_backup.go#L88
|
||||
[6]: https://github.com/heptio/velero-csi-plugin/
|
||||
[7]: https://github.com/heptio/velero/blob/master/pkg/plugin/velero/volume_snapshotter.go#L26
|
||||
[8]: https://github.com/heptio/velero/blob/master/pkg/controller/backup_controller.go#L560
|
||||
[9]: https://github.com/heptio/velero/blob/master/pkg/persistence/object_store.go#L46
|
||||
[10]: https://github.com/heptio/velero/blob/master/pkg/apis/velero/v1/labels_annotations.go#L21
|
||||
[11]: https://github.com/heptio/velero/blob/master/pkg/cmd/server/server.go#L471
|
||||
[12]: https://github.com/heptio/velero/blob/master/pkg/cmd/util/output/backup_describer.go
|
||||
[13]: https://github.com/heptio/velero/blob/master/pkg/cmd/util/output/backup_describer.go#L214
|
||||
[14]: https://github.com/kubernetes/kubernetes/blob/8ea9edbb0290e9de1e6d274e816a4002892cca6f/pkg/controller/volume/persistentvolume/util/util.go#L69
|
||||
[15]: https://github.com/kubernetes/kubernetes/pull/30285
|
||||
[16]: https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/types.go#L237
|
||||
[17]: https://github.com/heptio/velero/issues/1565
|
||||
[18]: https://github.com/heptio/velero/issues/1566
|
||||
[19]: https://github.com/heptio/velero/issues/1568
|
||||
@@ -1,132 +0,0 @@
|
||||
# Custom CA Bundle Support for S3 Object Storage
|
||||
|
||||
It is desired that Velero performs SSL verification on the Object Storage
|
||||
endpoint (BackupStorageLocation), but it is not guaranteed that the Velero
|
||||
container has the endpoints' CA bundle in its system store. Velero needs to
|
||||
support the ability for a user to specify custom CA bundles at installation
|
||||
time and Velero needs to support a mechanism in the BackupStorageLocation
|
||||
Custom Resource to allow a user to specify a custom CA bundle. This mechanism
|
||||
needs to also allow Restic to access and use this custom CA bundle.
|
||||
|
||||
## Goals
|
||||
|
||||
- Enable Velero to be configured with a custom CA bundle at installation
|
||||
- Enable Velero support for custom CA bundles with S3 API BackupStorageLocations
|
||||
- Enable Restic to use the custom CA bundles whether it is configured at installation time or on the BackupStorageLocation
|
||||
- Enable Velero client to take a CA bundle as an argument
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Support non-S3 providers
|
||||
|
||||
## Background
|
||||
|
||||
Currently, in order for Velero to perform SSL verification of the object
|
||||
storage endpoint the user must manually set the `AWS_CA_BUNDLE` environment
|
||||
variable on the Velero deployment. If the user is using Restic, the user has to
|
||||
either:
|
||||
1. Add the certs to the Restic container's system store
|
||||
1. Modify Velero to pass in the certs as a CLI parameter to Restic - requiring
|
||||
a custom Velero deployment
|
||||
|
||||
## High-Level Design
|
||||
|
||||
There are really 2 methods of using Velero with custom certificates:
|
||||
1. Including a custom certificate at Velero installation
|
||||
1. Specifying a custom certificate to be used with a `BackupStorageLocation`
|
||||
|
||||
### Specifying a custom cert at installation
|
||||
|
||||
On the Velero deployment at install time, we can set the AWS environment variable
|
||||
`AWS_CA_BUNDLE` which will allow Velero to communicate over https with the
|
||||
proper certs when communicating with the S3 bucket. This means we will add the
|
||||
ability to specify a custom CA bundle at installation time. For more
|
||||
information, see "Install Command Changes".
|
||||
|
||||
On the Restic daemonset, we will want to also mount this secret at a pre-defined
|
||||
location. In the `restic` pkg, the command to invoke restic will need to be
|
||||
updated to pass the path to the cert file that is mounted if it is specified in
|
||||
the config.
|
||||
|
||||
This is good, but doesn't allow us to specify different certs when
|
||||
`BackupStorageLocation` resources are created.
|
||||
|
||||
### Specifying a custom cert on BSL
|
||||
|
||||
In order to support custom certs for object storage, Velero will add an
|
||||
additional field to the `BackupStorageLocation`'s provider `Config` resource to
|
||||
provide a secretRef which will contain the coordinates to a secret containing
|
||||
the relevant cert file for object storage.
|
||||
|
||||
In order for Restic to be able to consume and use this cert, Velero will need
|
||||
the ability to write the CA bundle somewhere in memory for the Restic pod to
|
||||
consume it.
|
||||
|
||||
To accomplish this, we can look at the code for managing restic repository
|
||||
credentials. The way this works today is that the key is stored in a secret in
|
||||
the Velero namespace, and each time Velero executes a restic command, the
|
||||
contents of the secret are read and written out to a temp file. The path to
|
||||
this file is then passed to restic and removed afterwards. See ref #1 and #2.
|
||||
|
||||
This same approach can be taken for CA bundles. The bundle can be stored in a
|
||||
secret which is referenced on the BSL and written to a temp file prior to
|
||||
invoking Restic.
|
||||
|
||||
[1](https://github.com/vmware-tanzu/velero/blob/master/pkg/restic/repository_manager.go#L238-L245)
|
||||
[2](https://github.com/vmware-tanzu/velero/blob/master/pkg/restic/common.go#L168-L203)
|
||||
|
||||
## Detailed Design
|
||||
|
||||
The `AWS_CA_BUNDLE` environment variable works for the Velero deployment
|
||||
because this environment variable is passed into the AWS SDK which is used in
|
||||
the [plugin][1] to build up the config object. This means that a user can
|
||||
simply define the CA bundle in the deployment as an env var. This can be
|
||||
utilized for the installation of Velero with a custom cert by simply setting
|
||||
this env var to the contents of the CA bundle, or the env var can be mapped to
|
||||
a secret which is controlled at installation time. I recommend using a secret
|
||||
as it makes the Restic integration easier as well.
|
||||
|
||||
At installation time, if a user has specified a custom cert then the Restic
|
||||
daemonset should be updated to include the secret mounted at a predefined path.
|
||||
We could optionally use the system store for all custom certs added at
|
||||
installation time. Restic supports using the custom certs [in addition][3] to
|
||||
the root certs.
|
||||
|
||||
In the case of the BSL being created with a secret reference, then at runtime
|
||||
the secret will need to be consumed. This secret will be read and applied to
|
||||
the AWS `session` object. The `getSession()` function will need to be updated
|
||||
to take in the custom CA bundle so it can be passed [here][4].
|
||||
|
||||
The Restic controller will need to be updated to write the contents of the CA
|
||||
bundle secret out to a temporary file inside of the restic pod. The restic
|
||||
[command invocation][2] will need to be updated to include the path to the file
|
||||
as an argument to the restic server using `--cacert`. For the path when a user
|
||||
defines a custom cert on the BSL, Velero will be responsible for updating the
|
||||
daemonset to include the secret mounted as a volume at a predefined path.
|
||||
|
||||
Where we mount the secret is a fine detail, but I recommend mounting the certs
|
||||
to `/certs` to keep it in line with the other volume mount paths being used.
|
||||
|
||||
### Install command changes
|
||||
|
||||
The installation flags should be updated to include the ability to pass in a
|
||||
cert file. Then the install command would do the heavy lifting of creating a
|
||||
secret and updating the proper fields on the deployment and daemonset to mount
|
||||
the secret at a well defined path.
|
||||
|
||||
### Velero client changes
|
||||
|
||||
Since the Velero client is responsible for gathering logs and information about
|
||||
the Object Storage, this implementation should include a new flag `--cacert`
|
||||
which can be used when communicating with the Object Storage. Additionally, the
|
||||
user should be able to set this in their client configuration. The command
|
||||
would look like:
|
||||
```
|
||||
$ velero client config set cacert PATH
|
||||
```
|
||||
|
||||
[1]: https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/master/velero-plugin-for-aws/object_store.go#L135
|
||||
[2]: https://github.com/vmware-tanzu/velero/blob/master/pkg/restic/command.go#L47
|
||||
[3]: https://github.com/restic/restic/blob/master/internal/backend/http_transport.go#L81
|
||||
[4]: https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/master/velero-plugin-for-aws/object_store.go#L154
|
||||
@@ -1,164 +0,0 @@
|
||||
# Generating Velero CRDs with structural schema support
|
||||
|
||||
As the apiextensions.k8s.io API moves to GA, structural schema in Custom Resource Definitions (CRDs) will become required.
|
||||
|
||||
This document proposes updating the CRD generation logic as part of `velero install` to include structural schema for each Velero CRD.
|
||||
|
||||
## Goals
|
||||
|
||||
- Enable structural schema and validation for Velero Custom Resources.
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Update Velero codebase to use Kubebuilder for controller/code generation.
|
||||
- Solve for keeping CRDs in the Velero Helm chart up-to-date.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, Velero CRDs created by the `velero install` command do not contain any structural schema.
|
||||
The CRD is simply [generated at runtime](https://github.com/heptio/velero/blob/8b0cf3855c2b8aa631cf22e63da0955f7b1d06a8/pkg/install/crd.go#L39) using the name and plurals from the [`velerov1api.CustomResources()`](https://github.com/heptio/velero/blob/8b0cf3855c2b8aa631cf22e63da0955f7b1d06a8/pkg/apis/velero/v1/register.go#L60) info.
|
||||
|
||||
Updating the info returned by that method would be one way to add support for structural schema when generating the CRDs, but this would require manually describing the schema and would duplicate information from the API structs (e.g. comments describing a field).
|
||||
|
||||
Instead, the [controller-tools](https://github.com/kubernetes-sigs/controller-tools) project from Kubebuilder provides tooling for generating CRD manifests (YAML) from the Velero API types.
|
||||
This document proposes adding _controller-tools_ to the project to automatically generate CRDs, and use these generated CRDs as part of `velero install`.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
_controller-tools_ works by reading the Go files that contain the API type definitions.
|
||||
It uses a combination of the struct fields, types, tags and comments to build the OpenAPIv3 schema for the CRDs. The tooling makes some assumptions based on conventions followed in upstream Kubernetes and the ecosystem, which involves some changes to the Velero API type definitions, especially around optional fields.
|
||||
|
||||
In order for _controller-tools_ to read the Go files containing Velero API type definitions, the CRDs need to be generated at build time, as these files are not available at runtime (i.e. the Go files are not accessible by the compiled binary).
|
||||
These generated CRD manifests (YAML) will then need to be available to the `pkg/install` package for it to include when installing Velero resources.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Changes to Velero API type definitions
|
||||
|
||||
API type definitions need to be updated to correctly identify optional and required fields for each API type.
|
||||
Upstream Kubernetes defines all optional fields using the `omitempty` tag as well as a `// +optional` annotation above the field (e.g. see [PodSpec definition](https://github.com/kubernetes/api/blob/master/core/v1/types.go#L2835-L2838)).
|
||||
_controller-tools_ will mark a field as optional if it sees either the tag or the annotation, but to keep consistent with upstream, optional fields will be updated to use both indicators (as [suggested](https://github.com/kubernetes-sigs/kubebuilder/issues/479) by the Kubebuilder project).
|
||||
Additionally, upstream Kubernetes defines the metav1.ObjectMeta, metav1.ListMeta, Spec and Status as [optional on all types](https://github.com/kubernetes/api/blob/master/core/v1/types.go#L3517-L3531).
|
||||
Some Velero API types set the `omitempty` tag on Status, but not on other fields - these will all need to be updated to be made optional.
|
||||
|
||||
Below is a list of the Velero API type fields and what changes (if any) will be made.
|
||||
Note that this only includes fields used in the spec, all status fields will become optional.
|
||||
|
||||
| Type | Field | Changes |
|
||||
|---------------------------------|-------------------------|-------------------------------------------------------------|
|
||||
| BackupSpec | IncludedNamespaces | make optional |
|
||||
| | ExcludedNamespaces | make optional |
|
||||
| | IncludedResources | make optional |
|
||||
| | ExcludedResources | make optional |
|
||||
| | LabelSelector | make optional |
|
||||
| | SnapshotVolumes | make optional |
|
||||
| | TTL | make optional |
|
||||
| | IncludeClusterResources | make optional |
|
||||
| | Hooks | make optional |
|
||||
| | StorageLocation | make optional |
|
||||
| | VolumeSnapshotLocations | make optional |
|
||||
| BackupHooks | Resources | make optional |
|
||||
| BackupResourceHookSpec | Name | none (required) |
|
||||
| | IncludedNamespaces | make optional |
|
||||
| | ExcludedNamespaces | make optional |
|
||||
| | IncludedResources | make optional |
|
||||
| | ExcludedResources | make optional |
|
||||
| | LabelSelector | make optional |
|
||||
| | PreHooks | make optional |
|
||||
| | PostHooks | make optional |
|
||||
| BackupResourceHook | Exec | none (required) |
|
||||
| ExecHook | Container | make optional |
|
||||
| | Command | required, validation: MinItems=1 |
|
||||
| | OnError | make optional |
|
||||
| | Timeout | make optional |
|
||||
| HookErrorMode | | validation: Enum |
|
||||
| BackupStorageLocationSpec | Provider | none (required) |
|
||||
| | Config | make optional |
|
||||
| | StorageType | none (required) |
|
||||
| | AccessMode | make optional |
|
||||
| StorageType | ObjectStorage | make required |
|
||||
| ObjectStorageLocation | Bucket | none (required) |
|
||||
| | Prefix | make optional |
|
||||
| BackupStorageLocationAccessMode | | validation: Enum |
|
||||
| DeleteBackupRequestSpec | BackupName | none (required) |
|
||||
| DownloadRequestSpec | Target | none (required) |
|
||||
| DownloadTarget | Kind | none (required) |
|
||||
| | Name | none (required) |
|
||||
| DownloadTargetKind | | validation: Enum |
|
||||
| PodVolumeBackupSpec | Node | none (required) |
|
||||
| | Pod | none (required) |
|
||||
| | Volume | none (required) |
|
||||
| | BackupStorageLocation | none (required) |
|
||||
| | RepoIdentifier | none (required) |
|
||||
| | Tags | make optional |
|
||||
| PodVolumeRestoreSpec | Pod | none (required) |
|
||||
| | Volume | none (required) |
|
||||
| | BackupStorageLocation | none (required) |
|
||||
| | RepoIdentifier | none (required) |
|
||||
| | SnapshotID | none (required) |
|
||||
| ResticRepositorySpec | VolumeNamespace | none (required) |
|
||||
| | BackupStorageLocation | none (required) |
|
||||
| | ResticIdentifier | none (required) |
|
||||
| | MaintenanceFrequency | none (required) |
|
||||
| RestoreSpec | BackupName | none (required) - should be set to "" if using ScheduleName |
|
||||
| | ScheduleName | make optional |
|
||||
| | IncludedNamespaces | make optional |
|
||||
| | ExcludedNamespaces | make optional |
|
||||
| | IncludedResources | make optional |
|
||||
| | ExcludedResources | make optional |
|
||||
| | NamespaceMapping | make optional |
|
||||
| | LabelSelector | make optional |
|
||||
| | RestorePVs | make optional |
|
||||
| | IncludeClusterResources | make optional |
|
||||
| ScheduleSpec | Template | none (required) |
|
||||
| | Schedule | none (required) |
|
||||
| VolumeSnapshotLocationSpec | Provider | none (required) |
|
||||
| | Config | make optional |
|
||||
|
||||
### Build-time generation of CRD manifests
|
||||
|
||||
The build image will be updated as follows to include the _controller-tool_ tooling:
|
||||
|
||||
|
||||
```diff
|
||||
diff --git a/hack/build-image/Dockerfile b/hack/build-image/Dockerfile
|
||||
index b69a8c8a..07eac9c6 100644
|
||||
--- a/hack/build-image/Dockerfile
|
||||
+++ b/hack/build-image/Dockerfile
|
||||
@@ -21,6 +21,8 @@ RUN mkdir -p /go/src/k8s.io && \
|
||||
git clone -b kubernetes-1.15.3 https://github.com/kubernetes/apimachinery && \
|
||||
# vendor code-generator go modules to be compatible with pre-1.15
|
||||
cd /go/src/k8s.io/code-generator && GO111MODULE=on go mod vendor && \
|
||||
+ go get -d sigs.k8s.io/controller-tools/cmd/controller-gen && \
|
||||
+ cd /go/src/sigs.k8s.io/controller-tools && GO111MODULE=on go mod vendor && \
|
||||
go get golang.org/x/tools/cmd/goimports && \
|
||||
cd /go/src/golang.org/x/tools && \
|
||||
git checkout 40a48ad93fbe707101afb2099b738471f70594ec && \
|
||||
```
|
||||
|
||||
To tie in the CRD manifest generation with existing scripts/workflows, the `hack/update-generated-crd-code.sh` script will be updated to use _controller-tools_ to generate CRDs manifests after it generates the client code.
|
||||
|
||||
The generated CRD manifests will be placed in the `pkg/generated/crds/manifests` folder.
|
||||
Similarly to client code generation, these manifests will be checked-in to the git repo.
|
||||
Checking in these manifests allows including documentation and schema changes to API types as part of code review.
|
||||
|
||||
### Updating `velero install` to include generated CRD manifests
|
||||
|
||||
As described above, CRD generation using _controller-tools_ will happen at build time due to the need to inspect Go files.
|
||||
To enable the `velero install` to access the generated CRD manifests at runtime, the `pkg/generated/crds/manifests` folder will be embedded as binary data in the Velero binary (e.g. using a tool like [vfsgen](https://github.com/shurcooL/vfsgen) - see [POC branch](https://github.com/prydonius/velero/commit/4aa7413f97ce9b23e071b6054f600dd0c283351e)).
|
||||
|
||||
`velero install` will then unmarshal the binary data as `unstructured.Unstructured` types and append them to the [resources list](https://github.com/heptio/velero/blob/8b0cf3855c2b8aa631cf22e63da0955f7b1d06a8/pkg/install/resources.go#L217) in place of the existing CRD generation.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
Instead of generating and bundling CRD manifests, it could be possible to instead embed the `pkg/apis` package in the Velero binary.
|
||||
With this, _controller-tools_ could be run at runtime during `velero install` to generate the CRD manifests.
|
||||
However, this would require including _controller-tools_ as a dependency in the project, which might not be desirable as it is a developer tool.
|
||||
|
||||
Another option, to avoid embedding static files in the binary, would be to generate the CRD manifest as one YAML file in CI and upload it as a release artifact (e.g. using GitHub releases).
|
||||
`velero install` could then download this file for the current version and use it on install.
|
||||
The downside here is that `velero install` becomes dependent on the GitHub network, and we lose visibility on changes to the CRD manifests in the Git history.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
n/a
|
||||
@@ -1,90 +0,0 @@
|
||||
# Plan for moving the Velero GitHub repo into the VMware GitHub organization
|
||||
|
||||
Currently, the Velero repository sits under the Heptio GitHub organization. With the acquisition of Heptio by VMware, it is due time that this repo moves to one of the VMware GitHub organizations. This document outlines a plan to move this repo to the VMware Tanzu (https://github.com/vmware-tanzu) organization.
|
||||
|
||||
## Goals
|
||||
|
||||
- List all steps necessary to have this repo fully functional under the new org
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Highlight any step necessary around setting up the new organization and its members
|
||||
|
||||
## Action items
|
||||
|
||||
### Todo list
|
||||
|
||||
#### Pre move
|
||||
|
||||
- [ ] PR: Blog post communicating the move. https://github.com/heptio/velero/issues/1841. Who: TBD.
|
||||
- [ ] PR: Find/replace in all Go, script, yaml, documentation, and website files: `github.com/heptio/velero -> github.com/vmware-tanzu/velero`. Who: a Velero developer; TBD
|
||||
- [ ] PR: Update website with the correct GH links. Who: a Velero developer; TBD
|
||||
- [ ] PR: Change deployment and grpc-push scripts with the new location path. Who: a Velero developer; TBD
|
||||
- [ ] Delete branches not to be carried over (https://github.com/heptio/velero/branches/all). Who: Any of the current repo owners; TBD
|
||||
|
||||
#### Move
|
||||
|
||||
- [ ] Use GH UI to transfer the repository to the VMW org; must be accepted within a day. Who: new org owner; TBD
|
||||
- [ ] Make owners of this repo owners of repo in the new org. Who: new org owner; TBD
|
||||
- [ ] Update Travis CI. Who: Any of the new repo owners; TBD
|
||||
- [ ] Add DCO for signoff check (https://probot.github.io/apps/dco/). Who: Any of the new repo owners; TBD
|
||||
|
||||
|
||||
#### Post move
|
||||
|
||||
- [ ] Each individual developer should point their origin to the new location: `git remote set-url origin git@github.com:vmware-tanzu/velero.git`
|
||||
- [ ] Transfer ZenHub. Who: Any of the new repo owners; TBD
|
||||
- [ ] Update Netlify deploy settings. Any of the new repo owners; TBD
|
||||
- [ ] GH app: Netlify integration. Who: Any of the new repo owners; TBD
|
||||
- [ ] GH app: Slack integration. Who: Any of the new repo owners; TBD
|
||||
- [ ] Add webhook: travis CI. Who: Any of the new repo owners; TBD
|
||||
- [ ] Add webhook: zenhub. Who: Any of the new repo owners; TBD
|
||||
- [ ] Move all 3 native provider plugins into their own individual repo. https://github.com/heptio/velero/issues/1537. Who: @carlisia.
|
||||
- [ ] Merge PRs from the "pre move" section
|
||||
- [ ] Create a team for the Velero core members (https://github.com/orgs/vmware-tanzu/teams/). Who: Any of the new repo owners; TBD
|
||||
|
||||
### Notes/How-Tos
|
||||
|
||||
#### Transferring the GH repository
|
||||
|
||||
All action items needed for the repo transfer are listed in the Todo list above. For details about what gets moved and other info, this is the GH documentation: https://help.github.com/en/articles/transferring-a-repository
|
||||
|
||||
[Pending] We will find out this week who will be the organization owner(s) who will accept this transfer in the new GH org. This organization owner will make all current owners in this repo owners in the new org Velero repo.
|
||||
|
||||
#### Updating Travis CI
|
||||
|
||||
Someone with owner permission on the new repository needs to go to their Travis CI account and authorize Travis CI on the repo. Here are instructions: https://docs.travis-ci.com/user/tutorial/.
|
||||
|
||||
After this, webhook notifications can be added following these instructions: https://docs.travis-ci.com/user/notifications/#configuring-webhook-notifications.
|
||||
|
||||
#### Transferring ZenHub
|
||||
|
||||
Pre-requisite: A new Zenhub account must exist for a vmware or vmware-tanzu organization.
|
||||
|
||||
This page contains a pre-migration checklist for ensuring a repo migration goes well with Zenhub: https://help.zenhub.com/support/solutions/articles/43000010366-moving-a-repo-cross-organization-or-to-a-new-organization. After this, webhooks can be added by following these instructions: https://github.com/ZenHubIO/API#webhooks.
|
||||
|
||||
#### Updating Netlify
|
||||
|
||||
The settings for Netlify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
|
||||
|
||||
#### Communication strategy
|
||||
|
||||
[Pending] We will find out this week how this move will be communicated to the community. In particular, the Velero repository move might be tied to the move of our provider plugins into their own repos, also in the new org: https://github.com/heptio/velero/issues/1814.
|
||||
|
||||
#### TBD
|
||||
|
||||
Many items on the todo list must be done by a repository member with owner permission. This doesn't all need to be done by the same person obviously, but we should specify if @skriss wants to split these tasks with any other owner(s).
|
||||
|
||||
#### Other notes
|
||||
|
||||
Might want to exclude updating documentation prior to v1.0.0.
|
||||
GH documentation does not specify if branches on the server are also moved.
|
||||
All links to the original repository location are automatically redirected to the new location.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
Alternatives such as moving Velero to its own organization, or even not moving at all, were considered. Collectively, however, the open source leadership decided it would be best to move it so it lives alongside other VMware supported cloud native related repositories.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Ensure that only the Velero core team has maintainer/owner privileges.
|
||||
@@ -1,129 +0,0 @@
|
||||
# Plan to extract the provider plugins out of (the Velero) tree
|
||||
|
||||
Currently, the Velero project contains in-tree plugins for three cloud providers: AWS, Azure, and GCP. The Velero team has decided to extract each of those plugins into their own separate repository. This document details the steps necessary to create the new repositories, as well as a general design for what each plugin project will look like.
|
||||
|
||||
## Goals
|
||||
|
||||
- Have 3 new repositories for each cloud provider plugin currently supported by the Velero team: AWS, Azure, and GCP
|
||||
- Have the currently in-tree cloud provider plugins behave like any other plugin external to Velero
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Extend the Velero plugin framework capability in any way
|
||||
- Create GH repositories for any plugin other than the currently 3 in-tree plugins
|
||||
- Extract out any plugin that is not a cloud provider plugin (ex: item action related plugins)
|
||||
|
||||
## Background
|
||||
|
||||
With more and more providers wanting to support Velero, it gets more difficult to justify excluding those from being in-tree just as with the three original ones. At the same time, if we were to include any more plugins in-tree, it would ultimately become the responsibility of the Velero team to maintain an increasing number of plugins. This move aims to equalize the field so all plugins are treated equally. We also hope that, with time, developers interested in getting involved in the upkeep of those plugins will become active enough to be promoted to maintainers. Lastly, having the plugins live in their own individual repositories allows for iteration on them separately from the core codebase.
|
||||
|
||||
## Action items
|
||||
|
||||
### Todo list
|
||||
|
||||
#### Repository creation
|
||||
|
||||
- [ ] Use GH UI to create each repository in the new VMW org. Who: new org owner; TBD
|
||||
- [ ] Make owners of the Velero repo owners of each repo in the new org. Who: new org owner; TBD
|
||||
- [ ] Add Travis CI. Who: Any of the new repo owners; TBD
|
||||
- [ ] Add webhook: travis CI. Who: Any of the new repo owners; TBD
|
||||
- [ ] Add DCO for signoff check (https://probot.github.io/apps/dco/). Who: Any of the new repo owners; TBD
|
||||
|
||||
#### Plugin changes
|
||||
|
||||
- [ ] Modify Velero so it can install any of the provider plugins. https://github.com/heptio/velero/issues/1740 - Who: @nrb
|
||||
- [ ] Extract each provider plugin into their own repo. https://github.com/heptio/velero/issues/1537
|
||||
- [ ] Create deployment and gcr-push scripts with the new location path. Who: @carlisia
|
||||
- [ ] Add documentation for how to use the plugin. Who: @carlisia
|
||||
- [ ] Update Helm chart to install Velero using any of the provider plugins. https://github.com/heptio/velero/issues/1819
|
||||
- [ ] Upgrade script. https://github.com/heptio/velero/issues/1889.
|
||||
|
||||
### Notes/How-Tos
|
||||
|
||||
#### Creating the GH repository
|
||||
|
||||
[Pending] The organization owner will make all current owners in the Velero repo also owners in each of the new org plugin repos.
|
||||
|
||||
#### Setting up Travis CI
|
||||
|
||||
Someone with owner permission on the new repository needs to go to their Travis CI account and authorize Travis CI on the repo. Here are instructions: https://docs.travis-ci.com/user/tutorial/.
|
||||
|
||||
After this, any webhook notifications can be added following these instructions: https://docs.travis-ci.com/user/notifications/#configuring-webhook-notifications.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
Each provider plugin will be an independent project, using the Velero library to implement their specific functionalities.
|
||||
|
||||
The way Velero is installed will be changed to accommodate installing these plugins at deploy time, namely the Velero `install` command, as well as the Helm chart.
|
||||
|
||||
Each plugin repository will need to have their respective images built and pushed to the same registry as the Velero images.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Projects
|
||||
|
||||
Each provider plugin will be an independent GH repository, named: `velero-plugin-aws`, `velero-plugin-azure`, and `velero-plugin-gcp`.
|
||||
|
||||
Build of the project will be done the same way as with Velero, using Travis.
|
||||
|
||||
Images for all the plugins will be pushed to the same repository as the Velero image, also using Travis.
|
||||
|
||||
Releases of each of these plugins will happen in sync with releases of Velero. This will consist of having a tag in the repo and a tagged image build with the same release version as Velero so it makes it easy to identify what versions are compatible, starting at v1.2.
|
||||
|
||||
Documentation for how to install and use the plugins will be augmented in the existing Plugins section of the Velero documentation.
|
||||
|
||||
Documentation for how to use each plugin will reside in their respective repos. The navigation on the Velero documentation will be modified for easy discovery of the docs/images for these plugins.
|
||||
|
||||
#### Version compatibility
|
||||
|
||||
We will keep the major and minor release points in sync, but the plugins can have multiple minor dot something releases as long as it remains compatible with the corresponding major/minor release of Velero. Ex:
|
||||
|
||||
| Velero | Plugin | Compatible? |
|
||||
|---|---|---|
|
||||
| v1.2 | v1.2 | ✅ |
|
||||
| v1.2 | v1.2.3 | ✅ |
|
||||
| v1.2 | v1.3 | 🚫 |
|
||||
| v1.3 | v1.2 | 🚫 |
|
||||
| v1.3 | v1.3.3 | ✅ |
|
||||
|
||||
### Installation
|
||||
|
||||
As per https://github.com/heptio/velero/issues/1740, we will add a `plugins` flag to the Velero install command which will accept an array of URLs pointing to one or more images of plugins to be installed. The `velero plugin add` command should continue working as is; in particular, it should also allow the installation of any of the new 3 provider plugins. @nrb will provide specifics about how this change will be tackled, as well as what will be documented. Part of the work of adding the `plugins` flag will be removing the logic that adds `velero.io` name spacing to plugins that are added without it.
|
||||
|
||||
The Helm chart that allows the installation of Velero will be modified to accept the array of plugin images with an added `plugins` configuration item.
|
||||
|
||||
### Design code changes and considerations
|
||||
|
||||
The naming convention to use for name spacing each plugin will be `velero.io`, since they are currently maintained by the Velero team.
|
||||
|
||||
Install dep
|
||||
|
||||
Question: are there any places outside the plugins where we depend on the cloud-provider SDKs? can we eliminate those dependencies too?
|
||||
|
||||
- the `restic` package uses the `aws` SDK to get the bucket region for the AWS object store (https://github.com/carlisia/velero/blob/32d46871ccbc6b03e415d1e3d4ad9ae2268b977b/pkg/restic/config.go#L41)
|
||||
- could not find usage of the cloud provider SDKs anywhere else.
|
||||
|
||||
Plugins such as the pod -> pvc -> pv backupitemaction ones make sense to stay in the core repo as they provide some important logic that just happens to be implemented in a plugin.
|
||||
|
||||
### Upgrade
|
||||
|
||||
The documentation for how to fresh install the out-of-tree plugin with Velero v1.2 will be specified together with the documentation for the install changes on issue https://github.com/heptio/velero/issues/1740.
|
||||
|
||||
For upgrades, we will provide a script that will:
|
||||
|
||||
- change the tag on the Velero deployment yaml for both the main image and any of the three plugins installed.
|
||||
- rename existing aws, azure or gcp plugin names to have the `velero.io/` namespace preceding the name (ex: `velero.io/aws`).
|
||||
|
||||
Alternatively, we could add CLI `velero upgrade` command that would make these changes. Ex: `velero upgrade 1.3` would upgrade from `v1.2` to `v1.3`.
|
||||
|
||||
For upgrading:
|
||||
|
||||
- Edit the provider field in the backupstoragelocations and volumesnapshotlocations CRDs to include the new namespace.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
We considered having the plugins all live in the same GH repository. The downside of that approach is ending up with a binary and image bigger than necessary, since they would contain the SDKs of all three providers.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Ensure that only the Velero core team has maintainer/owner privileges.
|
||||
@@ -7,5 +7,5 @@ This directory contains sample YAML config files that can be used for exploring
|
||||
* `nginx-app/`: A sample nginx app that can be used to test backups and restores.
|
||||
|
||||
|
||||
[0]: /docs/contributions/minio.md
|
||||
[0]: /docs/get-started.md
|
||||
[1]: https://github.com/minio/minio
|
||||
|
||||
@@ -37,7 +37,7 @@ spec:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx:1.17.6
|
||||
- image: nginx:1.7.9
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
@@ -29,7 +29,6 @@ metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
# Optional:
|
||||
# storageClassName: <YOUR_STORAGE_CLASS_NAME>
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
@@ -63,7 +62,7 @@ spec:
|
||||
persistentVolumeClaim:
|
||||
claimName: nginx-logs
|
||||
containers:
|
||||
- image: nginx:1.17.6
|
||||
- image: nginx:1.7.9
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -71,7 +70,7 @@ spec:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: nginx-logs
|
||||
readOnly: false
|
||||
- image: ubuntu:bionic
|
||||
- image: gcr.io/heptio-images/fsfreeze-pause:latest
|
||||
name: fsfreeze
|
||||
securityContext:
|
||||
privileged: true
|
||||
@@ -79,12 +78,7 @@ spec:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: nginx-logs
|
||||
readOnly: false
|
||||
command:
|
||||
- "/bin/bash"
|
||||
- "-c"
|
||||
- "sleep infinity"
|
||||
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
||||
42
go.mod
42
go.mod
@@ -1,42 +0,0 @@
|
||||
module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.46.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.9.0
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.13.12
|
||||
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29 // indirect
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible
|
||||
github.com/go-ini/ini v1.28.2 // indirect
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/gofrs/uuid v3.2.0+incompatible
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
|
||||
github.com/joho/godotenv v1.3.0
|
||||
github.com/kubernetes-csi/external-snapshotter/v2 v2.1.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v1.0.0
|
||||
github.com/robfig/cron v0.0.0-20170309132418-df38d32658d8
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/smartystreets/goconvey v1.6.4 // indirect
|
||||
github.com/spf13/afero v1.2.2
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.4.0
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
|
||||
google.golang.org/grpc v1.26.0
|
||||
k8s.io/api v0.17.4
|
||||
k8s.io/apiextensions-apiserver v0.17.4
|
||||
k8s.io/apimachinery v0.17.4
|
||||
k8s.io/cli-runtime v0.17.4
|
||||
k8s.io/client-go v0.17.4
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/utils v0.0.0-20191218082557-f07c713de883 // indirect
|
||||
)
|
||||
583
go.sum
583
go.sum
@@ -1,583 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.2 h1:CzaxDL0yS5OHsygr9wRodEjP93JHp67vzlRDGlVZTJw=
|
||||
cloud.google.com/go v0.46.2/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible h1:PkmdmQUmeSdQQ5258f4SyCf2Zcz0w67qztEg37cOR7U=
|
||||
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
|
||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.13.12 h1:+L7aikwNnPSzdbgbbcAc0M9spXpvD64Kug3w6CnJxnw=
|
||||
github.com/aws/aws-sdk-go v1.13.12/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
|
||||
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29 h1:llBx5m8Gk0lrAaiLud2wktkX/e8haX7Ru0oVfQqtZQ4=
|
||||
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
|
||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/go-ini/ini v1.28.2 h1:drmmYv7psRpoGZkPtPKKTB+ZFSnvmwCMfNj5o1nLh2Y=
|
||||
github.com/go-ini/ini v1.28.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
|
||||
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
|
||||
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
|
||||
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
|
||||
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
|
||||
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
|
||||
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
|
||||
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
|
||||
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
|
||||
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
|
||||
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
||||
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26 h1:sADP8l/FAtMyWJ9GIcQT/04Ae80ZZ75ogOrtW0DIZhc=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.7.0/go.mod h1:bze+2G9+cmoHxN6+WyG1qT4MDxgZJMLGwc7V4acPNm0=
|
||||
github.com/kubernetes-csi/csi-test v2.0.0+incompatible/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0=
|
||||
github.com/kubernetes-csi/external-snapshotter/v2 v2.1.0 h1:a1cpbNAdOTHO7Lk5UO5tjcbYPEEamIxmzATt+pKoDhc=
|
||||
github.com/kubernetes-csi/external-snapshotter/v2 v2.1.0/go.mod h1:dV5oB3U62KBdlf9ADWkMmjGd3USauqQtwIm2OZb5mqI=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
|
||||
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
github.com/robfig/cron v0.0.0-20170309132418-df38d32658d8 h1:b904/jbnmYuSPd5ojGzVTLjKPVTSj3t/e1vEPiPGjEg=
|
||||
github.com/robfig/cron v0.0.0-20170309132418-df38d32658d8/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220220014-0732a990476f h1:72l8qCJ1nGxMGH26QVBVIxKd/D34cfGt0OvrPtpemyY=
|
||||
golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191220175831-5c49e3ecc1c1 h1:PlscBL5CvF+v1mNR82G+i4kACGq2JQvKDnNq7LSS65o=
|
||||
google.golang.org/genproto v0.0.0-20191220175831-5c49e3ecc1c1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
|
||||
k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo=
|
||||
k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA=
|
||||
k8s.io/apiextensions-apiserver v0.17.4 h1:ZKFnw3cJrGZ/9s6y+DerTF4FL+dmK0a04A++7JkmMho=
|
||||
k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI=
|
||||
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.17.1-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw=
|
||||
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
|
||||
k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
|
||||
k8s.io/cli-runtime v0.17.4 h1:ZIJdxpBEszZqUhydrCoiI5rLXS2J/1AF5xFok2QJ9bc=
|
||||
k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc=
|
||||
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
|
||||
k8s.io/client-go v0.17.4 h1:VVdVbpTY70jiNHS1eiFkUt7ZIJX3txd29nDxxXH4en8=
|
||||
k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc=
|
||||
k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ=
|
||||
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
|
||||
k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kubernetes v1.14.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20191218082557-f07c713de883 h1:TA8t8OLS8m3/0dtTckekO0pCQ7qMnD19fsZTQEgCSKQ=
|
||||
k8s.io/utils v0.0.0-20191218082557-f07c713de883/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018, 2019, 2020 the Velero contributors.
|
||||
# Copyright 2018 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,36 +12,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM golang:1.14
|
||||
FROM golang:1.12
|
||||
|
||||
ENV GO111MODULE=on
|
||||
# Use a proxy for go modules to reduce the likelihood of various hosts being down and breaking the build
|
||||
ENV GOPROXY=https://proxy.golang.org
|
||||
|
||||
# get code-generation tools (for now keep in GOPATH since they're not fully modules-compatible yet)
|
||||
RUN mkdir -p /go/src/k8s.io
|
||||
WORKDIR /go/src/k8s.io
|
||||
RUN git config --global advice.detachedHead false
|
||||
RUN git clone -b kubernetes-1.17.0 https://github.com/kubernetes/code-generator
|
||||
|
||||
# get controller-tools
|
||||
RUN go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.4
|
||||
|
||||
# get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
|
||||
# is not important)
|
||||
RUN go get golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
|
||||
|
||||
# get protoc compiler and golang plugin
|
||||
WORKDIR /root
|
||||
RUN apt-get update && apt-get install -y unzip
|
||||
RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v3.9.1/protoc-3.9.1-linux-x86_64.zip && \
|
||||
unzip protoc-3.9.1-linux-x86_64.zip && \
|
||||
mv bin/protoc /usr/bin/protoc && \
|
||||
chmod +x /usr/bin/protoc
|
||||
RUN go get github.com/golang/protobuf/protoc-gen-go@v1.0.0
|
||||
|
||||
# get goreleaser
|
||||
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \
|
||||
tar xvf goreleaser_Linux_x86_64.tar.gz && \
|
||||
mv goreleaser /usr/bin/goreleaser && \
|
||||
chmod +x /usr/bin/goreleaser
|
||||
RUN mkdir -p /go/src/k8s.io && \
|
||||
cd /go/src/k8s.io && \
|
||||
git config --global advice.detachedHead false && \
|
||||
git clone -b kubernetes-1.14.0 https://github.com/kubernetes/code-generator && \
|
||||
git clone -b kubernetes-1.14.0 https://github.com/kubernetes/apimachinery && \
|
||||
go get golang.org/x/tools/cmd/goimports && \
|
||||
cd /go/src/golang.org/x/tools && \
|
||||
git checkout 40a48ad93fbe707101afb2099b738471f70594ec && \
|
||||
go install ./cmd/goimports && \
|
||||
echo chmod -R a+w /go
|
||||
|
||||
@@ -39,13 +39,10 @@ if [ -z "${VERSION}" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${GIT_SHA}" ]; then
|
||||
echo "GIT_SHA must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export CGO_ENABLED=0
|
||||
|
||||
GIT_SHA=$(git rev-parse HEAD)
|
||||
GIT_DIRTY=$(git status --porcelain 2> /dev/null)
|
||||
if [[ -z "${GIT_DIRTY}" ]]; then
|
||||
GIT_TREE_STATE=clean
|
||||
else
|
||||
|
||||
@@ -18,8 +18,6 @@ set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
|
||||
CHANGELOG_PATH='changelogs/unreleased'
|
||||
UNRELEASED=$(ls -t ${CHANGELOG_PATH})
|
||||
echo -e "Generating CHANGELOG markdown from ${CHANGELOG_PATH}\n"
|
||||
@@ -27,9 +25,7 @@ for entry in $UNRELEASED
|
||||
do
|
||||
IFS=$'-' read -ra pruser <<<"$entry"
|
||||
contents=$(cat ${CHANGELOG_PATH}/${entry})
|
||||
pr=${pruser[0]}
|
||||
user=$(join '-' ${pruser[@]:1})
|
||||
echo " * ${contents} (#${pr}, @${user})"
|
||||
echo " * ${contents} (#${pruser[0]}, @${pruser[1]})"
|
||||
done
|
||||
echo -e "\nCopy and paste the list above in to the appropriate CHANGELOG file."
|
||||
echo "Be sure to run: git rm ${CHANGELOG_PATH}/*"
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This code embeds the CRD manifests in pkg/generated/crds/manifests in
|
||||
// pkg/generated/crds/crds.go.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// This is relative to pkg/generated/crds
|
||||
const goHeaderFile = "../../../hack/boilerplate.go.txt"
|
||||
|
||||
const tpl = `{{.GoHeader}}
|
||||
// Code generated by crds_generate.go; DO NOT EDIT.
|
||||
|
||||
package crds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io/ioutil"
|
||||
|
||||
apiextinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install"
|
||||
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
)
|
||||
|
||||
var rawCRDs = [][]byte{
|
||||
{{- range .RawCRDs }}
|
||||
[]byte({{ . }}),
|
||||
{{- end }}
|
||||
}
|
||||
|
||||
var CRDs = crds()
|
||||
|
||||
func crds() []*apiextv1beta1.CustomResourceDefinition {
|
||||
apiextinstall.Install(scheme.Scheme)
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
var objs []*apiextv1beta1.CustomResourceDefinition
|
||||
for _, crd := range rawCRDs {
|
||||
gzr, err := gzip.NewReader(bytes.NewReader(crd))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bytes, err := ioutil.ReadAll(gzr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
gzr.Close()
|
||||
|
||||
obj, _, err := decode(bytes, nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
objs = append(objs, obj.(*apiextv1beta1.CustomResourceDefinition))
|
||||
}
|
||||
return objs
|
||||
}
|
||||
`
|
||||
|
||||
type templateData struct {
|
||||
GoHeader string
|
||||
RawCRDs []string
|
||||
}
|
||||
|
||||
func main() {
|
||||
headerBytes, err := ioutil.ReadFile(goHeaderFile)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
GoHeader: string(headerBytes),
|
||||
}
|
||||
|
||||
// This is relative to pkg/generated/crds
|
||||
manifests, err := ioutil.ReadDir("manifests")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
for _, crd := range manifests {
|
||||
file, err := os.Open("manifests/" + crd.Name())
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// gzip compress manifest
|
||||
var buf bytes.Buffer
|
||||
gzw := gzip.NewWriter(&buf)
|
||||
if _, err := io.Copy(gzw, file); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
file.Close()
|
||||
gzw.Close()
|
||||
|
||||
data.RawCRDs = append(data.RawCRDs, fmt.Sprintf("%q", buf.Bytes()))
|
||||
}
|
||||
|
||||
t, err := template.New("crds").Parse(tpl)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
out, err := os.Create("crds.go")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if err := t.Execute(out, data); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# docker-push is invoked by the CI/CD system to deploy docker images to Docker Hub.
|
||||
# It will build images for all commits to master and all git tags.
|
||||
# The highest, non-prerelease semantic version will also be given the `latest` tag.
|
||||
|
||||
set +x
|
||||
|
||||
if [[ -z "$CI" ]]; then
|
||||
echo "This script is intended to be run only on Github Actions." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Return value is written into HIGHEST
|
||||
HIGHEST=""
|
||||
function highest_release() {
|
||||
# Loop through the tags since pre-release versions come before the actual versions.
|
||||
# Iterate til we find the first non-pre-release
|
||||
|
||||
# This is not necessarily the most recently made tag; instead, we want it to be the highest semantic version.
|
||||
# The most recent tag could potentially be a lower semantic version, made as a point release for a previous series.
|
||||
# As an example, if v1.3.0 exists and we create v1.2.2, v1.3.0 should still be `latest`.
|
||||
# `git describe --tags $(git rev-list --tags --max-count=1)` would return the most recently made tag.
|
||||
|
||||
for t in $(git tag -l --sort=-v:refname);
|
||||
do
|
||||
# If the tag has alpha, beta or rc in it, it's not "latest"
|
||||
if [[ "$t" == *"beta"* || "$t" == *"alpha"* || "$t" == *"rc"* ]]; then
|
||||
continue
|
||||
fi
|
||||
HIGHEST="$t"
|
||||
break
|
||||
done
|
||||
}
|
||||
|
||||
triggeredBy=$(echo $GITHUB_REF | cut -d / -f 2)
|
||||
if [[ "$triggeredBy" == "heads" ]]; then
|
||||
BRANCH=$(echo $GITHUB_REF | cut -d / -f 3)
|
||||
TAG=
|
||||
elif [[ "$triggeredBy" == "tags" ]]; then
|
||||
BRANCH=
|
||||
TAG=$(echo $GITHUB_REF | cut -d / -f 3)
|
||||
fi
|
||||
|
||||
if [[ "$BRANCH" == "master" ]]; then
|
||||
VERSION="$BRANCH"
|
||||
elif [[ ! -z "$TAG" ]]; then
|
||||
# Explicitly checkout tags when building from a git tag.
|
||||
# This is not needed when building from master
|
||||
git fetch --tags
|
||||
# Calculate the latest release if there's a tag.
|
||||
highest_release
|
||||
VERSION="$TAG"
|
||||
else
|
||||
echo "We're not on master and we're not building a tag, exit early."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Assume we're not tagging `latest` by default, and never on master.
|
||||
TAG_LATEST=false
|
||||
if [[ "$BRANCH" == "master" ]]; then
|
||||
echo "Building master, not tagging latest."
|
||||
elif [[ "$TAG" == "$HIGHEST" ]]; then
|
||||
TAG_LATEST=true
|
||||
fi
|
||||
|
||||
# Debugging info
|
||||
echo "Highest tag found: $HIGHEST"
|
||||
echo "BRANCH: $BRANCH"
|
||||
echo "TAG: $TAG"
|
||||
echo "TAG_LATEST: $TAG_LATEST"
|
||||
|
||||
echo "Building and pushing container images."
|
||||
|
||||
VERSION="$VERSION" TAG_LATEST="$TAG_LATEST" make all-containers all-push all-manifests
|
||||
25
hack/gcr-push.sh
Executable file
25
hack/gcr-push.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Return value is written into LATEST
|
||||
LATEST=false
|
||||
function is_latest_release() {
|
||||
# If the tag has alpha, beta or rc in it, it's not "latest"
|
||||
if [[ "$TRAVIS_TAG" == *"beta"* || "$TRAVIS_TAG" == *"alpha"* || "$TRAVIS_TAG" == *"rc"* ]]; then
|
||||
LATEST=false
|
||||
else
|
||||
LATEST=true
|
||||
fi
|
||||
}
|
||||
|
||||
# Always publish for master
|
||||
if [ "$BRANCH" == "master" ]; then
|
||||
VERSION="$BRANCH" make all-containers all-push
|
||||
fi
|
||||
|
||||
# Publish when TRAVIS_TAG is defined.
|
||||
if [ ! -z "$TRAVIS_TAG" ]; then
|
||||
# Check if this is the latest release.
|
||||
is_latest_release
|
||||
|
||||
VERSION="$TRAVIS_TAG" TAG_LATEST="$LATEST" make all-containers all-push
|
||||
fi
|
||||
@@ -66,8 +66,8 @@ rm site/_data/$NEW_DOCS_TOC.yml && cp site/_data/master-toc.yml site/_data/$NEW_
|
||||
# so check which OS we're running on.
|
||||
if [[ $(uname) == "Darwin" ]]; then
|
||||
echo "[OS X] updating version-specific links"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://velero.io/docs/master|https://velero.io/docs/$VELERO_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://github.com/vmware-tanzu/velero/blob/master|https://github.com/vmware-tanzu/velero/blob/$VELERO_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://velero.io/docs/master|https://velero.io/docs/$NEW_DOCS_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://github.com/heptio/velero/blob/master|https://github.com/heptio/velero/blob/$NEW_DOCS_VERSION|g"
|
||||
|
||||
echo "[OS X] Updating latest version in _config.yml"
|
||||
sed -i '' "s/latest: ${PREVIOUS_DOCS_VERSION}/latest: ${NEW_DOCS_VERSION}/" site/_config.yml
|
||||
@@ -86,8 +86,8 @@ ${NEW_DOCS_VERSION}: ${NEW_DOCS_TOC}
|
||||
|
||||
else
|
||||
echo "[Linux] updating version-specific links"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://velero.io/docs/master|https://velero.io/docs/$VELERO_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://github.com/vmware-tanzu/velero/blob/master|https://github.com/vmware-tanzu/velero/blob/$VELERO_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://velero.io/docs/master|https://velero.io/docs/$NEW_DOCS_VERSION|g"
|
||||
find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://github.com/heptio/velero/blob/master|https://github.com/heptio/velero/blob/$NEW_DOCS_VERSION|g"
|
||||
|
||||
echo "[Linux] Updating latest version in _config.yml"
|
||||
sed -i'' "s/latest: ${PREVIOUS_DOCS_VERSION}/latest: ${NEW_DOCS_VERSION}/" site/_config.yml
|
||||
|
||||
23
hack/generate-proto.sh
Executable file
23
hack/generate-proto.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Copyright 2017 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
HACK_DIR=$(dirname "${BASH_SOURCE}")
|
||||
|
||||
echo "Running protoc"
|
||||
|
||||
protoc pkg/plugin/proto/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ -I pkg/plugin/proto/
|
||||
|
||||
echo "Success!"
|
||||
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [ -z "${RESTIC_VERSION}" ]; then
|
||||
echo "RESTIC_VERSION must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -d "_output/bin/linux/ppc64le/" ]; then
|
||||
mkdir -p _output/bin/linux/ppc64le/
|
||||
fi
|
||||
|
||||
wget --quiet https://oplab9.parqtec.unicamp.br/pub/ppc64el/restic/restic-${RESTIC_VERSION}
|
||||
mv restic-${RESTIC_VERSION} _output/bin/linux/ppc64le/restic
|
||||
chmod +x _output/bin/linux/ppc64le/restic
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"os"
|
||||
"text/template"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd/cli/bug"
|
||||
"github.com/heptio/velero/pkg/cmd/cli/bug"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// This regex should match both our GA format (example: v1.4.3) and pre-release format (v1.2.4-beta.2)
|
||||
// The following sub-capture groups are defined:
|
||||
// major
|
||||
// minor
|
||||
// patch
|
||||
// prerelease (this will be alpha/beta followed by a ".", followed by 1 or more digits (alpha.5)
|
||||
var release_regex *regexp.Regexp = regexp.MustCompile("^v(?P<major>[[:digit:]]+)\\.(?P<minor>[[:digit:]]+)\\.(?P<patch>[[:digit:]]+)(-{1}(?P<prerelease>(alpha|beta)\\.[[:digit:]]+))*")
|
||||
|
||||
// This small program exists because checking the VELERO_VERSION rules in bash is difficult, and difficult to test for correctness.
|
||||
// Calling it with --verify will verify whether or not the VELERO_VERSION environment variable is a valid version string, without parsing for its components.
|
||||
// Calling it without --verify will try to parse the version into its component pieces.
|
||||
func main() {
|
||||
|
||||
velero_version := os.Getenv("VELERO_VERSION")
|
||||
|
||||
submatches := reSubMatchMap(release_regex, velero_version)
|
||||
|
||||
// Didn't match the regex, exit.
|
||||
if len(submatches) == 0 {
|
||||
fmt.Printf("VELERO_VERSION of %s was not valid. Please correct the value and retry.", velero_version)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(os.Args) > 1 && os.Args[1] == "--verify" {
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// Send these in a bash variable format to stdout, so that they can be consumed by bash scripts that call the go program.
|
||||
fmt.Printf("VELERO_MAJOR=%s\n", submatches["major"])
|
||||
fmt.Printf("VELERO_MINOR=%s\n", submatches["minor"])
|
||||
fmt.Printf("VELERO_PATCH=%s\n", submatches["patch"])
|
||||
fmt.Printf("VELERO_PRERELEASE=%s\n", submatches["prerelease"])
|
||||
}
|
||||
|
||||
// reSubMatchMap returns a map with the named submatches within a regular expression populated as keys, and their matched values within a given string as values.
|
||||
// If no matches are found, a nil map is returned
|
||||
func reSubMatchMap(r *regexp.Regexp, s string) map[string]string {
|
||||
match := r.FindStringSubmatch(s)
|
||||
submatches := make(map[string]string)
|
||||
if len(match) == 0 {
|
||||
return submatches
|
||||
}
|
||||
for i, name := range r.SubexpNames() {
|
||||
// 0 will always be empty from the return values of SubexpNames's documentation, so skip it.
|
||||
if i != 0 {
|
||||
submatches[name] = match[i]
|
||||
}
|
||||
}
|
||||
|
||||
return submatches
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRegexMatching(t *testing.T) {
|
||||
tests := []struct {
|
||||
version string
|
||||
expectMatch bool
|
||||
}{
|
||||
{
|
||||
version: "v1.4.0",
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
version: "v2.0.0",
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
version: "v1.5.0-alpha.1",
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
version: "v1.16.1320-beta.14",
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
version: "1.0.0",
|
||||
expectMatch: false,
|
||||
},
|
||||
{
|
||||
// this is true because while the "--" is invalid, v1.0.0 is a valid part of the regex
|
||||
version: "v1.0.0--beta.1",
|
||||
expectMatch: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
name := fmt.Sprintf("Testing version string %s", test.version)
|
||||
t.Run(name, func(t *testing.T) {
|
||||
results := reSubMatchMap(release_regex, test.version)
|
||||
|
||||
if len(results) == 0 && test.expectMatch {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if len(results) > 0 && !test.expectMatch {
|
||||
fmt.Printf("%v", results)
|
||||
t.Fail()
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# This script will do the necessary checks and actions to create a release of Velero.
|
||||
# It will first validate that all prerequisites are met, then verify the version string is what the user expects.
|
||||
# A git tag will be created and pushed to GitHub, and GoReleaser will be invoked.
|
||||
|
||||
# This script is meant to be a combination of documentation and executable.
|
||||
# If you have questions at any point, please stop and ask!
|
||||
|
||||
# Directory in which the script itself resides, so we can use it for calling programs that are in the same directory.
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
|
||||
function tag_and_push() {
|
||||
echo "Tagging and pushing $VELERO_VERSION"
|
||||
git tag $VELERO_VERSION
|
||||
git push $VELERO_VERSION
|
||||
}
|
||||
|
||||
# For now, have the person doing the release pass in the VELERO_VERSION variable as an environment variable.
|
||||
# In the future, we might be able to inspect git via `git describe --abbrev=0` to get a hint for it.
|
||||
if [[ -z "$VELERO_VERSION" ]]; then
|
||||
printf "The \$VELERO_VERSION environment variable is not set. Please set it with\n\texport VELERO_VERSION=v<version.to.release>\nthen try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make sure the user's provided their github token, so we can give it to goreleaser.
|
||||
if [[ -z "$GITHUB_TOKEN" ]]; then
|
||||
printf "The GITHUB_TOKEN environment variable is not set. Please set it with\n\t export GITHUB_TOKEN=<your github token>\n then try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
Ensure that we have a clean working tree before we let any changes happen, especially important for cutting release branches.
|
||||
if [[ -n $(git status --short) ]]; then
|
||||
echo "Your git working directory is dirty! Please clean up untracked files and stash any changes before proceeding."
|
||||
exit 3
|
||||
fi
|
||||
|
||||
# Make sure that there's no issue with the environment variable's format before trying to eval the parsed version.
|
||||
if ! go run $DIR/chk_version.go --verify; then
|
||||
exit 2
|
||||
fi
|
||||
# Since we're past the validation of the VELERO_VERSION, parse the version's individual components.
|
||||
eval $(go run $DIR/chk_version.go)
|
||||
|
||||
|
||||
printf "To clarify, you've provided a version string of $VELERO_VERSION.\n"
|
||||
printf "Based on this, the following assumptions have been made: \n"
|
||||
|
||||
[[ "$VELERO_PATCH" != 0 ]] && printf "*\t This is a patch release.\n"
|
||||
|
||||
# -n is "string is non-empty"
|
||||
[[ -n $VELERO_PRERELEASE ]] && printf "*\t This is a pre-release.\n"
|
||||
|
||||
# -z is "string is empty"
|
||||
[[ -z $VELERO_PRERELEASE ]] && printf "*\t This is a GA release.\n"
|
||||
|
||||
echo "If this is all correct, press enter/return to proceed to TAG THE RELEASE and UPLOAD THE TAG TO GITHUB."
|
||||
echo "Otherwise, press ctrl-c to CANCEL the process without making any changes."
|
||||
|
||||
read -p "Ready to continue? "
|
||||
|
||||
echo "Alright, let's go."
|
||||
|
||||
echo "Pulling down all git tags and branches before doing any work."
|
||||
git fetch upstream --all --tags
|
||||
|
||||
# If we've got a patch release, we'll need to create a release branch for it.
|
||||
if [[ "$VELERO_PATCH" > 0 ]]; then
|
||||
release_branch_name=release-$VELERO_MAJOR.$VELERO_MINOR
|
||||
|
||||
# Check if the branch exists, creating it if not.
|
||||
# The fetch command above should have gotten all the upstream branches, so we can safely assume this check is local & upstream branches.
|
||||
if [[ -z $(git branch | grep $release_branch_name) ]]; then
|
||||
git checkout -b $release_branch_name
|
||||
echo "Release branch made."
|
||||
else
|
||||
echo "Release branch $release_branch_name exists already."
|
||||
git checkout $release_branch_name
|
||||
fi
|
||||
|
||||
echo "Now you'll need to cherry-pick any relevant git commits into this release branch."
|
||||
echo "Either pause this script with ctrl-z, or open a new terminal window and do the cherry-picking."
|
||||
read -p "Press enter when you're done cherry-picking. THIS WILL MAKE A TAG PUSH THE BRANCH TO UPSTREAM"
|
||||
|
||||
# TODO can/should we add a way to review the cherry-picked commits before the push?
|
||||
|
||||
echo "Pushing $release_branch_name to upstream remote"
|
||||
git push --set-upstream upstream/$release_branch_name $release_branch_name
|
||||
|
||||
tag_and_push
|
||||
else
|
||||
echo "Checking out upstream/master."
|
||||
git checkout upstream/master
|
||||
|
||||
tag_and_push
|
||||
fi
|
||||
|
||||
|
||||
|
||||
echo "Invoking Goreleaser to create the GitHub release."
|
||||
RELEASE_NOTES_FILE=changelogs/CHANGELOG-$VELERO_MAJOR.$VELERO_MINOR.md \
|
||||
PUBLISH=TRUE \
|
||||
make release
|
||||
@@ -33,7 +33,7 @@ if ! command -v goimports > /dev/null; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
files="$(find . -type f -name '*.go' -not -path './.go/*' -not -path './vendor/*' -not -path './site/*' -not -path '*/generated/*' -not -name 'zz_generated*' -not -path '*/mocks/*')"
|
||||
files="$(find . -type f -name '*.go' -not -path './vendor/*' -not -path './site/*' -not -path './pkg/generated/*' -not -name 'zz_generated*')"
|
||||
echo "${ACTION} gofmt"
|
||||
for file in ${files}; do
|
||||
output=$(gofmt "${MODE}" -s "${file}")
|
||||
@@ -50,7 +50,7 @@ fi
|
||||
|
||||
echo "${ACTION} goimports"
|
||||
for file in ${files}; do
|
||||
output=$(goimports "${MODE}" -local github.com/vmware-tanzu/velero "${file}")
|
||||
output=$(goimports "${MODE}" -local github.com/heptio/velero "${file}")
|
||||
if [[ -n "${output}" ]]; then
|
||||
VERIFY_IMPORTS_FAILED=1
|
||||
echo "${output}"
|
||||
|
||||
@@ -19,8 +19,6 @@ set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
# this script expects to be run from the root of the Velero repo.
|
||||
|
||||
if [[ -z "${GOPATH}" ]]; then
|
||||
GOPATH=~/go
|
||||
fi
|
||||
@@ -30,23 +28,12 @@ if [[ ! -d "${GOPATH}/src/k8s.io/code-generator" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v controller-gen > /dev/null; then
|
||||
echo "controller-gen is missing"
|
||||
exit 1
|
||||
fi
|
||||
cd ${GOPATH}/src/k8s.io/code-generator
|
||||
|
||||
${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
|
||||
./generate-groups.sh \
|
||||
all \
|
||||
github.com/vmware-tanzu/velero/pkg/generated \
|
||||
github.com/vmware-tanzu/velero/pkg/apis \
|
||||
github.com/heptio/velero/pkg/generated \
|
||||
github.com/heptio/velero/pkg/apis \
|
||||
"velero:v1" \
|
||||
--go-header-file ./hack/boilerplate.go.txt \
|
||||
--output-base ../../.. \
|
||||
--go-header-file ${GOPATH}/src/github.com/heptio/velero/hack/boilerplate.go.txt \
|
||||
$@
|
||||
|
||||
controller-gen \
|
||||
crd:crdVersions=v1beta1,preserveUnknownFields=false \
|
||||
output:dir=./pkg/generated/crds/manifests \
|
||||
paths=./pkg/apis/velero/v1/...
|
||||
|
||||
go generate ./pkg/generated/crds
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Copyright 2017, 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
HACK_DIR=$(dirname "${BASH_SOURCE}")
|
||||
|
||||
echo "Updating plugin proto"
|
||||
|
||||
protoc pkg/plugin/proto/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ -I pkg/plugin/proto/
|
||||
|
||||
echo "Updating plugin proto - done!"
|
||||
@@ -17,13 +17,3 @@
|
||||
HACK_DIR=$(dirname "${BASH_SOURCE}")
|
||||
|
||||
${HACK_DIR}/update-generated-crd-code.sh --verify-only
|
||||
|
||||
# ensure no changes to generated CRDs
|
||||
if ! git diff --exit-code pkg/generated/crds/crds.go >/dev/null; then
|
||||
# revert changes to state before running CRD generation to stay consistent
|
||||
# with code-generator `--verify-only` option which discards generated changes
|
||||
git checkout pkg/generated/crds
|
||||
|
||||
echo "CRD verification - failed! Generated CRDs are out-of-date, please run 'make update'."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -24,72 +24,52 @@ import (
|
||||
type BackupSpec struct {
|
||||
// IncludedNamespaces is a slice of namespace names to include objects
|
||||
// from. If empty, all namespaces are included.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedNamespaces []string `json:"includedNamespaces,omitempty"`
|
||||
IncludedNamespaces []string `json:"includedNamespaces"`
|
||||
|
||||
// ExcludedNamespaces contains a list of namespaces that are not
|
||||
// included in the backup.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"`
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces"`
|
||||
|
||||
// IncludedResources is a slice of resource names to include
|
||||
// in the backup. If empty, all resources are included.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedResources []string `json:"includedResources,omitempty"`
|
||||
IncludedResources []string `json:"includedResources"`
|
||||
|
||||
// ExcludedResources is a slice of resource names that are not
|
||||
// included in the backup.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedResources []string `json:"excludedResources,omitempty"`
|
||||
ExcludedResources []string `json:"excludedResources"`
|
||||
|
||||
// LabelSelector is a metav1.LabelSelector to filter with
|
||||
// when adding individual objects to the backup. If empty
|
||||
// or nil, all objects are included. Optional.
|
||||
// +optional
|
||||
// +nullable
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
|
||||
// SnapshotVolumes specifies whether to take cloud snapshots
|
||||
// of any PV's referenced in the set of objects included
|
||||
// in the Backup.
|
||||
// +optional
|
||||
// +nullable
|
||||
SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"`
|
||||
|
||||
// TTL is a time.Duration-parseable string describing how long
|
||||
// the Backup should be retained for.
|
||||
// +optional
|
||||
TTL metav1.Duration `json:"ttl,omitempty"`
|
||||
TTL metav1.Duration `json:"ttl"`
|
||||
|
||||
// IncludeClusterResources specifies whether cluster-scoped resources
|
||||
// should be included for consideration in the backup.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludeClusterResources *bool `json:"includeClusterResources,omitempty"`
|
||||
IncludeClusterResources *bool `json:"includeClusterResources"`
|
||||
|
||||
// Hooks represent custom behaviors that should be executed at different phases of the backup.
|
||||
// +optional
|
||||
Hooks BackupHooks `json:"hooks,omitempty"`
|
||||
Hooks BackupHooks `json:"hooks"`
|
||||
|
||||
// StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
|
||||
// +optional
|
||||
StorageLocation string `json:"storageLocation,omitempty"`
|
||||
StorageLocation string `json:"storageLocation"`
|
||||
|
||||
// VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
|
||||
// +optional
|
||||
VolumeSnapshotLocations []string `json:"volumeSnapshotLocations,omitempty"`
|
||||
VolumeSnapshotLocations []string `json:"volumeSnapshotLocations"`
|
||||
}
|
||||
|
||||
// BackupHooks contains custom behaviors that should be executed at different phases of the backup.
|
||||
type BackupHooks struct {
|
||||
// Resources are hooks that should be executed when backing up individual instances of a resource.
|
||||
// +optional
|
||||
// +nullable
|
||||
Resources []BackupResourceHookSpec `json:"resources,omitempty"`
|
||||
Resources []BackupResourceHookSpec `json:"resources"`
|
||||
}
|
||||
|
||||
// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on
|
||||
@@ -97,42 +77,23 @@ type BackupHooks struct {
|
||||
type BackupResourceHookSpec struct {
|
||||
// Name is the name of this hook.
|
||||
Name string `json:"name"`
|
||||
|
||||
// IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies
|
||||
// to all namespaces.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedNamespaces []string `json:"includedNamespaces,omitempty"`
|
||||
|
||||
IncludedNamespaces []string `json:"includedNamespaces"`
|
||||
// ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"`
|
||||
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces"`
|
||||
// IncludedResources specifies the resources to which this hook spec applies. If empty, it applies
|
||||
// to all resources.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedResources []string `json:"includedResources,omitempty"`
|
||||
|
||||
IncludedResources []string `json:"includedResources"`
|
||||
// ExcludedResources specifies the resources to which this hook spec does not apply.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedResources []string `json:"excludedResources,omitempty"`
|
||||
|
||||
ExcludedResources []string `json:"excludedResources"`
|
||||
// LabelSelector, if specified, filters the resources to which this hook spec applies.
|
||||
// +optional
|
||||
// +nullable
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
|
||||
// PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup.
|
||||
// These are executed before any "additional items" from item actions are processed.
|
||||
// +optional
|
||||
PreHooks []BackupResourceHook `json:"pre,omitempty"`
|
||||
|
||||
// PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup.
|
||||
// These are executed after all "additional items" from item actions are processed.
|
||||
// +optional
|
||||
PostHooks []BackupResourceHook `json:"post,omitempty"`
|
||||
}
|
||||
|
||||
@@ -146,32 +107,23 @@ type BackupResourceHook struct {
|
||||
type ExecHook struct {
|
||||
// Container is the container in the pod where the command should be executed. If not specified,
|
||||
// the pod's first container is used.
|
||||
// +optional
|
||||
Container string `json:"container,omitempty"`
|
||||
|
||||
Container string `json:"container"`
|
||||
// Command is the command and arguments to execute.
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
Command []string `json:"command"`
|
||||
|
||||
// OnError specifies how Velero should behave if it encounters an error executing this hook.
|
||||
// +optional
|
||||
OnError HookErrorMode `json:"onError,omitempty"`
|
||||
|
||||
OnError HookErrorMode `json:"onError"`
|
||||
// Timeout defines the maximum amount of time Velero should wait for the hook to complete before
|
||||
// considering the execution a failure.
|
||||
// +optional
|
||||
Timeout metav1.Duration `json:"timeout,omitempty"`
|
||||
Timeout metav1.Duration `json:"timeout"`
|
||||
}
|
||||
|
||||
// HookErrorMode defines how Velero should treat an error from a hook.
|
||||
// +kubebuilder:validation:Enum=Continue;Fail
|
||||
type HookErrorMode string
|
||||
|
||||
const (
|
||||
// HookErrorModeContinue means that an error from a hook is acceptable, and the backup can
|
||||
// proceed.
|
||||
HookErrorModeContinue HookErrorMode = "Continue"
|
||||
|
||||
// HookErrorModeFail means that an error from a hook is problematic, and the backup should be in
|
||||
// error.
|
||||
HookErrorModeFail HookErrorMode = "Fail"
|
||||
@@ -179,7 +131,6 @@ const (
|
||||
|
||||
// BackupPhase is a string representation of the lifecycle phase
|
||||
// of a Velero backup.
|
||||
// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;Completed;PartiallyFailed;Failed;Deleting
|
||||
type BackupPhase string
|
||||
|
||||
const (
|
||||
@@ -212,89 +163,48 @@ const (
|
||||
|
||||
// BackupStatus captures the current status of a Velero backup.
|
||||
type BackupStatus struct {
|
||||
// Version is the backup format major version.
|
||||
// Deprecated: Please see FormatVersion
|
||||
// +optional
|
||||
Version int `json:"version,omitempty"`
|
||||
|
||||
// FormatVersion is the backup format version, including major, minor, and patch version.
|
||||
// +optional
|
||||
FormatVersion string `json:"formatVersion,omitempty"`
|
||||
// Version is the backup format version.
|
||||
Version int `json:"version"`
|
||||
|
||||
// Expiration is when this Backup is eligible for garbage-collection.
|
||||
// +optional
|
||||
// +nullable
|
||||
Expiration *metav1.Time `json:"expiration,omitempty"`
|
||||
Expiration metav1.Time `json:"expiration"`
|
||||
|
||||
// Phase is the current state of the Backup.
|
||||
// +optional
|
||||
Phase BackupPhase `json:"phase,omitempty"`
|
||||
Phase BackupPhase `json:"phase"`
|
||||
|
||||
// ValidationErrors is a slice of all validation errors (if
|
||||
// applicable).
|
||||
// +optional
|
||||
// +nullable
|
||||
ValidationErrors []string `json:"validationErrors,omitempty"`
|
||||
ValidationErrors []string `json:"validationErrors"`
|
||||
|
||||
// StartTimestamp records the time a backup was started.
|
||||
// Separate from CreationTimestamp, since that value changes
|
||||
// on restores.
|
||||
// The server's time is used for StartTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
|
||||
StartTimestamp metav1.Time `json:"startTimestamp"`
|
||||
|
||||
// CompletionTimestamp records the time a backup was completed.
|
||||
// Completion time is recorded even on failed backups.
|
||||
// Completion time is recorded before uploading the backup object.
|
||||
// The server's time is used for CompletionTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"`
|
||||
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
|
||||
|
||||
// VolumeSnapshotsAttempted is the total number of attempted
|
||||
// volume snapshots for this backup.
|
||||
// +optional
|
||||
VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted,omitempty"`
|
||||
VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted"`
|
||||
|
||||
// VolumeSnapshotsCompleted is the total number of successfully
|
||||
// completed volume snapshots for this backup.
|
||||
// +optional
|
||||
VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted,omitempty"`
|
||||
VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted"`
|
||||
|
||||
// Warnings is a count of all warning messages that were generated during
|
||||
// execution of the backup. The actual warnings are in the backup's log
|
||||
// file in object storage.
|
||||
// +optional
|
||||
Warnings int `json:"warnings,omitempty"`
|
||||
Warnings int `json:"warnings"`
|
||||
|
||||
// Errors is a count of all error messages that were generated during
|
||||
// execution of the backup. The actual errors are in the backup's log
|
||||
// file in object storage.
|
||||
// +optional
|
||||
Errors int `json:"errors,omitempty"`
|
||||
|
||||
// Progress contains information about the backup's execution progress. Note
|
||||
// that this information is best-effort only -- if Velero fails to update it
|
||||
// during a backup for any reason, it may be inaccurate/stale.
|
||||
// +optional
|
||||
// +nullable
|
||||
Progress *BackupProgress `json:"progress,omitempty"`
|
||||
}
|
||||
|
||||
// BackupProgress stores information about the progress of a Backup's execution.
|
||||
type BackupProgress struct {
|
||||
// TotalItems is the total number of items to be backed up. This number may change
|
||||
// throughout the execution of the backup due to plugins that return additional related
|
||||
// items to back up, the velero.io/exclude-from-backup label, and various other
|
||||
// filters that happen as items are processed.
|
||||
// +optional
|
||||
TotalItems int `json:"totalItems,omitempty"`
|
||||
|
||||
// ItemsBackedUp is the number of items that have actually been written to the
|
||||
// backup tarball so far.
|
||||
// +optional
|
||||
ItemsBackedUp int `json:"itemsBackedUp,omitempty"`
|
||||
Errors int `json:"errors"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
@@ -303,15 +213,10 @@ type BackupProgress struct {
|
||||
// Backup is a Velero resource that respresents the capture of Kubernetes
|
||||
// cluster state at a point in time (API objects and associated volume state).
|
||||
type Backup struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec BackupSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec BackupSpec `json:"spec"`
|
||||
Status BackupStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -320,9 +225,6 @@ type Backup struct {
|
||||
// BackupList is a list of Backups.
|
||||
type BackupList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []Backup `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []Backup `json:"items"`
|
||||
}
|
||||
|
||||
@@ -26,16 +26,11 @@ import (
|
||||
|
||||
// BackupStorageLocation is a location where Velero stores backup objects.
|
||||
type BackupStorageLocation struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec BackupStorageLocationSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Status BackupStorageLocationStatus `json:"status,omitempty"`
|
||||
Spec BackupStorageLocationSpec `json:"spec"`
|
||||
Status BackupStorageLocationStatus `json:"status"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -43,17 +38,14 @@ type BackupStorageLocation struct {
|
||||
// BackupStorageLocationList is a list of BackupStorageLocations.
|
||||
type BackupStorageLocationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []BackupStorageLocation `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []BackupStorageLocation `json:"items"`
|
||||
}
|
||||
|
||||
// StorageType represents the type of storage that a backup location uses.
|
||||
// ObjectStorage must be non-nil, since it is currently the only supported StorageType.
|
||||
type StorageType struct {
|
||||
ObjectStorage *ObjectStorageLocation `json:"objectStorage"`
|
||||
ObjectStorage *ObjectStorageLocation `json:"objectStorage,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
|
||||
@@ -62,12 +54,7 @@ type ObjectStorageLocation struct {
|
||||
Bucket string `json:"bucket"`
|
||||
|
||||
// Prefix is the path inside a bucket to use for Velero storage. Optional.
|
||||
// +optional
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
|
||||
// CACert defines a CA bundle to use when verifying TLS connections to the provider.
|
||||
// +optional
|
||||
CACert []byte `json:"caCert,omitempty"`
|
||||
Prefix string `json:"prefix"`
|
||||
}
|
||||
|
||||
// BackupStorageLocationSpec defines the specification for a Velero BackupStorageLocation.
|
||||
@@ -76,23 +63,15 @@ type BackupStorageLocationSpec struct {
|
||||
Provider string `json:"provider"`
|
||||
|
||||
// Config is for provider-specific configuration fields.
|
||||
// +optional
|
||||
Config map[string]string `json:"config,omitempty"`
|
||||
Config map[string]string `json:"config"`
|
||||
|
||||
StorageType `json:",inline"`
|
||||
|
||||
// AccessMode defines the permissions for the backup storage location.
|
||||
// +optional
|
||||
AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"`
|
||||
|
||||
// BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync.
|
||||
// +optional
|
||||
// +nullable
|
||||
BackupSyncPeriod *metav1.Duration `json:"backupSyncPeriod,omitempty"`
|
||||
}
|
||||
|
||||
// BackupStorageLocationPhase is the lifecyle phase of a Velero BackupStorageLocation.
|
||||
// +kubebuilder:validation:Enum=Available;Unavailable
|
||||
type BackupStorageLocationPhase string
|
||||
|
||||
const (
|
||||
@@ -104,7 +83,6 @@ const (
|
||||
)
|
||||
|
||||
// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation.
|
||||
// +kubebuilder:validation:Enum=ReadOnly;ReadWrite
|
||||
type BackupStorageLocationAccessMode string
|
||||
|
||||
const (
|
||||
@@ -116,32 +94,16 @@ const (
|
||||
)
|
||||
|
||||
// TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus.
|
||||
// TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus.
|
||||
|
||||
// BackupStorageLocationStatus describes the current status of a Velero BackupStorageLocation.
|
||||
type BackupStorageLocationStatus struct {
|
||||
// Phase is the current state of the BackupStorageLocation.
|
||||
// +optional
|
||||
Phase BackupStorageLocationPhase `json:"phase,omitempty"`
|
||||
|
||||
// LastSyncedTime is the last time the contents of the location were synced into
|
||||
// the cluster.
|
||||
// +optional
|
||||
// +nullable
|
||||
LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty"`
|
||||
|
||||
// LastSyncedRevision is the value of the `metadata/revision` file in the backup
|
||||
// storage location the last time the BSL's contents were synced into the cluster.
|
||||
//
|
||||
// Deprecated: this field is no longer updated or used for detecting changes to
|
||||
// the location's contents and will be removed entirely in v2.0.
|
||||
// +optional
|
||||
LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"`
|
||||
Phase BackupStorageLocationPhase `json:"phase,omitempty"`
|
||||
LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"`
|
||||
LastSyncedTime metav1.Time `json:"lastSyncedTime,omitempty"`
|
||||
|
||||
// AccessMode is an unused field.
|
||||
//
|
||||
// Deprecated: there is now an AccessMode field on the Spec and this field
|
||||
// will be removed entirely as of v2.0.
|
||||
// +optional
|
||||
AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"`
|
||||
}
|
||||
|
||||
@@ -36,14 +36,4 @@ const (
|
||||
// NamespaceScopedDir is the name of the directory containing namespace-scoped
|
||||
// resource within a Velero backup.
|
||||
NamespaceScopedDir = "namespaces"
|
||||
|
||||
// CSIFeatureFlag is the feature flag string that defines whether or not CSI features are being used.
|
||||
CSIFeatureFlag = "EnableCSI"
|
||||
|
||||
// PreferredVersionDir is the suffix name of the directory containing the preferred version of the API group
|
||||
// resource within a Velero backup.
|
||||
PreferredVersionDir = "-preferredversion"
|
||||
|
||||
// APIGroupVersionsFeatureFlag is the feature flag string that defines whether or not to handle multiple API Group Versions
|
||||
APIGroupVersionsFeatureFlag = "EnableAPIGroupVersions"
|
||||
)
|
||||
|
||||
@@ -24,16 +24,13 @@ type DeleteBackupRequestSpec struct {
|
||||
}
|
||||
|
||||
// DeleteBackupRequestPhase represents the lifecycle phase of a DeleteBackupRequest.
|
||||
// +kubebuilder:validation:Enum=New;InProgress;Processed
|
||||
type DeleteBackupRequestPhase string
|
||||
|
||||
const (
|
||||
// DeleteBackupRequestPhaseNew means the DeleteBackupRequest has not been processed yet.
|
||||
DeleteBackupRequestPhaseNew DeleteBackupRequestPhase = "New"
|
||||
|
||||
// DeleteBackupRequestPhaseInProgress means the DeleteBackupRequest is being processed.
|
||||
DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress"
|
||||
|
||||
// DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed.
|
||||
DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed"
|
||||
)
|
||||
@@ -41,13 +38,9 @@ const (
|
||||
// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest.
|
||||
type DeleteBackupRequestStatus struct {
|
||||
// Phase is the current state of the DeleteBackupRequest.
|
||||
// +optional
|
||||
Phase DeleteBackupRequestPhase `json:"phase,omitempty"`
|
||||
|
||||
Phase DeleteBackupRequestPhase `json:"phase"`
|
||||
// Errors contains any errors that were encountered during the deletion process.
|
||||
// +optional
|
||||
// +nullable
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Errors []string `json:"errors"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
@@ -55,15 +48,10 @@ type DeleteBackupRequestStatus struct {
|
||||
|
||||
// DeleteBackupRequest is a request to delete one or more backups.
|
||||
type DeleteBackupRequest struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec DeleteBackupRequestSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec DeleteBackupRequestSpec `json:"spec"`
|
||||
Status DeleteBackupRequestStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -72,9 +60,6 @@ type DeleteBackupRequest struct {
|
||||
// DeleteBackupRequestList is a list of DeleteBackupRequests.
|
||||
type DeleteBackupRequestList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []DeleteBackupRequest `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []DeleteBackupRequest `json:"items"`
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ type DownloadRequestSpec struct {
|
||||
}
|
||||
|
||||
// DownloadTargetKind represents what type of file to download.
|
||||
// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupResourceList;RestoreLog;RestoreResults
|
||||
type DownloadTargetKind string
|
||||
|
||||
const (
|
||||
@@ -42,20 +41,17 @@ const (
|
||||
type DownloadTarget struct {
|
||||
// Kind is the type of file to download.
|
||||
Kind DownloadTargetKind `json:"kind"`
|
||||
|
||||
// Name is the name of the kubernetes resource with which the file is associated.
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// DownloadRequestPhase represents the lifecycle phase of a DownloadRequest.
|
||||
// +kubebuilder:validation:Enum=New;Processed
|
||||
type DownloadRequestPhase string
|
||||
|
||||
const (
|
||||
// DownloadRequestPhaseNew means the DownloadRequest has not been processed by the
|
||||
// DownloadRequestController yet.
|
||||
DownloadRequestPhaseNew DownloadRequestPhase = "New"
|
||||
|
||||
// DownloadRequestPhaseProcessed means the DownloadRequest has been processed by the
|
||||
// DownloadRequestController.
|
||||
DownloadRequestPhaseProcessed DownloadRequestPhase = "Processed"
|
||||
@@ -64,17 +60,11 @@ const (
|
||||
// DownloadRequestStatus is the current status of a DownloadRequest.
|
||||
type DownloadRequestStatus struct {
|
||||
// Phase is the current state of the DownloadRequest.
|
||||
// +optional
|
||||
Phase DownloadRequestPhase `json:"phase,omitempty"`
|
||||
|
||||
Phase DownloadRequestPhase `json:"phase"`
|
||||
// DownloadURL contains the pre-signed URL for the target file.
|
||||
// +optional
|
||||
DownloadURL string `json:"downloadURL,omitempty"`
|
||||
|
||||
DownloadURL string `json:"downloadURL"`
|
||||
// Expiration is when this DownloadRequest expires and can be deleted by the system.
|
||||
// +optional
|
||||
// +nullable
|
||||
Expiration *metav1.Time `json:"expiration,omitempty"`
|
||||
Expiration metav1.Time `json:"expiration"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
@@ -83,15 +73,10 @@ type DownloadRequestStatus struct {
|
||||
// DownloadRequest is a request to download an artifact from backup object storage, such as a backup
|
||||
// log file.
|
||||
type DownloadRequest struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec DownloadRequestSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec DownloadRequestSpec `json:"spec"`
|
||||
Status DownloadRequestStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -100,9 +85,6 @@ type DownloadRequest struct {
|
||||
// DownloadRequestList is a list of DownloadRequests.
|
||||
type DownloadRequestList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []DownloadRequest `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []DownloadRequest `json:"items"`
|
||||
}
|
||||
|
||||
@@ -35,9 +35,6 @@ const (
|
||||
// PodUIDLabel is the label key used to identify a pod by uid.
|
||||
PodUIDLabel = "velero.io/pod-uid"
|
||||
|
||||
// PVCUIDLabel is the label key used to identify a PVC by uid.
|
||||
PVCUIDLabel = "velero.io/pvc-uid"
|
||||
|
||||
// PodVolumeOperationTimeoutAnnotation is the annotation key used to apply
|
||||
// a backup/restore-specific timeout value for pod volume operations (i.e.
|
||||
// restic backups/restores).
|
||||
@@ -50,16 +47,4 @@ const (
|
||||
// ResticVolumeNamespaceLabel is the label key used to identify which
|
||||
// namespace a restic repository stores pod volume backups for.
|
||||
ResticVolumeNamespaceLabel = "velero.io/volume-namespace"
|
||||
|
||||
// SourceClusterK8sVersionAnnotation is the label key used to identify the k8s
|
||||
// git version of the backup , i.e. v1.16.4
|
||||
SourceClusterK8sGitVersionAnnotation = "velero.io/source-cluster-k8s-gitversion"
|
||||
|
||||
// SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s
|
||||
// major version of the backup , i.e. 1
|
||||
SourceClusterK8sMajorVersionAnnotation = "velero.io/source-cluster-k8s-major-version"
|
||||
|
||||
// SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s
|
||||
// minor version of the backup , i.e. 16
|
||||
SourceClusterK8sMinorVersionAnnotation = "velero.io/source-cluster-k8s-minor-version"
|
||||
)
|
||||
|
||||
@@ -42,12 +42,10 @@ type PodVolumeBackupSpec struct {
|
||||
|
||||
// Tags are a map of key-value pairs that should be applied to the
|
||||
// volume backup as tags.
|
||||
// +optional
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
}
|
||||
|
||||
// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
|
||||
// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed
|
||||
type PodVolumeBackupPhase string
|
||||
|
||||
const (
|
||||
@@ -60,57 +58,38 @@ const (
|
||||
// PodVolumeBackupStatus is the current status of a PodVolumeBackup.
|
||||
type PodVolumeBackupStatus struct {
|
||||
// Phase is the current state of the PodVolumeBackup.
|
||||
// +optional
|
||||
Phase PodVolumeBackupPhase `json:"phase,omitempty"`
|
||||
Phase PodVolumeBackupPhase `json:"phase"`
|
||||
|
||||
// Path is the full path within the controller pod being backed up.
|
||||
// +optional
|
||||
Path string `json:"path,omitempty"`
|
||||
Path string `json:"path"`
|
||||
|
||||
// SnapshotID is the identifier for the snapshot of the pod volume.
|
||||
// +optional
|
||||
SnapshotID string `json:"snapshotID,omitempty"`
|
||||
SnapshotID string `json:"snapshotID"`
|
||||
|
||||
// Message is a message about the pod volume backup's status.
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
Message string `json:"message"`
|
||||
|
||||
// StartTimestamp records the time a backup was started.
|
||||
// Separate from CreationTimestamp, since that value changes
|
||||
// on restores.
|
||||
// The server's time is used for StartTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
|
||||
StartTimestamp metav1.Time `json:"startTimestamp"`
|
||||
|
||||
// CompletionTimestamp records the time a backup was completed.
|
||||
// Completion time is recorded even on failed backups.
|
||||
// Completion time is recorded before uploading the backup object.
|
||||
// The server's time is used for CompletionTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"`
|
||||
|
||||
// Progress holds the total number of bytes of the volume and the current
|
||||
// number of backed up bytes. This can be used to display progress information
|
||||
// about the backup operation.
|
||||
// +optional
|
||||
Progress PodVolumeOperationProgress `json:"progress,omitempty"`
|
||||
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type PodVolumeBackup struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec PodVolumeBackupSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec PodVolumeBackupSpec `json:"spec"`
|
||||
Status PodVolumeBackupStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -119,9 +98,6 @@ type PodVolumeBackup struct {
|
||||
// PodVolumeBackupList is a list of PodVolumeBackups.
|
||||
type PodVolumeBackupList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []PodVolumeBackup `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []PodVolumeBackup `json:"items"`
|
||||
}
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
// PodVolumeOperationProgress represents the progress of a
|
||||
// PodVolumeBackup/Restore (restic) operation
|
||||
type PodVolumeOperationProgress struct {
|
||||
// +optional
|
||||
TotalBytes int64 `json:"totalBytes,omitempty"`
|
||||
|
||||
// +optional
|
||||
BytesDone int64 `json:"bytesDone,omitempty"`
|
||||
}
|
||||
@@ -41,7 +41,6 @@ type PodVolumeRestoreSpec struct {
|
||||
}
|
||||
|
||||
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
|
||||
// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed
|
||||
type PodVolumeRestorePhase string
|
||||
|
||||
const (
|
||||
@@ -54,46 +53,29 @@ const (
|
||||
// PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
|
||||
type PodVolumeRestoreStatus struct {
|
||||
// Phase is the current state of the PodVolumeRestore.
|
||||
// +optional
|
||||
Phase PodVolumeRestorePhase `json:"phase,omitempty"`
|
||||
Phase PodVolumeRestorePhase `json:"phase"`
|
||||
|
||||
// Message is a message about the pod volume restore's status.
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
Message string `json:"message"`
|
||||
|
||||
// StartTimestamp records the time a restore was started.
|
||||
// The server's time is used for StartTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
|
||||
StartTimestamp metav1.Time `json:"startTimestamp"`
|
||||
|
||||
// CompletionTimestamp records the time a restore was completed.
|
||||
// Completion time is recorded even on failed restores.
|
||||
// The server's time is used for CompletionTimestamps
|
||||
// +optional
|
||||
// +nullable
|
||||
CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"`
|
||||
|
||||
// Progress holds the total number of bytes of the snapshot and the current
|
||||
// number of restored bytes. This can be used to display progress information
|
||||
// about the restore operation.
|
||||
// +optional
|
||||
Progress PodVolumeOperationProgress `json:"progress,omitempty"`
|
||||
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type PodVolumeRestore struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec PodVolumeRestoreSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec PodVolumeRestoreSpec `json:"spec"`
|
||||
Status PodVolumeRestoreStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -102,9 +84,6 @@ type PodVolumeRestore struct {
|
||||
// PodVolumeRestoreList is a list of PodVolumeRestores.
|
||||
type PodVolumeRestoreList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []PodVolumeRestore `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []PodVolumeRestore `json:"items"`
|
||||
}
|
||||
|
||||
@@ -39,7 +39,6 @@ type ResticRepositorySpec struct {
|
||||
}
|
||||
|
||||
// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository.
|
||||
// +kubebuilder:validation:Enum=New;Ready;NotReady
|
||||
type ResticRepositoryPhase string
|
||||
|
||||
const (
|
||||
@@ -51,32 +50,23 @@ const (
|
||||
// ResticRepositoryStatus is the current status of a ResticRepository.
|
||||
type ResticRepositoryStatus struct {
|
||||
// Phase is the current state of the ResticRepository.
|
||||
// +optional
|
||||
Phase ResticRepositoryPhase `json:"phase,omitempty"`
|
||||
Phase ResticRepositoryPhase `json:"phase"`
|
||||
|
||||
// Message is a message about the current status of the ResticRepository.
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
Message string `json:"message"`
|
||||
|
||||
// LastMaintenanceTime is the last time maintenance was run.
|
||||
// +optional
|
||||
// +nullable
|
||||
LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"`
|
||||
LastMaintenanceTime metav1.Time `json:"lastMaintenanceTime"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type ResticRepository struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec ResticRepositorySpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec ResticRepositorySpec `json:"spec"`
|
||||
Status ResticRepositoryStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -85,9 +75,6 @@ type ResticRepository struct {
|
||||
// ResticRepositoryList is a list of ResticRepositories.
|
||||
type ResticRepositoryList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []ResticRepository `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []ResticRepository `json:"items"`
|
||||
}
|
||||
|
||||
@@ -27,64 +27,47 @@ type RestoreSpec struct {
|
||||
// ScheduleName is the unique name of the Velero schedule to restore
|
||||
// from. If specified, and BackupName is empty, Velero will restore
|
||||
// from the most recent successful backup created from this schedule.
|
||||
// +optional
|
||||
ScheduleName string `json:"scheduleName,omitempty"`
|
||||
|
||||
// IncludedNamespaces is a slice of namespace names to include objects
|
||||
// from. If empty, all namespaces are included.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedNamespaces []string `json:"includedNamespaces,omitempty"`
|
||||
IncludedNamespaces []string `json:"includedNamespaces"`
|
||||
|
||||
// ExcludedNamespaces contains a list of namespaces that are not
|
||||
// included in the restore.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"`
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces"`
|
||||
|
||||
// IncludedResources is a slice of resource names to include
|
||||
// in the restore. If empty, all resources in the backup are included.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludedResources []string `json:"includedResources,omitempty"`
|
||||
IncludedResources []string `json:"includedResources"`
|
||||
|
||||
// ExcludedResources is a slice of resource names that are not
|
||||
// included in the restore.
|
||||
// +optional
|
||||
// +nullable
|
||||
ExcludedResources []string `json:"excludedResources,omitempty"`
|
||||
ExcludedResources []string `json:"excludedResources"`
|
||||
|
||||
// NamespaceMapping is a map of source namespace names
|
||||
// to target namespace names to restore into. Any source
|
||||
// namespaces not included in the map will be restored into
|
||||
// namespaces of the same name.
|
||||
// +optional
|
||||
NamespaceMapping map[string]string `json:"namespaceMapping,omitempty"`
|
||||
NamespaceMapping map[string]string `json:"namespaceMapping"`
|
||||
|
||||
// LabelSelector is a metav1.LabelSelector to filter with
|
||||
// when restoring individual objects from the backup. If empty
|
||||
// or nil, all objects are included. Optional.
|
||||
// +optional
|
||||
// +nullable
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
|
||||
// RestorePVs specifies whether to restore all included
|
||||
// PVs from snapshot (via the cloudprovider).
|
||||
// +optional
|
||||
// +nullable
|
||||
RestorePVs *bool `json:"restorePVs,omitempty"`
|
||||
|
||||
// IncludeClusterResources specifies whether cluster-scoped resources
|
||||
// should be included for consideration in the restore. If null, defaults
|
||||
// to true.
|
||||
// +optional
|
||||
// +nullable
|
||||
IncludeClusterResources *bool `json:"includeClusterResources,omitempty"`
|
||||
}
|
||||
|
||||
// RestorePhase is a string representation of the lifecycle phase
|
||||
// of a Velero restore
|
||||
// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;Completed;PartiallyFailed;Failed
|
||||
type RestorePhase string
|
||||
|
||||
const (
|
||||
@@ -115,28 +98,22 @@ const (
|
||||
// RestoreStatus captures the current status of a Velero restore
|
||||
type RestoreStatus struct {
|
||||
// Phase is the current state of the Restore
|
||||
// +optional
|
||||
Phase RestorePhase `json:"phase,omitempty"`
|
||||
Phase RestorePhase `json:"phase"`
|
||||
|
||||
// ValidationErrors is a slice of all validation errors (if
|
||||
// applicable)
|
||||
// +optional
|
||||
// +nullable
|
||||
ValidationErrors []string `json:"validationErrors,omitempty"`
|
||||
ValidationErrors []string `json:"validationErrors"`
|
||||
|
||||
// Warnings is a count of all warning messages that were generated during
|
||||
// execution of the restore. The actual warnings are stored in object storage.
|
||||
// +optional
|
||||
Warnings int `json:"warnings,omitempty"`
|
||||
Warnings int `json:"warnings"`
|
||||
|
||||
// Errors is a count of all error messages that were generated during
|
||||
// execution of the restore. The actual errors are stored in object storage.
|
||||
// +optional
|
||||
Errors int `json:"errors,omitempty"`
|
||||
Errors int `json:"errors"`
|
||||
|
||||
// FailureReason is an error that caused the entire restore to fail.
|
||||
// +optional
|
||||
FailureReason string `json:"failureReason,omitempty"`
|
||||
FailureReason string `json:"failureReason"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
@@ -145,15 +122,10 @@ type RestoreStatus struct {
|
||||
// Restore is a Velero resource that represents the application of
|
||||
// resources from a Velero backup to a target Kubernetes cluster.
|
||||
type Restore struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec RestoreSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec RestoreSpec `json:"spec"`
|
||||
Status RestoreStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -162,9 +134,6 @@ type Restore struct {
|
||||
// RestoreList is a list of Restores.
|
||||
type RestoreList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []Restore `json:"items"`
|
||||
Items []Restore `json:"items"`
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ type ScheduleSpec struct {
|
||||
|
||||
// SchedulePhase is a string representation of the lifecycle phase
|
||||
// of a Velero schedule
|
||||
// +kubebuilder:validation:Enum=New;Enabled;FailedValidation
|
||||
type SchedulePhase string
|
||||
|
||||
const (
|
||||
@@ -51,19 +50,15 @@ const (
|
||||
// ScheduleStatus captures the current state of a Velero schedule
|
||||
type ScheduleStatus struct {
|
||||
// Phase is the current phase of the Schedule
|
||||
// +optional
|
||||
Phase SchedulePhase `json:"phase,omitempty"`
|
||||
Phase SchedulePhase `json:"phase"`
|
||||
|
||||
// LastBackup is the last time a Backup was run for this
|
||||
// Schedule schedule
|
||||
// +optional
|
||||
// +nullable
|
||||
LastBackup *metav1.Time `json:"lastBackup,omitempty"`
|
||||
LastBackup metav1.Time `json:"lastBackup"`
|
||||
|
||||
// ValidationErrors is a slice of all validation errors (if
|
||||
// applicable)
|
||||
// +optional
|
||||
ValidationErrors []string `json:"validationErrors,omitempty"`
|
||||
ValidationErrors []string `json:"validationErrors"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
@@ -72,15 +67,10 @@ type ScheduleStatus struct {
|
||||
// Schedule is a Velero resource that represents a pre-scheduled or
|
||||
// periodic Backup that should be run.
|
||||
type Schedule struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
Spec ScheduleSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec ScheduleSpec `json:"spec"`
|
||||
Status ScheduleStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -89,9 +79,6 @@ type Schedule struct {
|
||||
// ScheduleList is a list of Schedules.
|
||||
type ScheduleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []Schedule `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []Schedule `json:"items"`
|
||||
}
|
||||
|
||||
@@ -26,15 +26,10 @@ import (
|
||||
// ServerStatusRequest is a request to access current status information about
|
||||
// the Velero server.
|
||||
type ServerStatusRequest struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec ServerStatusRequestSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec ServerStatusRequestSpec `json:"spec"`
|
||||
Status ServerStatusRequestStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
@@ -43,7 +38,6 @@ type ServerStatusRequestSpec struct {
|
||||
}
|
||||
|
||||
// ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest.
|
||||
// +kubebuilder:validation:Enum=New;Processed
|
||||
type ServerStatusRequestPhase string
|
||||
|
||||
const (
|
||||
@@ -62,23 +56,17 @@ type PluginInfo struct {
|
||||
// ServerStatusRequestStatus is the current status of a ServerStatusRequest.
|
||||
type ServerStatusRequestStatus struct {
|
||||
// Phase is the current lifecycle phase of the ServerStatusRequest.
|
||||
// +optional
|
||||
Phase ServerStatusRequestPhase `json:"phase,omitempty"`
|
||||
Phase ServerStatusRequestPhase `json:"phase"`
|
||||
|
||||
// ProcessedTimestamp is when the ServerStatusRequest was processed
|
||||
// by the ServerStatusRequestController.
|
||||
// +optional
|
||||
// +nullable
|
||||
ProcessedTimestamp *metav1.Time `json:"processedTimestamp,omitempty"`
|
||||
ProcessedTimestamp metav1.Time `json:"processedTimestamp"`
|
||||
|
||||
// ServerVersion is the Velero server version.
|
||||
// +optional
|
||||
ServerVersion string `json:"serverVersion,omitempty"`
|
||||
ServerVersion string `json:"serverVersion"`
|
||||
|
||||
// Plugins list information about the plugins running on the Velero server
|
||||
// +optional
|
||||
// +nullable
|
||||
Plugins []PluginInfo `json:"plugins,omitempty"`
|
||||
Plugins []PluginInfo `json:"plugins"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -86,9 +74,6 @@ type ServerStatusRequestStatus struct {
|
||||
// ServerStatusRequestList is a list of ServerStatusRequests.
|
||||
type ServerStatusRequestList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []ServerStatusRequest `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []ServerStatusRequest `json:"items"`
|
||||
}
|
||||
|
||||
@@ -23,16 +23,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// VolumeSnapshotLocation is a location where Velero stores volume snapshots.
|
||||
type VolumeSnapshotLocation struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// +optional
|
||||
Spec VolumeSnapshotLocationSpec `json:"spec,omitempty"`
|
||||
|
||||
// +optional
|
||||
Status VolumeSnapshotLocationStatus `json:"status,omitempty"`
|
||||
Spec VolumeSnapshotLocationSpec `json:"spec"`
|
||||
Status VolumeSnapshotLocationStatus `json:"status"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -40,11 +35,8 @@ type VolumeSnapshotLocation struct {
|
||||
// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations.
|
||||
type VolumeSnapshotLocationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []VolumeSnapshotLocation `json:"items"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []VolumeSnapshotLocation `json:"items"`
|
||||
}
|
||||
|
||||
// VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation.
|
||||
@@ -53,12 +45,10 @@ type VolumeSnapshotLocationSpec struct {
|
||||
Provider string `json:"provider"`
|
||||
|
||||
// Config is for provider-specific configuration fields.
|
||||
// +optional
|
||||
Config map[string]string `json:"config,omitempty"`
|
||||
Config map[string]string `json:"config"`
|
||||
}
|
||||
|
||||
// VolumeSnapshotLocationPhase is the lifecyle phase of a Velero VolumeSnapshotLocation.
|
||||
// +kubebuilder:validation:Enum=Available;Unavailable
|
||||
type VolumeSnapshotLocationPhase string
|
||||
|
||||
const (
|
||||
@@ -71,6 +61,5 @@ const (
|
||||
|
||||
// VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation.
|
||||
type VolumeSnapshotLocationStatus struct {
|
||||
// +optional
|
||||
Phase VolumeSnapshotLocationPhase `json:"phase,omitempty"`
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ func (in *BackupHooks) DeepCopy() *BackupHooks {
|
||||
func (in *BackupList) DeepCopyInto(out *BackupList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Backup, len(*in))
|
||||
@@ -109,22 +109,6 @@ func (in *BackupList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupProgress) DeepCopyInto(out *BackupProgress) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupProgress.
|
||||
func (in *BackupProgress) DeepCopy() *BackupProgress {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(BackupProgress)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) {
|
||||
*out = *in
|
||||
@@ -262,28 +246,14 @@ func (in *BackupSpec) DeepCopy() *BackupSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
|
||||
*out = *in
|
||||
if in.Expiration != nil {
|
||||
in, out := &in.Expiration, &out.Expiration
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.Expiration.DeepCopyInto(&out.Expiration)
|
||||
if in.ValidationErrors != nil {
|
||||
in, out := &in.ValidationErrors, &out.ValidationErrors
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.StartTimestamp != nil {
|
||||
in, out := &in.StartTimestamp, &out.StartTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.CompletionTimestamp != nil {
|
||||
in, out := &in.CompletionTimestamp, &out.CompletionTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.Progress != nil {
|
||||
in, out := &in.Progress, &out.Progress
|
||||
*out = new(BackupProgress)
|
||||
**out = **in
|
||||
}
|
||||
in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
|
||||
in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -329,7 +299,7 @@ func (in *BackupStorageLocation) DeepCopyObject() runtime.Object {
|
||||
func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]BackupStorageLocation, len(*in))
|
||||
@@ -369,11 +339,6 @@ func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec
|
||||
}
|
||||
}
|
||||
in.StorageType.DeepCopyInto(&out.StorageType)
|
||||
if in.BackupSyncPeriod != nil {
|
||||
in, out := &in.BackupSyncPeriod, &out.BackupSyncPeriod
|
||||
*out = new(metav1.Duration)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -390,10 +355,7 @@ func (in *BackupStorageLocationSpec) DeepCopy() *BackupStorageLocationSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationStatus) {
|
||||
*out = *in
|
||||
if in.LastSyncedTime != nil {
|
||||
in, out := &in.LastSyncedTime, &out.LastSyncedTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.LastSyncedTime.DeepCopyInto(&out.LastSyncedTime)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -439,7 +401,7 @@ func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object {
|
||||
func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DeleteBackupRequest, len(*in))
|
||||
@@ -537,7 +499,7 @@ func (in *DownloadRequest) DeepCopyObject() runtime.Object {
|
||||
func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DownloadRequest, len(*in))
|
||||
@@ -586,10 +548,7 @@ func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) {
|
||||
*out = *in
|
||||
if in.Expiration != nil {
|
||||
in, out := &in.Expiration, &out.Expiration
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.Expiration.DeepCopyInto(&out.Expiration)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -644,11 +603,6 @@ func (in *ExecHook) DeepCopy() *ExecHook {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
|
||||
*out = *in
|
||||
if in.CACert != nil {
|
||||
in, out := &in.CACert, &out.CACert
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -710,7 +664,7 @@ func (in *PodVolumeBackup) DeepCopyObject() runtime.Object {
|
||||
func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]PodVolumeBackup, len(*in))
|
||||
@@ -766,15 +720,8 @@ func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
|
||||
*out = *in
|
||||
if in.StartTimestamp != nil {
|
||||
in, out := &in.StartTimestamp, &out.StartTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.CompletionTimestamp != nil {
|
||||
in, out := &in.CompletionTimestamp, &out.CompletionTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
|
||||
in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -788,22 +735,6 @@ func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodVolumeOperationProgress) DeepCopyInto(out *PodVolumeOperationProgress) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeOperationProgress.
|
||||
func (in *PodVolumeOperationProgress) DeepCopy() *PodVolumeOperationProgress {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodVolumeOperationProgress)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) {
|
||||
*out = *in
|
||||
@@ -836,7 +767,7 @@ func (in *PodVolumeRestore) DeepCopyObject() runtime.Object {
|
||||
func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]PodVolumeRestore, len(*in))
|
||||
@@ -885,15 +816,8 @@ func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) {
|
||||
*out = *in
|
||||
if in.StartTimestamp != nil {
|
||||
in, out := &in.StartTimestamp, &out.StartTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.CompletionTimestamp != nil {
|
||||
in, out := &in.CompletionTimestamp, &out.CompletionTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
|
||||
in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -939,7 +863,7 @@ func (in *ResticRepository) DeepCopyObject() runtime.Object {
|
||||
func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ResticRepository, len(*in))
|
||||
@@ -988,10 +912,7 @@ func (in *ResticRepositorySpec) DeepCopy() *ResticRepositorySpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) {
|
||||
*out = *in
|
||||
if in.LastMaintenanceTime != nil {
|
||||
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.LastMaintenanceTime.DeepCopyInto(&out.LastMaintenanceTime)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1037,7 +958,7 @@ func (in *Restore) DeepCopyObject() runtime.Object {
|
||||
func (in *RestoreList) DeepCopyInto(out *RestoreList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Restore, len(*in))
|
||||
@@ -1177,7 +1098,7 @@ func (in *Schedule) DeepCopyObject() runtime.Object {
|
||||
func (in *ScheduleList) DeepCopyInto(out *ScheduleList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Schedule, len(*in))
|
||||
@@ -1226,10 +1147,7 @@ func (in *ScheduleSpec) DeepCopy() *ScheduleSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) {
|
||||
*out = *in
|
||||
if in.LastBackup != nil {
|
||||
in, out := &in.LastBackup, &out.LastBackup
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.LastBackup.DeepCopyInto(&out.LastBackup)
|
||||
if in.ValidationErrors != nil {
|
||||
in, out := &in.ValidationErrors, &out.ValidationErrors
|
||||
*out = make([]string, len(*in))
|
||||
@@ -1280,7 +1198,7 @@ func (in *ServerStatusRequest) DeepCopyObject() runtime.Object {
|
||||
func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ServerStatusRequest, len(*in))
|
||||
@@ -1328,10 +1246,7 @@ func (in *ServerStatusRequestSpec) DeepCopy() *ServerStatusRequestSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServerStatusRequestStatus) DeepCopyInto(out *ServerStatusRequestStatus) {
|
||||
*out = *in
|
||||
if in.ProcessedTimestamp != nil {
|
||||
in, out := &in.ProcessedTimestamp, &out.ProcessedTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
in.ProcessedTimestamp.DeepCopyInto(&out.ProcessedTimestamp)
|
||||
if in.Plugins != nil {
|
||||
in, out := &in.Plugins, &out.Plugins
|
||||
*out = make([]PluginInfo, len(*in))
|
||||
@@ -1356,7 +1271,7 @@ func (in *StorageType) DeepCopyInto(out *StorageType) {
|
||||
if in.ObjectStorage != nil {
|
||||
in, out := &in.ObjectStorage, &out.ObjectStorage
|
||||
*out = new(ObjectStorageLocation)
|
||||
(*in).DeepCopyInto(*out)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1403,7 +1318,7 @@ func (in *VolumeSnapshotLocation) DeepCopyObject() runtime.Object {
|
||||
func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]VolumeSnapshotLocation, len(*in))
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
|
||||
)
|
||||
|
||||
// Extractor unzips/extracts a backup tarball to a local
|
||||
// temp directory.
|
||||
type Extractor struct {
|
||||
log logrus.FieldLogger
|
||||
fs filesystem.Interface
|
||||
}
|
||||
|
||||
func NewExtractor(log logrus.FieldLogger, fs filesystem.Interface) *Extractor {
|
||||
return &Extractor{
|
||||
log: log,
|
||||
fs: fs,
|
||||
}
|
||||
}
|
||||
|
||||
// UnzipAndExtractBackup extracts a reader on a gzipped tarball to a local temp directory
|
||||
func (e *Extractor) UnzipAndExtractBackup(src io.Reader) (string, error) {
|
||||
gzr, err := gzip.NewReader(src)
|
||||
if err != nil {
|
||||
e.log.Infof("error creating gzip reader: %v", err)
|
||||
return "", err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
return e.readBackup(tar.NewReader(gzr))
|
||||
}
|
||||
|
||||
func (e *Extractor) writeFile(target string, tarRdr *tar.Reader) error {
|
||||
file, err := e.fs.Create(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err := io.Copy(file, tarRdr); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
|
||||
dir, err := e.fs.TempDir("", "")
|
||||
if err != nil {
|
||||
e.log.Infof("error creating temp dir: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
header, err := tarRdr.Next()
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
e.log.Infof("error reading tar: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
target := filepath.Join(dir, header.Name)
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
err := e.fs.MkdirAll(target, header.FileInfo().Mode())
|
||||
if err != nil {
|
||||
e.log.Infof("mkdirall error: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
case tar.TypeReg:
|
||||
// make sure we have the directory created
|
||||
err := e.fs.MkdirAll(filepath.Dir(target), header.FileInfo().Mode())
|
||||
if err != nil {
|
||||
e.log.Infof("mkdirall error: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// create the file
|
||||
if err := e.writeFile(target, tarRdr); err != nil {
|
||||
e.log.Infof("error copying: %v", err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dir, nil
|
||||
}
|
||||
@@ -1,167 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
|
||||
)
|
||||
|
||||
// Parser traverses an extracted archive on disk to validate
// it and provide a helpful representation of it to consumers.
type Parser struct {
	// log receives warnings about unexpected entries encountered
	// while walking the archive.
	log logrus.FieldLogger
	// fs abstracts filesystem reads so parsing can be exercised
	// against a fake filesystem in tests.
	fs filesystem.Interface
}
|
||||
|
||||
// ResourceItems contains the collection of items of a given resource type
// within a backup, grouped by namespace (or empty string for cluster-scoped
// resources).
type ResourceItems struct {
	// GroupResource is API group and resource name,
	// formatted as "resource.group". For the "core"
	// API group, the ".group" suffix is omitted.
	GroupResource string

	// ItemsByNamespace is a map from namespace (or empty string
	// for cluster-scoped resources) to a list of individual item
	// names contained in the archive. Item names **do not** include
	// the file extension.
	ItemsByNamespace map[string][]string
}
|
||||
|
||||
// NewParser constructs a Parser.
|
||||
func NewParser(log logrus.FieldLogger, fs filesystem.Interface) *Parser {
|
||||
return &Parser{
|
||||
log: log,
|
||||
fs: fs,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse reads an extracted backup on the file system and returns
|
||||
// a structured catalog of the resources and items contained within it.
|
||||
func (p *Parser) Parse(dir string) (map[string]*ResourceItems, error) {
|
||||
// ensure top-level "resources" directory exists, and read subdirectories
|
||||
// of it, where each one is expected to correspond to a resource.
|
||||
resourcesDir := filepath.Join(dir, velerov1api.ResourcesDir)
|
||||
exists, err := p.fs.DirExists(resourcesDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(resourcesDir, dir+"/"))
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.Errorf("directory %q does not exist", strings.TrimPrefix(resourcesDir, dir+"/"))
|
||||
}
|
||||
|
||||
resourceDirs, err := p.fs.ReadDir(resourcesDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(resourcesDir, dir+"/"))
|
||||
}
|
||||
|
||||
// loop through each subdirectory (one per resource) and assemble
|
||||
// catalog of items within it.
|
||||
resources := map[string]*ResourceItems{}
|
||||
for _, resourceDir := range resourceDirs {
|
||||
if !resourceDir.IsDir() {
|
||||
p.log.Warnf("Ignoring unexpected file %q in directory %q", resourceDir.Name(), strings.TrimPrefix(resourcesDir, dir+"/"))
|
||||
continue
|
||||
}
|
||||
|
||||
resourceItems := &ResourceItems{
|
||||
GroupResource: resourceDir.Name(),
|
||||
ItemsByNamespace: map[string][]string{},
|
||||
}
|
||||
|
||||
// check for existence of a "cluster" subdirectory containing cluster-scoped
|
||||
// instances of this resource, and read its contents if it exists.
|
||||
clusterScopedDir := filepath.Join(resourcesDir, resourceDir.Name(), velerov1api.ClusterScopedDir)
|
||||
exists, err := p.fs.DirExists(clusterScopedDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(clusterScopedDir, dir+"/"))
|
||||
}
|
||||
if exists {
|
||||
items, err := p.getResourceItemsForScope(clusterScopedDir, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(items) > 0 {
|
||||
resourceItems.ItemsByNamespace[""] = items
|
||||
}
|
||||
}
|
||||
|
||||
// check for existence of a "namespaces" subdirectory containing further subdirectories,
|
||||
// one per namespace, and read its contents if it exists.
|
||||
namespaceScopedDir := filepath.Join(resourcesDir, resourceDir.Name(), velerov1api.NamespaceScopedDir)
|
||||
exists, err = p.fs.DirExists(namespaceScopedDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(namespaceScopedDir, dir+"/"))
|
||||
}
|
||||
if exists {
|
||||
namespaceDirs, err := p.fs.ReadDir(namespaceScopedDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(namespaceScopedDir, dir+"/"))
|
||||
}
|
||||
|
||||
for _, namespaceDir := range namespaceDirs {
|
||||
if !namespaceDir.IsDir() {
|
||||
p.log.Warnf("Ignoring unexpected file %q in directory %q", namespaceDir.Name(), strings.TrimPrefix(namespaceScopedDir, dir+"/"))
|
||||
continue
|
||||
}
|
||||
|
||||
items, err := p.getResourceItemsForScope(filepath.Join(namespaceScopedDir, namespaceDir.Name()), dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(items) > 0 {
|
||||
resourceItems.ItemsByNamespace[namespaceDir.Name()] = items
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resources[resourceDir.Name()] = resourceItems
|
||||
}
|
||||
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
// getResourceItemsForScope returns the list of items with a namespace or
|
||||
// cluster-scoped subdirectory for a specific resource.
|
||||
func (p *Parser) getResourceItemsForScope(dir, archiveRootDir string) ([]string, error) {
|
||||
files, err := p.fs.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(dir, archiveRootDir+"/"))
|
||||
}
|
||||
|
||||
var items []string
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
p.log.Warnf("Ignoring unexpected subdirectory %q in directory %q", file.Name(), strings.TrimPrefix(dir, archiveRootDir+"/"))
|
||||
continue
|
||||
}
|
||||
|
||||
items = append(items, strings.TrimSuffix(file.Name(), ".json"))
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/test"
|
||||
)
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
files []string
|
||||
dir string
|
||||
wantErr error
|
||||
want map[string]*ResourceItems
|
||||
}{
|
||||
{
|
||||
name: "when there is no top-level resources directory, an error is returned",
|
||||
dir: "root-dir",
|
||||
wantErr: errors.New("directory \"resources\" does not exist"),
|
||||
},
|
||||
{
|
||||
name: "when there are no directories under the resources directory, an empty map is returned",
|
||||
dir: "root-dir",
|
||||
files: []string{"root-dir/resources/"},
|
||||
want: map[string]*ResourceItems{},
|
||||
},
|
||||
{
|
||||
name: "a mix of cluster-scoped and namespaced items across multiple resources are correctly returned",
|
||||
dir: "root-dir",
|
||||
files: []string{
|
||||
"root-dir/resources/widgets.foo/cluster/item-1.json",
|
||||
"root-dir/resources/widgets.foo/cluster/item-2.json",
|
||||
"root-dir/resources/widgets.foo/namespaces/ns-1/item-1.json",
|
||||
"root-dir/resources/widgets.foo/namespaces/ns-1/item-2.json",
|
||||
"root-dir/resources/widgets.foo/namespaces/ns-2/item-1.json",
|
||||
"root-dir/resources/widgets.foo/namespaces/ns-2/item-2.json",
|
||||
|
||||
"root-dir/resources/dongles.foo/cluster/item-3.json",
|
||||
"root-dir/resources/dongles.foo/cluster/item-4.json",
|
||||
|
||||
"root-dir/resources/dongles.bar/namespaces/ns-3/item-3.json",
|
||||
"root-dir/resources/dongles.bar/namespaces/ns-3/item-4.json",
|
||||
"root-dir/resources/dongles.bar/namespaces/ns-4/item-5.json",
|
||||
"root-dir/resources/dongles.bar/namespaces/ns-4/item-6.json",
|
||||
},
|
||||
want: map[string]*ResourceItems{
|
||||
"widgets.foo": {
|
||||
GroupResource: "widgets.foo",
|
||||
ItemsByNamespace: map[string][]string{
|
||||
"": {"item-1", "item-2"},
|
||||
"ns-1": {"item-1", "item-2"},
|
||||
"ns-2": {"item-1", "item-2"},
|
||||
},
|
||||
},
|
||||
"dongles.foo": {
|
||||
GroupResource: "dongles.foo",
|
||||
ItemsByNamespace: map[string][]string{
|
||||
"": {"item-3", "item-4"},
|
||||
},
|
||||
},
|
||||
"dongles.bar": {
|
||||
GroupResource: "dongles.bar",
|
||||
ItemsByNamespace: map[string][]string{
|
||||
"ns-3": {"item-3", "item-4"},
|
||||
"ns-4": {"item-5", "item-6"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
p := &Parser{
|
||||
log: test.NewLogger(),
|
||||
fs: test.NewFakeFileSystem(),
|
||||
}
|
||||
|
||||
for _, file := range tc.files {
|
||||
require.NoError(t, p.fs.MkdirAll(file, 0755))
|
||||
|
||||
if !strings.HasSuffix(file, "/") {
|
||||
res, err := p.fs.Create(file)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, res.Close())
|
||||
}
|
||||
}
|
||||
|
||||
res, err := p.Parse(tc.dir)
|
||||
if tc.wantErr != nil {
|
||||
assert.Equal(t, err.Error(), tc.wantErr.Error())
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, tc.want, res)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017, 2020 the Velero contributors.
|
||||
Copyright 2017 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -20,55 +20,42 @@ import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
"github.com/vmware-tanzu/velero/pkg/discovery"
|
||||
velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/podexec"
|
||||
"github.com/vmware-tanzu/velero/pkg/restic"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/collections"
|
||||
api "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/client"
|
||||
"github.com/heptio/velero/pkg/discovery"
|
||||
"github.com/heptio/velero/pkg/plugin/velero"
|
||||
"github.com/heptio/velero/pkg/podexec"
|
||||
"github.com/heptio/velero/pkg/restic"
|
||||
"github.com/heptio/velero/pkg/util/collections"
|
||||
)
|
||||
|
||||
// BackupVersion is the current backup major version for Velero.
|
||||
// Deprecated, use BackupFormatVersion
|
||||
// BackupVersion is the current backup version for Velero.
|
||||
const BackupVersion = 1
|
||||
|
||||
// BackupFormatVersion is the current backup version for Velero, including major, minor, and patch.
|
||||
const BackupFormatVersion = "1.1.0"
|
||||
|
||||
// Backupper performs backups.
|
||||
type Backupper interface {
|
||||
// Backup takes a backup using the specification in the velerov1api.Backup and writes backup and log data
|
||||
// Backup takes a backup using the specification in the api.Backup and writes backup and log data
|
||||
// to the given writers.
|
||||
Backup(logger logrus.FieldLogger, backup *Request, backupFile io.Writer, actions []velero.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error
|
||||
}
|
||||
|
||||
// kubernetesBackupper implements Backupper.
|
||||
type kubernetesBackupper struct {
|
||||
backupClient velerov1client.BackupsGetter
|
||||
dynamicFactory client.DynamicFactory
|
||||
discoveryHelper discovery.Helper
|
||||
podCommandExecutor podexec.PodCommandExecutor
|
||||
groupBackupperFactory groupBackupperFactory
|
||||
resticBackupperFactory restic.BackupperFactory
|
||||
resticTimeout time.Duration
|
||||
}
|
||||
@@ -97,7 +84,6 @@ func cohabitatingResources() map[string]*cohabitatingResource {
|
||||
|
||||
// NewKubernetesBackupper creates a new kubernetesBackupper.
|
||||
func NewKubernetesBackupper(
|
||||
backupClient velerov1client.BackupsGetter,
|
||||
discoveryHelper discovery.Helper,
|
||||
dynamicFactory client.DynamicFactory,
|
||||
podCommandExecutor podexec.PodCommandExecutor,
|
||||
@@ -105,10 +91,10 @@ func NewKubernetesBackupper(
|
||||
resticTimeout time.Duration,
|
||||
) (Backupper, error) {
|
||||
return &kubernetesBackupper{
|
||||
backupClient: backupClient,
|
||||
discoveryHelper: discoveryHelper,
|
||||
dynamicFactory: dynamicFactory,
|
||||
podCommandExecutor: podCommandExecutor,
|
||||
groupBackupperFactory: &defaultGroupBackupperFactory{},
|
||||
resticBackupperFactory: resticBackupperFactory,
|
||||
resticTimeout: resticTimeout,
|
||||
}, nil
|
||||
@@ -156,10 +142,7 @@ func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []s
|
||||
func(item string) string {
|
||||
gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion(""))
|
||||
if err != nil {
|
||||
// If we can't resolve it, return it as-is. This prevents the generated
|
||||
// includes-excludes list from including *everything*, if none of the includes
|
||||
// can be resolved. ref. https://github.com/vmware-tanzu/velero/issues/2461
|
||||
return item
|
||||
return ""
|
||||
}
|
||||
|
||||
gr := gvr.GroupResource()
|
||||
@@ -172,11 +155,11 @@ func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []s
|
||||
|
||||
// getNamespaceIncludesExcludes returns an IncludesExcludes list containing which namespaces to
|
||||
// include and exclude from the backup.
|
||||
func getNamespaceIncludesExcludes(backup *velerov1api.Backup) *collections.IncludesExcludes {
|
||||
func getNamespaceIncludesExcludes(backup *api.Backup) *collections.IncludesExcludes {
|
||||
return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
|
||||
}
|
||||
|
||||
func getResourceHooks(hookSpecs []velerov1api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]resourceHook, error) {
|
||||
func getResourceHooks(hookSpecs []api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]resourceHook, error) {
|
||||
resourceHooks := make([]resourceHook, 0, len(hookSpecs))
|
||||
|
||||
for _, s := range hookSpecs {
|
||||
@@ -191,7 +174,7 @@ func getResourceHooks(hookSpecs []velerov1api.BackupResourceHookSpec, discoveryH
|
||||
return resourceHooks, nil
|
||||
}
|
||||
|
||||
func getResourceHook(hookSpec velerov1api.BackupResourceHookSpec, discoveryHelper discovery.Helper) (resourceHook, error) {
|
||||
func getResourceHook(hookSpec api.BackupResourceHookSpec, discoveryHelper discovery.Helper) (resourceHook, error) {
|
||||
h := resourceHook{
|
||||
name: hookSpec.Name,
|
||||
namespaces: collections.NewIncludesExcludes().Includes(hookSpec.IncludedNamespaces...).Excludes(hookSpec.ExcludedNamespaces...),
|
||||
@@ -216,7 +199,7 @@ type VolumeSnapshotterGetter interface {
|
||||
}
|
||||
|
||||
// Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file
|
||||
// written to backupFile. The finalized velerov1api.Backup is written to metadata. Any error that represents
|
||||
// written to backupFile. The finalized api.Backup is written to metadata. Any error that represents
|
||||
// a complete backup failure is returned. Errors that constitute partial failures (i.e. failures to
|
||||
// back up individual resources that don't prevent the backup from continuing to be processed) are logged
|
||||
// to the backup log.
|
||||
@@ -254,7 +237,7 @@ func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Req
|
||||
backupRequest.BackedUpItems = map[itemKey]struct{}{}
|
||||
|
||||
podVolumeTimeout := kb.resticTimeout
|
||||
if val := backupRequest.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
|
||||
if val := backupRequest.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" {
|
||||
parsed, err := time.ParseDuration(val)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
|
||||
@@ -274,230 +257,31 @@ func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Req
|
||||
}
|
||||
}
|
||||
|
||||
// set up a temp dir for the itemCollector to use to temporarily
|
||||
// store items as they're scraped from the API.
|
||||
tempDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error creating temp dir for backup")
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
gb := kb.groupBackupperFactory.newGroupBackupper(
|
||||
log,
|
||||
backupRequest,
|
||||
kb.dynamicFactory,
|
||||
kb.discoveryHelper,
|
||||
cohabitatingResources(),
|
||||
kb.podCommandExecutor,
|
||||
tw,
|
||||
resticBackupper,
|
||||
newPVCSnapshotTracker(),
|
||||
volumeSnapshotterGetter,
|
||||
)
|
||||
|
||||
collector := &itemCollector{
|
||||
log: log,
|
||||
backupRequest: backupRequest,
|
||||
discoveryHelper: kb.discoveryHelper,
|
||||
dynamicFactory: kb.dynamicFactory,
|
||||
cohabitatingResources: cohabitatingResources(),
|
||||
dir: tempDir,
|
||||
}
|
||||
|
||||
items := collector.getAllItems()
|
||||
log.WithField("progress", "").Infof("Collected %d items matching the backup spec from the Kubernetes API (actual number of items backed up may be more or less depending on velero.io/exclude-from-backup annotation, plugins returning additional related items to back up, etc.)", len(items))
|
||||
|
||||
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
|
||||
patch := fmt.Sprintf(`{"status":{"progress":{"totalItems":%d}}}`, len(items))
|
||||
if _, err := kb.backupClient.Backups(backupRequest.Namespace).Patch(backupRequest.Name, types.MergePatchType, []byte(patch)); err != nil {
|
||||
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress.totalItems")
|
||||
}
|
||||
|
||||
itemBackupper := &itemBackupper{
|
||||
backupRequest: backupRequest,
|
||||
tarWriter: tw,
|
||||
dynamicFactory: kb.dynamicFactory,
|
||||
discoveryHelper: kb.discoveryHelper,
|
||||
resticBackupper: resticBackupper,
|
||||
resticSnapshotTracker: newPVCSnapshotTracker(),
|
||||
volumeSnapshotterGetter: volumeSnapshotterGetter,
|
||||
itemHookHandler: &defaultItemHookHandler{
|
||||
podCommandExecutor: kb.podCommandExecutor,
|
||||
},
|
||||
}
|
||||
|
||||
// helper struct to send current progress between the main
|
||||
// backup loop and the gouroutine that periodically patches
|
||||
// the backup CR with progress updates
|
||||
type progressUpdate struct {
|
||||
totalItems, itemsBackedUp int
|
||||
}
|
||||
|
||||
// the main backup process will send on this channel once
|
||||
// for every item it processes.
|
||||
update := make(chan progressUpdate)
|
||||
|
||||
// the main backup process will send on this channel when
|
||||
// it's done sending progress updates
|
||||
quit := make(chan struct{})
|
||||
|
||||
// This is the progress updater goroutine that receives
|
||||
// progress updates on the 'update' channel. It patches
|
||||
// the backup CR with progress updates at most every second,
|
||||
// but it will not issue a patch if it hasn't received a new
|
||||
// update since the previous patch. This goroutine exits
|
||||
// when it receives on the 'quit' channel.
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
var lastUpdate *progressUpdate
|
||||
for {
|
||||
select {
|
||||
case <-quit:
|
||||
ticker.Stop()
|
||||
return
|
||||
case val := <-update:
|
||||
lastUpdate = &val
|
||||
case <-ticker.C:
|
||||
if lastUpdate != nil {
|
||||
backupRequest.Status.Progress.TotalItems = lastUpdate.totalItems
|
||||
backupRequest.Status.Progress.ItemsBackedUp = lastUpdate.itemsBackedUp
|
||||
|
||||
patch := fmt.Sprintf(`{"status":{"progress":{"totalItems":%d,"itemsBackedUp":%d}}}`, lastUpdate.totalItems, lastUpdate.itemsBackedUp)
|
||||
if _, err := kb.backupClient.Backups(backupRequest.Namespace).Patch(backupRequest.Name, types.MergePatchType, []byte(patch)); err != nil {
|
||||
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
|
||||
}
|
||||
lastUpdate = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
backedUpGroupResources := map[schema.GroupResource]bool{}
|
||||
totalItems := len(items)
|
||||
|
||||
for i, item := range items {
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": item.groupResource.String(),
|
||||
"namespace": item.namespace,
|
||||
"name": item.name,
|
||||
}).Infof("Processing item")
|
||||
|
||||
// use an anonymous func so we can defer-close/remove the file
|
||||
// as soon as we're done with it
|
||||
func() {
|
||||
var unstructured unstructured.Unstructured
|
||||
|
||||
f, err := os.Open(item.path)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("Error opening file containing item")
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
if err := json.NewDecoder(f).Decode(&unstructured); err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("Error decoding JSON from file")
|
||||
return
|
||||
}
|
||||
|
||||
if backedUp := kb.backupItem(log, item.groupResource, itemBackupper, &unstructured, item.preferredGVR); backedUp {
|
||||
backedUpGroupResources[item.groupResource] = true
|
||||
}
|
||||
}()
|
||||
|
||||
// updated total is computed as "how many items we've backed up so far, plus
|
||||
// how many items we know of that are remaining"
|
||||
totalItems = len(backupRequest.BackedUpItems) + (len(items) - (i + 1))
|
||||
|
||||
// send a progress update
|
||||
update <- progressUpdate{
|
||||
totalItems: totalItems,
|
||||
itemsBackedUp: len(backupRequest.BackedUpItems),
|
||||
}
|
||||
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": item.groupResource.String(),
|
||||
"namespace": item.namespace,
|
||||
"name": item.name,
|
||||
}).Infof("Backed up %d items out of an estimated total of %d (estimate will change throughout the backup)", len(backupRequest.BackedUpItems), totalItems)
|
||||
}
|
||||
|
||||
// no more progress updates will be sent on the 'update' channel
|
||||
quit <- struct{}{}
|
||||
|
||||
// back up CRD for resource if found. We should only need to do this if we've backed up at least
|
||||
// one item for the resource and IncludeClusterResources is nil. If IncludeClusterResources is false
|
||||
// we don't want to back it up, and if it's true it will already be included.
|
||||
if backupRequest.Spec.IncludeClusterResources == nil {
|
||||
for gr := range backedUpGroupResources {
|
||||
kb.backupCRD(log, gr, itemBackupper)
|
||||
for _, group := range kb.discoveryHelper.Resources() {
|
||||
if err := gb.backupGroup(group); err != nil {
|
||||
log.WithError(err).WithField("apiGroup", group.String()).Error("Error backing up API group")
|
||||
}
|
||||
}
|
||||
|
||||
// do a final update on progress since we may have just added some CRDs and may not have updated
|
||||
// for the last few processed items.
|
||||
backupRequest.Status.Progress.TotalItems = len(backupRequest.BackedUpItems)
|
||||
backupRequest.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems)
|
||||
|
||||
patch = fmt.Sprintf(`{"status":{"progress":{"totalItems":%d,"itemsBackedUp":%d}}}`, len(backupRequest.BackedUpItems), len(backupRequest.BackedUpItems))
|
||||
if _, err := kb.backupClient.Backups(backupRequest.Namespace).Patch(backupRequest.Name, types.MergePatchType, []byte(patch)); err != nil {
|
||||
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
|
||||
}
|
||||
|
||||
log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// backupItem backs up a single item via itemBackupper and reports whether
// the item was actually included in the backup. Errors — including each
// individual error inside a kubeerrs.Aggregate — are logged rather than
// returned, so a failure on one item does not abort the overall backup.
func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource) bool {
	backedUpItem, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR)
	if aggregate, ok := err.(kubeerrs.Aggregate); ok {
		log.WithField("name", unstructured.GetName()).Infof("%d errors encountered backup up item", len(aggregate.Errors()))
		// log each error separately so we get error location info in the log, and an
		// accurate count of errors
		for _, err = range aggregate.Errors() {
			log.WithError(err).WithField("name", unstructured.GetName()).Error("Error backing up item")
		}

		return false
	}
	if err != nil {
		log.WithError(err).WithField("name", unstructured.GetName()).Error("Error backing up item")
		return false
	}
	return backedUpItem
}
|
||||
|
||||
// backupCRD checks if the resource is a custom resource, and if so, backs up the custom resource definition
// associated with it. All failures along the way are logged and swallowed:
// this is best-effort, and a missing or unreadable CRD never fails the backup.
func (kb *kubernetesBackupper) backupCRD(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper) {
	crdGroupResource := kuberesource.CustomResourceDefinitions

	log.Debugf("Getting server preferred API version for %s", crdGroupResource)
	gvr, apiResource, err := kb.discoveryHelper.ResourceFor(crdGroupResource.WithVersion(""))
	if err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error getting resolved resource for %s", crdGroupResource)
		return
	}
	log.Debugf("Got server preferred API version %s for %s", gvr.Version, crdGroupResource)

	log.Debugf("Getting dynamic client for %s", gvr.String())
	// empty namespace: CRDs are cluster-scoped.
	crdClient, err := kb.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), apiResource, "")
	if err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error getting dynamic client for %s", crdGroupResource)
		return
	}
	log.Debugf("Got dynamic client for %s", gvr.String())

	// try to get a CRD whose name matches the provided GroupResource
	// (CRD object names are "<plural>.<group>", which is what gr.String() yields).
	unstructured, err := crdClient.Get(gr.String(), metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// not found: this means the GroupResource provided was not a
		// custom resource, so there's no CRD to back up.
		log.Debugf("No CRD found for GroupResource %s", gr.String())
		return
	}
	if err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error getting CRD %s", gr.String())
		return
	}
	log.Infof("Found associated CRD %s to add to backup", gr.String())

	kb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr)
}
|
||||
|
||||
func (kb *kubernetesBackupper) writeBackupVersion(tw *tar.Writer) error {
|
||||
versionFile := filepath.Join(velerov1api.MetadataDir, "version")
|
||||
versionString := fmt.Sprintf("%s\n", BackupFormatVersion)
|
||||
versionFile := filepath.Join(api.MetadataDir, "version")
|
||||
versionString := fmt.Sprintf("%d\n", BackupVersion)
|
||||
|
||||
hdr := &tar.Header{
|
||||
Name: versionFile,
|
||||
|
||||
@@ -22,9 +22,9 @@ import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
v1 "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/kuberesource"
|
||||
"github.com/heptio/velero/pkg/plugin/velero"
|
||||
)
|
||||
|
||||
// PVCAction inspects a PersistentVolumeClaim for the PersistentVolume
|
||||
|
||||
@@ -24,10 +24,10 @@ import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
v1 "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/kuberesource"
|
||||
"github.com/heptio/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/heptio/velero/pkg/test"
|
||||
)
|
||||
|
||||
func TestBackupPVAction(t *testing.T) {
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
@@ -41,17 +40,17 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
"github.com/vmware-tanzu/velero/pkg/discovery"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/restic"
|
||||
"github.com/vmware-tanzu/velero/pkg/test"
|
||||
testutil "github.com/vmware-tanzu/velero/pkg/test"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
"github.com/vmware-tanzu/velero/pkg/volume"
|
||||
velerov1 "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/builder"
|
||||
"github.com/heptio/velero/pkg/client"
|
||||
"github.com/heptio/velero/pkg/discovery"
|
||||
"github.com/heptio/velero/pkg/kuberesource"
|
||||
"github.com/heptio/velero/pkg/plugin/velero"
|
||||
"github.com/heptio/velero/pkg/restic"
|
||||
"github.com/heptio/velero/pkg/test"
|
||||
testutil "github.com/heptio/velero/pkg/test"
|
||||
kubeutil "github.com/heptio/velero/pkg/util/kube"
|
||||
"github.com/heptio/velero/pkg/volume"
|
||||
)
|
||||
|
||||
func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
@@ -99,55 +98,11 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
}
|
||||
file = file + "/" + item.name + ".json"
|
||||
expectedFiles = append(expectedFiles, file)
|
||||
|
||||
fileWithVersion := "resources/" + gvkToResource[item.resource]
|
||||
if item.namespace != "" {
|
||||
fileWithVersion = fileWithVersion + "/v1-preferredversion/" + "namespaces/" + item.namespace
|
||||
} else {
|
||||
file = file + "/cluster"
|
||||
fileWithVersion = fileWithVersion + "/v1-preferredversion" + "/cluster"
|
||||
}
|
||||
fileWithVersion = fileWithVersion + "/" + item.name + ".json"
|
||||
expectedFiles = append(expectedFiles, fileWithVersion)
|
||||
}
|
||||
|
||||
assertTarballContents(t, backupFile, append(expectedFiles, "metadata/version")...)
|
||||
}
|
||||
|
||||
// TestBackupProgressIsUpdated verifies that after a backup has run, its
|
||||
// status.progress fields are updated to reflect the total number of items
|
||||
// backed up. It validates this by comparing their values to the length of
|
||||
// the request's BackedUpItems field.
|
||||
func TestBackupProgressIsUpdated(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
req := &Request{Backup: defaultBackup().Result()}
|
||||
backupFile := bytes.NewBuffer([]byte{})
|
||||
|
||||
apiResources := []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("foo", "bar").Result(),
|
||||
builder.ForPod("zoo", "raz").Result(),
|
||||
),
|
||||
test.Deployments(
|
||||
builder.ForDeployment("foo", "bar").Result(),
|
||||
builder.ForDeployment("zoo", "raz").Result(),
|
||||
),
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("bar").Result(),
|
||||
builder.ForPersistentVolume("baz").Result(),
|
||||
),
|
||||
}
|
||||
for _, resource := range apiResources {
|
||||
h.addItems(t, resource)
|
||||
}
|
||||
|
||||
h.backupper.Backup(h.log, req, backupFile, nil, nil)
|
||||
|
||||
require.NotNil(t, req.Status.Progress)
|
||||
assert.Equal(t, len(req.BackedUpItems), req.Status.Progress.TotalItems)
|
||||
assert.Equal(t, len(req.BackedUpItems), req.Status.Progress.ItemsBackedUp)
|
||||
}
|
||||
|
||||
// TestBackupResourceFiltering runs backups with different combinations
|
||||
// of resource filters (included/excluded resources, included/excluded
|
||||
// namespaces, label selectors, "include cluster resources" flag), and
|
||||
@@ -179,10 +134,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -203,8 +154,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -225,8 +174,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -247,8 +194,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -269,8 +214,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -297,10 +240,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -326,9 +265,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/cluster/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -353,9 +289,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/persistentvolumes/cluster/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -380,8 +313,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/cluster/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -409,12 +340,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/cluster/bar.json",
|
||||
"resources/persistentvolumes/cluster/baz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/bar.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/baz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -439,10 +364,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-2/pod-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -465,8 +386,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -488,8 +407,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -514,11 +431,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-3/pod-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-3/pod-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -541,9 +453,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/namespaces/ns-3/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-3/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -567,11 +476,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-3/pod-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-3/pod-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -594,10 +498,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -620,10 +520,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -644,27 +540,8 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when all included resources are unresolvable, nothing is included",
|
||||
backup: defaultBackup().
|
||||
IncludedResources("unresolvable-1", "unresolvable-2").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("foo", "bar").Result(),
|
||||
builder.ForPod("zoo", "raz").Result(),
|
||||
),
|
||||
test.Deployments(
|
||||
builder.ForDeployment("foo", "bar").Result(),
|
||||
builder.ForDeployment("zoo", "raz").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{},
|
||||
},
|
||||
{
|
||||
name: "unresolvable excluded resources are ignored",
|
||||
backup: defaultBackup().
|
||||
@@ -683,35 +560,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when all excluded resources are unresolvable, nothing is excluded",
|
||||
backup: defaultBackup().
|
||||
IncludedResources("*").
|
||||
ExcludedResources("unresolvable-1", "unresolvable-2").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("foo", "bar").Result(),
|
||||
builder.ForPod("zoo", "raz").Result(),
|
||||
),
|
||||
test.Deployments(
|
||||
builder.ForDeployment("foo", "bar").Result(),
|
||||
builder.ForDeployment("zoo", "raz").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/foo/bar.json",
|
||||
"resources/pods/namespaces/zoo/raz.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -725,181 +573,6 @@ func TestBackupResourceFiltering(t *testing.T) {
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t)
|
||||
req = &Request{Backup: tc.backup}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
|
||||
for _, resource := range tc.apiResources {
|
||||
h.addItems(t, resource)
|
||||
}
|
||||
|
||||
h.backupper.Backup(h.log, req, backupFile, nil, nil)
|
||||
|
||||
assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCRDInclusion tests whether related CRDs are included, based on
|
||||
// backed-up resources and "include cluster resources" flag, and
|
||||
// verifies that the set of items written to the backup tarball are
|
||||
// correct. Validation is done by looking at the names of the files in
|
||||
// the backup tarball; the contents of the files are not checked.
|
||||
func TestCRDInclusion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
backup *velerov1.Backup
|
||||
apiResources []*test.APIResource
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "include cluster resources=auto includes all CRDs when running a full-cluster backup",
|
||||
backup: defaultBackup().
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=false excludes all CRDs when backing up all namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(false).
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=true includes all CRDs when running a full-cluster backup",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(true).
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=auto includes CRDs with CRs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=false excludes all CRDs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(false).
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=true includes all CRDs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(true).
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/v1beta1-preferredversion/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/v1-preferredversion/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -946,8 +619,6 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/deployments.extensions/namespaces/foo/bar.json",
|
||||
"resources/deployments.extensions/namespaces/zoo/raz.json",
|
||||
"resources/deployments.extensions/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.extensions/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -966,8 +637,6 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/deployments.apps/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/namespaces/zoo/raz.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/foo/bar.json",
|
||||
"resources/deployments.apps/v1-preferredversion/namespaces/zoo/raz.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -994,7 +663,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
// TestBackupUsesNewCohabitatingResourcesForEachBackup ensures that when two backups are
|
||||
// run that each include cohabitating resources, one copy of the relevant resources is
|
||||
// backed up in each backup. Verification is done by looking at the contents of the backup
|
||||
// tarball. This covers a specific issue that was fixed by https://github.com/vmware-tanzu/velero/pull/485.
|
||||
// tarball. This covers a specific issue that was fixed by https://github.com/heptio/velero/pull/485.
|
||||
func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
|
||||
@@ -1009,7 +678,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
|
||||
h.backupper.Backup(h.log, backup1, backup1File, nil, nil)
|
||||
|
||||
assertTarballContents(t, backup1File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json", "resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json")
|
||||
assertTarballContents(t, backup1File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json")
|
||||
|
||||
// run and verify backup 2
|
||||
backup2 := &Request{
|
||||
@@ -1019,7 +688,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
|
||||
h.backupper.Backup(h.log, backup2, backup2File, nil, nil)
|
||||
|
||||
assertTarballContents(t, backup2File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json", "resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json")
|
||||
assertTarballContents(t, backup2File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json")
|
||||
}
|
||||
|
||||
// TestBackupResourceOrdering runs backups of the core API group and ensures that items are backed
|
||||
@@ -1273,26 +942,6 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) {
|
||||
new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "action with a selector that has unresolvable resources doesn't run for any resources",
|
||||
backup: defaultBackup().
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("ns-1", "pod-1").Result(),
|
||||
),
|
||||
test.PVCs(
|
||||
builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(),
|
||||
),
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").Result(),
|
||||
builder.ForPersistentVolume("pv-2").Result(),
|
||||
),
|
||||
},
|
||||
actions: map[*recordResourcesAction][]string{
|
||||
new(recordResourcesAction).ForResource("unresolvable"): nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -1563,9 +1212,6 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/namespaces/ns-3/pod-3.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-3/pod-3.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1592,7 +1238,6 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1624,9 +1269,6 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1655,7 +1297,6 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1686,44 +1327,10 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "additional items with the velero.io/exclude-from-backup label are not backed up",
|
||||
backup: defaultBackup().IncludedNamespaces("ns-1").Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("ns-1", "pod-1").Result(),
|
||||
),
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true")).Result(),
|
||||
builder.ForPersistentVolume("pv-2").Result(),
|
||||
),
|
||||
},
|
||||
actions: []velero.BackupItemAction{
|
||||
&pluggableAction{
|
||||
executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) {
|
||||
additionalItems := []velero.ResourceIdentifier{
|
||||
{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
|
||||
{GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"},
|
||||
}
|
||||
|
||||
return item, additionalItems, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/persistentvolumes/cluster/pv-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "if additional items aren't found in the API, they're skipped and the original item is still backed up",
|
||||
name: "if there's an error backing up additional items, the item the action was run for isn't backed up",
|
||||
backup: defaultBackup().Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -1746,12 +1353,8 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/namespaces/ns-3/pod-3.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-3/pod-3.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -1949,7 +1552,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with deprecated zone annotation creates a snapshot",
|
||||
name: "persistent volume with zone annotation creates a snapshot",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
@@ -1982,74 +1585,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with GA zone annotation creates a snapshot",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("topology.kubernetes.io/zone", "zone-1")).Result(),
|
||||
),
|
||||
},
|
||||
snapshotterGetter: map[string]velero.VolumeSnapshotter{
|
||||
"default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false),
|
||||
},
|
||||
want: []*volume.Snapshot{
|
||||
{
|
||||
Spec: volume.SnapshotSpec{
|
||||
BackupName: "backup-1",
|
||||
Location: "default",
|
||||
PersistentVolumeName: "pv-1",
|
||||
ProviderVolumeID: "vol-1",
|
||||
VolumeAZ: "zone-1",
|
||||
VolumeType: "type-1",
|
||||
VolumeIOPS: int64Ptr(100),
|
||||
},
|
||||
Status: volume.SnapshotStatus{
|
||||
Phase: volume.SnapshotPhaseCompleted,
|
||||
ProviderSnapshotID: "vol-1-snapshot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with both GA and deprecated zone annotation creates a snapshot and should use the GA",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabelsMap(map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone-1-deprecated", "topology.kubernetes.io/zone": "zone-1-ga"})).Result(),
|
||||
),
|
||||
},
|
||||
snapshotterGetter: map[string]velero.VolumeSnapshotter{
|
||||
"default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1-ga", "type-1", 100, false),
|
||||
},
|
||||
want: []*volume.Snapshot{
|
||||
{
|
||||
Spec: volume.SnapshotSpec{
|
||||
BackupName: "backup-1",
|
||||
Location: "default",
|
||||
PersistentVolumeName: "pv-1",
|
||||
ProviderVolumeID: "vol-1",
|
||||
VolumeAZ: "zone-1-ga",
|
||||
VolumeType: "type-1",
|
||||
VolumeIOPS: int64Ptr(100),
|
||||
},
|
||||
Status: volume.SnapshotStatus{
|
||||
Phase: volume.SnapshotPhaseCompleted,
|
||||
ProviderSnapshotID: "vol-1-snapshot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error returned from CreateSnapshot results in a failed snapshot",
|
||||
req: &Request{
|
||||
@@ -2342,8 +1877,6 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
wantBackedUp: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -2393,8 +1926,6 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
wantBackedUp: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -2449,7 +1980,6 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
},
|
||||
wantBackedUp: []string{
|
||||
"resources/pods/namespaces/ns-1/pod-1.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -2501,7 +2031,6 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
},
|
||||
wantBackedUp: []string{
|
||||
"resources/pods/namespaces/ns-2/pod-2.json",
|
||||
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -2540,24 +2069,22 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type fakeResticBackupperFactory struct{}
|
||||
|
||||
func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (restic.Backupper, error) {
|
||||
return &fakeResticBackupper{}, nil
|
||||
type fakeResticBackupperFactory struct {
|
||||
podVolumeBackups []*velerov1.PodVolumeBackup
|
||||
}
|
||||
|
||||
type fakeResticBackupper struct{}
|
||||
func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (restic.Backupper, error) {
|
||||
return &fakeResticBackupper{
|
||||
podVolumeBackups: f.podVolumeBackups,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BackupPodVolumes returns one pod volume backup per entry in volumes, with namespace "velero"
|
||||
// and name "pvb-<pod-namespace>-<pod-name>-<volume-name>".
|
||||
func (b *fakeResticBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, volumes []string, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) {
|
||||
var res []*velerov1.PodVolumeBackup
|
||||
for _, vol := range volumes {
|
||||
pvb := builder.ForPodVolumeBackup("velero", fmt.Sprintf("pvb-%s-%s-%s", pod.Namespace, pod.Name, vol)).Result()
|
||||
res = append(res, pvb)
|
||||
}
|
||||
type fakeResticBackupper struct {
|
||||
podVolumeBackups []*velerov1.PodVolumeBackup
|
||||
}
|
||||
|
||||
return res, nil
|
||||
func (b *fakeResticBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) {
|
||||
return b.podVolumeBackups, nil
|
||||
}
|
||||
|
||||
// TestBackupWithRestic runs backups of pods that are annotated for restic backup,
|
||||
@@ -2582,28 +2109,7 @@ func TestBackupWithRestic(t *testing.T) {
|
||||
),
|
||||
},
|
||||
want: []*velerov1.PodVolumeBackup{
|
||||
builder.ForPodVolumeBackup("velero", "pvb-ns-1-pod-1-foo").Result(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when a PVC is used by two pods and annotated for restic backup on both, only one should be backed up",
|
||||
backup: defaultBackup().Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("ns-1", "pod-1").
|
||||
ObjectMeta(builder.WithAnnotations("backup.velero.io/backup-volumes", "foo")).
|
||||
Volumes(builder.ForVolume("foo").PersistentVolumeClaimSource("pvc-1").Result()).
|
||||
Result(),
|
||||
),
|
||||
test.Pods(
|
||||
builder.ForPod("ns-1", "pod-2").
|
||||
ObjectMeta(builder.WithAnnotations("backup.velero.io/backup-volumes", "bar")).
|
||||
Volumes(builder.ForVolume("bar").PersistentVolumeClaimSource("pvc-1").Result()).
|
||||
Result(),
|
||||
),
|
||||
},
|
||||
want: []*velerov1.PodVolumeBackup{
|
||||
builder.ForPodVolumeBackup("velero", "pvb-ns-1-pod-1-foo").Result(),
|
||||
builder.ForPodVolumeBackup("velero", "pvb-1").Result(),
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -2638,8 +2144,8 @@ func TestBackupWithRestic(t *testing.T) {
|
||||
WithVolume("pv-2", "vol-2", "", "type-1", 100, false),
|
||||
},
|
||||
want: []*velerov1.PodVolumeBackup{
|
||||
builder.ForPodVolumeBackup("velero", "pvb-ns-1-pod-1-vol-1").Result(),
|
||||
builder.ForPodVolumeBackup("velero", "pvb-ns-1-pod-1-vol-2").Result(),
|
||||
builder.ForPodVolumeBackup("velero", "pvb-1").Result(),
|
||||
builder.ForPodVolumeBackup("velero", "pvb-2").Result(),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -2652,7 +2158,9 @@ func TestBackupWithRestic(t *testing.T) {
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
|
||||
h.backupper.resticBackupperFactory = new(fakeResticBackupperFactory)
|
||||
h.backupper.resticBackupperFactory = &fakeResticBackupperFactory{
|
||||
podVolumeBackups: tc.want,
|
||||
}
|
||||
|
||||
for _, resource := range tc.apiResources {
|
||||
h.addItems(t, resource)
|
||||
@@ -2726,9 +2234,9 @@ func newHarness(t *testing.T) *harness {
|
||||
return &harness{
|
||||
APIServer: apiServer,
|
||||
backupper: &kubernetesBackupper{
|
||||
backupClient: apiServer.VeleroClient.VeleroV1(),
|
||||
dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient),
|
||||
discoveryHelper: discoveryHelper,
|
||||
dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient),
|
||||
discoveryHelper: discoveryHelper,
|
||||
groupBackupperFactory: new(defaultGroupBackupperFactory),
|
||||
|
||||
// unsupported
|
||||
podCommandExecutor: nil,
|
||||
|
||||
@@ -21,21 +21,21 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/label"
|
||||
)
|
||||
|
||||
// NewDeleteBackupRequest creates a DeleteBackupRequest for the backup identified by name and uid.
|
||||
func NewDeleteBackupRequest(name string, uid string) *velerov1api.DeleteBackupRequest {
|
||||
return &velerov1api.DeleteBackupRequest{
|
||||
func NewDeleteBackupRequest(name string, uid string) *v1.DeleteBackupRequest {
|
||||
return &v1.DeleteBackupRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: name + "-",
|
||||
Labels: map[string]string{
|
||||
velerov1api.BackupNameLabel: label.GetValidName(name),
|
||||
velerov1api.BackupUIDLabel: uid,
|
||||
v1.BackupNameLabel: label.GetValidName(name),
|
||||
v1.BackupUIDLabel: uid,
|
||||
},
|
||||
},
|
||||
Spec: velerov1api.DeleteBackupRequestSpec{
|
||||
Spec: v1.DeleteBackupRequestSpec{
|
||||
BackupName: name,
|
||||
},
|
||||
}
|
||||
@@ -45,6 +45,6 @@ func NewDeleteBackupRequest(name string, uid string) *velerov1api.DeleteBackupRe
|
||||
// find DeleteBackupRequests for the backup identified by name and uid.
|
||||
func NewDeleteBackupRequestListOptions(name, uid string) metav1.ListOptions {
|
||||
return metav1.ListOptions{
|
||||
LabelSelector: fmt.Sprintf("%s=%s,%s=%s", velerov1api.BackupNameLabel, label.GetValidName(name), velerov1api.BackupUIDLabel, uid),
|
||||
LabelSelector: fmt.Sprintf("%s=%s,%s=%s", v1.BackupNameLabel, label.GetValidName(name), v1.BackupUIDLabel, uid),
|
||||
}
|
||||
}
|
||||
|
||||
181
pkg/backup/group_backupper.go
Normal file
181
pkg/backup/group_backupper.go
Normal file
@@ -0,0 +1,181 @@
|
||||
/*
|
||||
Copyright 2017 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backup
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/heptio/velero/pkg/client"
|
||||
"github.com/heptio/velero/pkg/discovery"
|
||||
"github.com/heptio/velero/pkg/podexec"
|
||||
"github.com/heptio/velero/pkg/restic"
|
||||
)
|
||||
|
||||
type groupBackupperFactory interface {
|
||||
newGroupBackupper(
|
||||
log logrus.FieldLogger,
|
||||
backupRequest *Request,
|
||||
dynamicFactory client.DynamicFactory,
|
||||
discoveryHelper discovery.Helper,
|
||||
cohabitatingResources map[string]*cohabitatingResource,
|
||||
podCommandExecutor podexec.PodCommandExecutor,
|
||||
tarWriter tarWriter,
|
||||
resticBackupper restic.Backupper,
|
||||
resticSnapshotTracker *pvcSnapshotTracker,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) groupBackupper
|
||||
}
|
||||
|
||||
type defaultGroupBackupperFactory struct{}
|
||||
|
||||
func (f *defaultGroupBackupperFactory) newGroupBackupper(
|
||||
log logrus.FieldLogger,
|
||||
backupRequest *Request,
|
||||
dynamicFactory client.DynamicFactory,
|
||||
discoveryHelper discovery.Helper,
|
||||
cohabitatingResources map[string]*cohabitatingResource,
|
||||
podCommandExecutor podexec.PodCommandExecutor,
|
||||
tarWriter tarWriter,
|
||||
resticBackupper restic.Backupper,
|
||||
resticSnapshotTracker *pvcSnapshotTracker,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) groupBackupper {
|
||||
return &defaultGroupBackupper{
|
||||
log: log,
|
||||
backupRequest: backupRequest,
|
||||
dynamicFactory: dynamicFactory,
|
||||
discoveryHelper: discoveryHelper,
|
||||
cohabitatingResources: cohabitatingResources,
|
||||
podCommandExecutor: podCommandExecutor,
|
||||
tarWriter: tarWriter,
|
||||
resticBackupper: resticBackupper,
|
||||
resticSnapshotTracker: resticSnapshotTracker,
|
||||
volumeSnapshotterGetter: volumeSnapshotterGetter,
|
||||
|
||||
resourceBackupperFactory: &defaultResourceBackupperFactory{},
|
||||
}
|
||||
}
|
||||
|
||||
type groupBackupper interface {
|
||||
backupGroup(group *metav1.APIResourceList) error
|
||||
}
|
||||
|
||||
type defaultGroupBackupper struct {
|
||||
log logrus.FieldLogger
|
||||
backupRequest *Request
|
||||
dynamicFactory client.DynamicFactory
|
||||
discoveryHelper discovery.Helper
|
||||
cohabitatingResources map[string]*cohabitatingResource
|
||||
podCommandExecutor podexec.PodCommandExecutor
|
||||
tarWriter tarWriter
|
||||
resticBackupper restic.Backupper
|
||||
resticSnapshotTracker *pvcSnapshotTracker
|
||||
resourceBackupperFactory resourceBackupperFactory
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter
|
||||
}
|
||||
|
||||
// backupGroup backs up a single API group.
|
||||
func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) error {
|
||||
log := gb.log.WithField("group", group.GroupVersion)
|
||||
|
||||
log.Infof("Backing up group")
|
||||
|
||||
// Parse so we can check if this is the core group
|
||||
gv, err := schema.ParseGroupVersion(group.GroupVersion)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing GroupVersion %q", group.GroupVersion)
|
||||
}
|
||||
if gv.Group == "" {
|
||||
// This is the core group, so make sure we process in the following order: pods, pvcs, pvs,
|
||||
// everything else.
|
||||
sortCoreGroup(group)
|
||||
}
|
||||
|
||||
rb := gb.resourceBackupperFactory.newResourceBackupper(
|
||||
log,
|
||||
gb.backupRequest,
|
||||
gb.dynamicFactory,
|
||||
gb.discoveryHelper,
|
||||
gb.cohabitatingResources,
|
||||
gb.podCommandExecutor,
|
||||
gb.tarWriter,
|
||||
gb.resticBackupper,
|
||||
gb.resticSnapshotTracker,
|
||||
gb.volumeSnapshotterGetter,
|
||||
)
|
||||
|
||||
for _, resource := range group.APIResources {
|
||||
if err := rb.backupResource(group, resource); err != nil {
|
||||
log.WithError(err).WithField("resource", resource.String()).Error("Error backing up API resource")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sortCoreGroup sorts group as a coreGroup.
|
||||
func sortCoreGroup(group *metav1.APIResourceList) {
|
||||
sort.Stable(coreGroup(group.APIResources))
|
||||
}
|
||||
|
||||
// coreGroup is used to sort APIResources in the core API group. The sort order is pods, pvcs, pvs,
|
||||
// then everything else.
|
||||
type coreGroup []metav1.APIResource
|
||||
|
||||
func (c coreGroup) Len() int {
|
||||
return len(c)
|
||||
}
|
||||
|
||||
func (c coreGroup) Less(i, j int) bool {
|
||||
return coreGroupResourcePriority(c[i].Name) < coreGroupResourcePriority(c[j].Name)
|
||||
}
|
||||
|
||||
func (c coreGroup) Swap(i, j int) {
|
||||
c[j], c[i] = c[i], c[j]
|
||||
}
|
||||
|
||||
// These constants represent the relative priorities for resources in the core API group. We want to
|
||||
// ensure that we process pods, then pvcs, then pvs, then anything else. This ensures that when a
|
||||
// pod is backed up, we can perform a pre hook, then process pvcs and pvs (including taking a
|
||||
// snapshot), then perform a post hook on the pod.
|
||||
const (
|
||||
pod = iota
|
||||
pvc
|
||||
pv
|
||||
other
|
||||
)
|
||||
|
||||
// coreGroupResourcePriority returns the relative priority of the resource, in the following order:
|
||||
// pods, pvcs, pvs, everything else.
|
||||
func coreGroupResourcePriority(resource string) int {
|
||||
switch strings.ToLower(resource) {
|
||||
case "pods":
|
||||
return pod
|
||||
case "persistentvolumeclaims":
|
||||
return pvc
|
||||
case "persistentvolumes":
|
||||
return pv
|
||||
}
|
||||
|
||||
return other
|
||||
}
|
||||
50
pkg/backup/group_backupper_test.go
Normal file
50
pkg/backup/group_backupper_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2017, 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backup
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestSortCoreGroup(t *testing.T) {
|
||||
group := &metav1.APIResourceList{
|
||||
GroupVersion: "v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{Name: "persistentvolumes"},
|
||||
{Name: "configmaps"},
|
||||
{Name: "antelopes"},
|
||||
{Name: "persistentvolumeclaims"},
|
||||
{Name: "pods"},
|
||||
},
|
||||
}
|
||||
|
||||
sortCoreGroup(group)
|
||||
|
||||
expected := []string{
|
||||
"pods",
|
||||
"persistentvolumeclaims",
|
||||
"persistentvolumes",
|
||||
"configmaps",
|
||||
"antelopes",
|
||||
}
|
||||
for i, r := range group.APIResources {
|
||||
assert.Equal(t, expected[i], r.Name)
|
||||
}
|
||||
}
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -34,18 +33,67 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
"github.com/vmware-tanzu/velero/pkg/discovery"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/restic"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
"github.com/vmware-tanzu/velero/pkg/volume"
|
||||
api "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
"github.com/heptio/velero/pkg/client"
|
||||
"github.com/heptio/velero/pkg/discovery"
|
||||
"github.com/heptio/velero/pkg/kuberesource"
|
||||
"github.com/heptio/velero/pkg/plugin/velero"
|
||||
"github.com/heptio/velero/pkg/podexec"
|
||||
"github.com/heptio/velero/pkg/restic"
|
||||
"github.com/heptio/velero/pkg/volume"
|
||||
)
|
||||
|
||||
// itemBackupper can back up individual items to a tar writer.
|
||||
type itemBackupper struct {
|
||||
type itemBackupperFactory interface {
|
||||
newItemBackupper(
|
||||
backup *Request,
|
||||
podCommandExecutor podexec.PodCommandExecutor,
|
||||
tarWriter tarWriter,
|
||||
dynamicFactory client.DynamicFactory,
|
||||
discoveryHelper discovery.Helper,
|
||||
resticBackupper restic.Backupper,
|
||||
resticSnapshotTracker *pvcSnapshotTracker,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) ItemBackupper
|
||||
}
|
||||
|
||||
type defaultItemBackupperFactory struct{}
|
||||
|
||||
func (f *defaultItemBackupperFactory) newItemBackupper(
|
||||
backupRequest *Request,
|
||||
podCommandExecutor podexec.PodCommandExecutor,
|
||||
tarWriter tarWriter,
|
||||
dynamicFactory client.DynamicFactory,
|
||||
discoveryHelper discovery.Helper,
|
||||
resticBackupper restic.Backupper,
|
||||
resticSnapshotTracker *pvcSnapshotTracker,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) ItemBackupper {
|
||||
ib := &defaultItemBackupper{
|
||||
backupRequest: backupRequest,
|
||||
tarWriter: tarWriter,
|
||||
dynamicFactory: dynamicFactory,
|
||||
discoveryHelper: discoveryHelper,
|
||||
resticBackupper: resticBackupper,
|
||||
resticSnapshotTracker: resticSnapshotTracker,
|
||||
volumeSnapshotterGetter: volumeSnapshotterGetter,
|
||||
|
||||
itemHookHandler: &defaultItemHookHandler{
|
||||
podCommandExecutor: podCommandExecutor,
|
||||
},
|
||||
}
|
||||
|
||||
// this is for testing purposes
|
||||
ib.additionalItemBackupper = ib
|
||||
|
||||
return ib
|
||||
}
|
||||
|
||||
type ItemBackupper interface {
|
||||
backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error
|
||||
}
|
||||
|
||||
type defaultItemBackupper struct {
|
||||
backupRequest *Request
|
||||
tarWriter tarWriter
|
||||
dynamicFactory client.DynamicFactory
|
||||
@@ -55,53 +103,48 @@ type itemBackupper struct {
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter
|
||||
|
||||
itemHookHandler itemHookHandler
|
||||
additionalItemBackupper ItemBackupper
|
||||
snapshotLocationVolumeSnapshotters map[string]velero.VolumeSnapshotter
|
||||
}
|
||||
|
||||
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
|
||||
// namespaces IncludesExcludes list.
|
||||
// In addition to the error return, backupItem also returns a bool indicating whether the item
|
||||
// was actually backed up.
|
||||
func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource) (bool, error) {
|
||||
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
|
||||
metadata, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
|
||||
namespace := metadata.GetNamespace()
|
||||
name := metadata.GetName()
|
||||
|
||||
log := logger.WithField("name", name)
|
||||
log = log.WithField("resource", groupResource)
|
||||
log = log.WithField("namespace", namespace)
|
||||
|
||||
if metadata.GetLabels()["velero.io/exclude-from-backup"] == "true" {
|
||||
log.Info("Excluding item because it has label velero.io/exclude-from-backup=true")
|
||||
return false, nil
|
||||
if namespace != "" {
|
||||
log = log.WithField("namespace", namespace)
|
||||
}
|
||||
|
||||
// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that
|
||||
// backupItem can be invoked by a custom action.
|
||||
if namespace != "" && !ib.backupRequest.NamespaceIncludesExcludes.ShouldInclude(namespace) {
|
||||
log.Info("Excluding item because namespace is excluded")
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
|
||||
// false.
|
||||
if namespace == "" && groupResource != kuberesource.Namespaces && ib.backupRequest.Spec.IncludeClusterResources != nil && !*ib.backupRequest.Spec.IncludeClusterResources {
|
||||
log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
if !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) {
|
||||
log.Info("Excluding item because resource is excluded")
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
if metadata.GetDeletionTimestamp() != nil {
|
||||
log.Info("Skipping item because it's being deleted.")
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
key := itemKey{
|
||||
@@ -112,8 +155,7 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
|
||||
if _, exists := ib.backupRequest.BackedUpItems[key]; exists {
|
||||
log.Info("Skipping item because it's already been backed up.")
|
||||
// returning true since this item *is* in the backup, even though we're not backing it up here
|
||||
return true, nil
|
||||
return nil
|
||||
}
|
||||
ib.backupRequest.BackedUpItems[key] = struct{}{}
|
||||
|
||||
@@ -121,7 +163,7 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
|
||||
log.Debug("Executing pre hooks")
|
||||
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePre); err != nil {
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -138,34 +180,15 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
// nil it on error since it's not valid
|
||||
pod = nil
|
||||
} else {
|
||||
// Get the list of volumes to back up using restic from the pod's annotations. Remove from this list
|
||||
// any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario,
|
||||
// where it's been backed up from another pod), since we don't need >1 backup per PVC.
|
||||
for _, volume := range restic.GetVolumesToBackup(pod) {
|
||||
if found, pvcName := ib.resticSnapshotTracker.HasPVCForPodVolume(pod, volume); found {
|
||||
log.WithFields(map[string]interface{}{
|
||||
"podVolume": volume,
|
||||
"pvcName": pvcName,
|
||||
}).Info("Pod volume uses a persistent volume claim which has already been backed up with restic from another pod, skipping.")
|
||||
continue
|
||||
}
|
||||
// get the volumes to backup using restic, and add any of them that are PVCs to the pvc snapshot
|
||||
// tracker, so that when we backup PVCs/PVs via an item action in the next step, we don't snapshot
|
||||
// PVs that will have their data backed up with restic.
|
||||
resticVolumesToBackup = restic.GetVolumesToBackup(pod)
|
||||
|
||||
resticVolumesToBackup = append(resticVolumesToBackup, volume)
|
||||
}
|
||||
|
||||
// track the volumes that are PVCs using the PVC snapshot tracker, so that when we backup PVCs/PVs
|
||||
// via an item action in the next step, we don't snapshot PVs that will have their data backed up
|
||||
// with restic.
|
||||
ib.resticSnapshotTracker.Track(pod, resticVolumesToBackup)
|
||||
}
|
||||
}
|
||||
|
||||
// capture the version of the object before invoking plugin actions as the plugin may update
|
||||
// the group version of the object.
|
||||
// group version of this object
|
||||
// Used on filepath to backup up all groups and versions
|
||||
version := resourceVersion(obj)
|
||||
|
||||
updatedObj, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata)
|
||||
if err != nil {
|
||||
backupErrs = append(backupErrs, err)
|
||||
@@ -176,11 +199,11 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
backupErrs = append(backupErrs, err)
|
||||
}
|
||||
|
||||
return false, kubeerrs.NewAggregate(backupErrs)
|
||||
return kubeerrs.NewAggregate(backupErrs)
|
||||
}
|
||||
obj = updatedObj
|
||||
if metadata, err = meta.Accessor(obj); err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
// update name and namespace in case they were modified in an action
|
||||
name = metadata.GetName()
|
||||
@@ -207,31 +230,19 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
}
|
||||
|
||||
if len(backupErrs) != 0 {
|
||||
return false, kubeerrs.NewAggregate(backupErrs)
|
||||
return kubeerrs.NewAggregate(backupErrs)
|
||||
}
|
||||
|
||||
// Getting the preferred group version of this resource
|
||||
preferredVersion := preferredGVR.Version
|
||||
|
||||
var filePath string
|
||||
|
||||
// API Group version is now part of path of backup as a subdirectory
|
||||
// it will add a prefix to subdirectory name for the preferred version
|
||||
versionPath := version
|
||||
|
||||
if version == preferredVersion {
|
||||
versionPath = version + velerov1api.PreferredVersionDir
|
||||
}
|
||||
|
||||
if namespace != "" {
|
||||
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), versionPath, velerov1api.NamespaceScopedDir, namespace, name+".json")
|
||||
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
|
||||
} else {
|
||||
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), versionPath, velerov1api.ClusterScopedDir, name+".json")
|
||||
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
|
||||
}
|
||||
|
||||
itemBytes, err := json.Marshal(obj.UnstructuredContent())
|
||||
if err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
@@ -243,46 +254,19 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
}
|
||||
|
||||
if err := ib.tarWriter.WriteHeader(hdr); err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if _, err := ib.tarWriter.Write(itemBytes); err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// backing up the preferred version backup without API Group version on path - this is for backward compability
|
||||
|
||||
log.Debugf("Resource %s/%s, version= %s, preferredVersion=%s", groupResource.String(), name, version, preferredVersion)
|
||||
if version == preferredVersion {
|
||||
if namespace != "" {
|
||||
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), velerov1api.NamespaceScopedDir, namespace, name+".json")
|
||||
} else {
|
||||
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), velerov1api.ClusterScopedDir, name+".json")
|
||||
}
|
||||
|
||||
hdr = &tar.Header{
|
||||
Name: filePath,
|
||||
Size: int64(len(itemBytes)),
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0755,
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
|
||||
if err := ib.tarWriter.WriteHeader(hdr); err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if _, err := ib.tarWriter.Write(itemBytes); err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a list of PodVolumeBackups
|
||||
// for volumes that were successfully backed up, and a slice of any errors that were encountered.
|
||||
func (ib *itemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) ([]*velerov1api.PodVolumeBackup, []error) {
|
||||
func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) ([]*velerov1api.PodVolumeBackup, []error) {
|
||||
if len(volumes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -292,10 +276,10 @@ func (ib *itemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return ib.resticBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, volumes, log)
|
||||
return ib.resticBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, log)
|
||||
}
|
||||
|
||||
func (ib *itemBackupper) executeActions(
|
||||
func (ib *defaultItemBackupper) executeActions(
|
||||
log logrus.FieldLogger,
|
||||
obj runtime.Unstructured,
|
||||
groupResource schema.GroupResource,
|
||||
@@ -342,20 +326,12 @@ func (ib *itemBackupper) executeActions(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, err := client.Get(additionalItem.Name, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"groupResource": additionalItem.GroupResource,
|
||||
"namespace": additionalItem.Namespace,
|
||||
"name": additionalItem.Name,
|
||||
}).Warnf("Additional item was not found in Kubernetes API, can't back it up")
|
||||
continue
|
||||
}
|
||||
additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if _, err = ib.backupItem(log, item, gvr.GroupResource(), gvr); err != nil {
|
||||
if err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -366,7 +342,7 @@ func (ib *itemBackupper) executeActions(
|
||||
|
||||
// volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation,
|
||||
// or returns an existing one if one's already been initialized for the location.
|
||||
func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (velero.VolumeSnapshotter, error) {
|
||||
func (ib *defaultItemBackupper) volumeSnapshotter(snapshotLocation *api.VolumeSnapshotLocation) (velero.VolumeSnapshotter, error) {
|
||||
if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok {
|
||||
return bs, nil
|
||||
}
|
||||
@@ -388,22 +364,17 @@ func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeS
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
// zoneLabelDeprecated is the label that stores availability-zone info
|
||||
// on PVs this is deprecated on Kubernetes >= 1.17.0
|
||||
// zoneLabel is the label that stores availability-zone info
|
||||
// on PVs
|
||||
const (
|
||||
zoneLabelDeprecated = "failure-domain.beta.kubernetes.io/zone"
|
||||
zoneLabel = "topology.kubernetes.io/zone"
|
||||
)
|
||||
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
|
||||
|
||||
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
|
||||
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
|
||||
// disk type and IOPS (if applicable) to be able to restore to current state later.
|
||||
func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error {
|
||||
func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error {
|
||||
log.Info("Executing takePVSnapshot")
|
||||
|
||||
if boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) {
|
||||
if ib.backupRequest.Spec.SnapshotVolumes != nil && !*ib.backupRequest.Spec.SnapshotVolumes {
|
||||
log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
|
||||
return nil
|
||||
}
|
||||
@@ -419,20 +390,14 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
// of this PV. If so, don't take a snapshot.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
if ib.resticSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
|
||||
log.Info("Skipping snapshot of persistent volume because volume is being backed up with restic.")
|
||||
log.Info("Skipping persistent volume snapshot because volume has already been backed up with restic.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: -- once failure-domain.beta.kubernetes.io/zone is no longer
|
||||
// supported in any velero-supported version of Kubernetes, remove fallback checking of it
|
||||
pvFailureDomainZone, labelFound := pv.Labels[zoneLabel]
|
||||
if !labelFound {
|
||||
log.Infof("label %q is not present on PersistentVolume, checking deprecated label...", zoneLabel)
|
||||
pvFailureDomainZone, labelFound = pv.Labels[zoneLabelDeprecated]
|
||||
if !labelFound {
|
||||
log.Infof("label %q is not present on PersistentVolume", zoneLabelDeprecated)
|
||||
}
|
||||
pvFailureDomainZone := pv.Labels[zoneLabel]
|
||||
if pvFailureDomainZone == "" {
|
||||
log.Infof("label %q is not present on PersistentVolume", zoneLabel)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -471,10 +436,9 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
|
||||
log = log.WithField("volumeID", volumeID)
|
||||
|
||||
// create tags from the backup's labels
|
||||
tags := map[string]string{}
|
||||
for k, v := range ib.backupRequest.GetLabels() {
|
||||
tags[k] = v
|
||||
tags := ib.backupRequest.GetLabels()
|
||||
if tags == nil {
|
||||
tags = map[string]string{}
|
||||
}
|
||||
tags["velero.io/backup"] = ib.backupRequest.Name
|
||||
tags["velero.io/pv"] = pv.Name
|
||||
@@ -503,7 +467,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
return kubeerrs.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func volumeSnapshot(backup *velerov1api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
|
||||
func volumeSnapshot(backup *api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
|
||||
return &volume.Snapshot{
|
||||
Spec: volume.SnapshotSpec{
|
||||
BackupName: backup.Name,
|
||||
@@ -527,10 +491,3 @@ func resourceKey(obj runtime.Unstructured) string {
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
return fmt.Sprintf("%s/%s", gvk.GroupVersion().String(), gvk.Kind)
|
||||
}
|
||||
|
||||
// resourceVersion returns a string representing the object's API Version (e.g.
|
||||
// v1 if item belongs to apps/v1
|
||||
func resourceVersion(obj runtime.Unstructured) string {
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
return gvk.Version
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
"github.com/heptio/velero/pkg/builder"
|
||||
)
|
||||
|
||||
func Test_resourceKey(t *testing.T) {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user