mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-03-27 12:05:05 +00:00
Compare commits
113 Commits
v1.2.0-bet
...
v1.3.0-bet
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2fbcc04d03 | ||
|
|
f3409c406a | ||
|
|
88d123fcdc | ||
|
|
e794d8404d | ||
|
|
b25fea3bea | ||
|
|
c5e8bdd7c8 | ||
|
|
08c549a092 | ||
|
|
c9bc6646a5 | ||
|
|
21264a11ec | ||
|
|
1f2375a53d | ||
|
|
37011ee5ac | ||
|
|
42b6126458 | ||
|
|
2f2666f5d8 | ||
|
|
6745979a7b | ||
|
|
4a5a63fc92 | ||
|
|
f7adc4dfd1 | ||
|
|
445b5b781e | ||
|
|
710beb96c2 | ||
|
|
b19097f825 | ||
|
|
f00922ddf1 | ||
|
|
5b1280c2cd | ||
|
|
82d6ad4ae3 | ||
|
|
fc3ec9ff2c | ||
|
|
ec22f2c88d | ||
|
|
421dcd4e8d | ||
|
|
71201fe929 | ||
|
|
ae316193ea | ||
|
|
a10f57d720 | ||
|
|
c0eef6009e | ||
|
|
823b1f7d6d | ||
|
|
d4ccdd6460 | ||
|
|
dcca3c3d2b | ||
|
|
65e970f59d | ||
|
|
3b80e00d62 | ||
|
|
ee0cbcf11e | ||
|
|
254a5eebb5 | ||
|
|
aa44cf1c32 | ||
|
|
f4f2351411 | ||
|
|
9fa302aa8b | ||
|
|
b2acd3b683 | ||
|
|
61b7c7dee0 | ||
|
|
04d8b47d3e | ||
|
|
555e8ff2e3 | ||
|
|
97b33402a4 | ||
|
|
c6ff6333a9 | ||
|
|
b9d02795b5 | ||
|
|
c832e52905 | ||
|
|
f43c1ad55e | ||
|
|
408c0dfb5a | ||
|
|
93e4737258 | ||
|
|
be140985c5 | ||
|
|
4907bea398 | ||
|
|
2cd0b540bd | ||
|
|
a98bfd14c5 | ||
|
|
0fc42662aa | ||
|
|
d09aee9fae | ||
|
|
13fb36cda4 | ||
|
|
6d71e288ef | ||
|
|
cc142d9ad7 | ||
|
|
bf3952338b | ||
|
|
2755496563 | ||
|
|
cd860771c2 | ||
|
|
171f329fcc | ||
|
|
88d35290bf | ||
|
|
ebace913d8 | ||
|
|
aa9423593f | ||
|
|
89f0309e7b | ||
|
|
22e8c4e045 | ||
|
|
ff889283b8 | ||
|
|
96ca41ca9a | ||
|
|
7c60829f38 | ||
|
|
96297ea437 | ||
|
|
bbaa0196d1 | ||
|
|
83ef4eb4d0 | ||
|
|
6391b84dc6 | ||
|
|
f42406723c | ||
|
|
91bbb98cab | ||
|
|
08d9a3c507 | ||
|
|
87be775139 | ||
|
|
6395fa086d | ||
|
|
5296227dac | ||
|
|
6046e3cc0a | ||
|
|
a5a9827d9c | ||
|
|
36c7ebf4b7 | ||
|
|
d33014bf3c | ||
|
|
0704c77ba1 | ||
|
|
e5a85f140a | ||
|
|
cd045be028 | ||
|
|
63cdc5f087 | ||
|
|
374eee776e | ||
|
|
54f94ecd67 | ||
|
|
9e2f2d8419 | ||
|
|
3add90ba52 | ||
|
|
5a31a78df5 | ||
|
|
5d008491bb | ||
|
|
c090050c5a | ||
|
|
b9f4282201 | ||
|
|
7d27f951ff | ||
|
|
f016aae9c3 | ||
|
|
e1bdc417ca | ||
|
|
a367bfa829 | ||
|
|
5cb7f94bfd | ||
|
|
df374d5079 | ||
|
|
ef1178ff11 | ||
|
|
121dc02d2f | ||
|
|
3c6842bfe1 | ||
|
|
4fb1bc2ef3 | ||
|
|
636a5b9db6 | ||
|
|
bf7df45e53 | ||
|
|
c5d0110429 | ||
|
|
984e2ce589 | ||
|
|
83752d28d7 | ||
|
|
5d6b129ea5 |
14
.github/workflows/pr.yml
vendored
Normal file
14
.github/workflows/pr.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
name: Pull Request CI Check
|
||||
on: [push, pull_request]
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Run CI
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v1
|
||||
|
||||
- name: Make ci
|
||||
run: make ci
|
||||
@@ -1,11 +1,12 @@
|
||||
## Current release:
|
||||
* [CHANGELOG-1.1.md][11]
|
||||
* [CHANGELOG-1.2.md][12]
|
||||
|
||||
## Development release:
|
||||
* [v1.2.0-beta.1][12]
|
||||
* [CHANGELOG-1.3.md][13]
|
||||
* [Unreleased Changes][0]
|
||||
|
||||
## Older releases:
|
||||
* [CHANGELOG-1.1.md][11]
|
||||
* [CHANGELOG-1.0.md][10]
|
||||
* [CHANGELOG-0.11.md][9]
|
||||
* [CHANGELOG-0.10.md][8]
|
||||
@@ -18,6 +19,7 @@
|
||||
* [CHANGELOG-0.3.md][1]
|
||||
|
||||
|
||||
[13]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.3.md
|
||||
[12]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.2.md
|
||||
[11]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.1.md
|
||||
[10]: https://github.com/vmware-tanzu/velero/blob/master/changelogs/CHANGELOG-1.0.md
|
||||
|
||||
@@ -2,75 +2,83 @@
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in the Velero project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
Examples of behavior that contributes to a positive environment for our community include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall community
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at oss-coc@vmware.com. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [oss-coc@vmware.com](mailto:oss-coc@vmware.com). All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
All community leaders are obligated to respect the privacy and security of the reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0,
|
||||
available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
|
||||
|
||||
@@ -1,70 +1,3 @@
|
||||
# Contributing
|
||||
|
||||
## CHANGELOG
|
||||
|
||||
Authors are expected to include a changelog file with their pull requests. The changelog file
|
||||
should be a new file created in the `changelogs/unreleased` folder. The file should follow the
|
||||
naming convention of `pr-username` and the contents of the file should be your text for the
|
||||
changelog.
|
||||
|
||||
velero/changelogs/unreleased <- folder
|
||||
000-username <- file
|
||||
|
||||
|
||||
## DCO Sign off
|
||||
|
||||
All authors to the project retain copyright to their work. However, to ensure
|
||||
that they are only submitting work that they have rights to, we are requiring
|
||||
everyone to acknowledge this by signing their work.
|
||||
|
||||
Any copyright notices in this repo should specify the authors as "the Velero contributors".
|
||||
|
||||
To sign your work, just add a line like this at the end of your commit message:
|
||||
|
||||
```
|
||||
Signed-off-by: Joe Beda <joe@heptio.com>
|
||||
```
|
||||
|
||||
This can easily be done with the `--signoff` option to `git commit`.
|
||||
|
||||
By doing this you state that you can certify the following (from https://developercertificate.org/):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
Authors are expected to follow some guidelines when submitting PRs. Please see [our documentation](https://velero.io/docs/master/code-standards/) for details.
|
||||
|
||||
@@ -18,9 +18,9 @@ LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ca-certificates wget bzip2 && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.5/restic_0.9.5_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.5_linux_amd64.bz2 && \
|
||||
mv restic_0.9.5_linux_amd64 /usr/bin/restic && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.6/restic_0.9.6_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.6_linux_amd64.bz2 && \
|
||||
mv restic_0.9.6_linux_amd64 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic && \
|
||||
apt-get remove -y wget bzip2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
@@ -28,6 +28,6 @@ RUN apt-get update && \
|
||||
|
||||
ADD /bin/linux/amd64/velero /velero
|
||||
|
||||
USER nobody:nobody
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
|
||||
23
Dockerfile-velero-arm
Normal file
23
Dockerfile-velero-arm
Normal file
@@ -0,0 +1,23 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm32v7/ubuntu:bionic
|
||||
|
||||
ADD /bin/linux/arm/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/arm/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
23
Dockerfile-velero-arm64
Normal file
23
Dockerfile-velero-arm64
Normal file
@@ -0,0 +1,23 @@
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM arm64v8/ubuntu:bionic
|
||||
|
||||
ADD /bin/linux/arm64/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/arm64/velero /velero
|
||||
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
@@ -12,21 +12,14 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
FROM ppc64le/ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ca-certificates wget && \
|
||||
wget --quiet https://oplab9.parqtec.unicamp.br/pub/ppc64el/restic/restic-0.9.5 && \
|
||||
mv restic-0.9.5 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic && \
|
||||
apt-get remove -y wget && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
LABEL maintainer="Prajyot Parab <prajyot.parab@ibm.com>"
|
||||
|
||||
ADD /bin/linux/ppc64le/restic /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/ppc64le/velero /velero
|
||||
|
||||
USER nobody:nobody
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT ["/velero"]
|
||||
|
||||
@@ -18,6 +18,6 @@ LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ADD /bin/linux/amd64/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nobody
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018, 2019 the Velero contributors.
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,8 +12,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
FROM arm32v7/ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
ADD /bin/linux/arm/velero-restic-restore-helper .
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"]
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2019 the Velero contributors.
|
||||
# Copyright 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,8 +12,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
FROM arm64v8/ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
ADD /bin/linux/arm64/velero-restic-restore-helper .
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"]
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
@@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:bionic
|
||||
FROM ppc64le/ubuntu:bionic
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
LABEL maintainer="Prajyot Parab <prajyot.parab@ibm.com>"
|
||||
|
||||
ADD /bin/linux/ppc64le/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nobody
|
||||
USER nobody:nogroup
|
||||
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
|
||||
121
Gopkg.lock
generated
121
Gopkg.lock
generated
@@ -144,15 +144,23 @@
|
||||
version = "v0.2.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f"
|
||||
digest = "1:bde9f189072512ba353f3641d4839cb4c9c7edf421e467f2c03f267b402bd16c"
|
||||
name = "github.com/gofrs/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "6b08a5c5172ba18946672b49749cde22873dd7c2"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
|
||||
@@ -203,12 +211,11 @@
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:32e5a56c443b5581e4bf6e74cdc78b5826d7e4c5df43883e2dc31e4d7f4ae98a"
|
||||
digest = "1:27d609eb0e1e84ad3be8c5214835b7728557954e87b6a1222182e3664152f6ab"
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "ca137eb4b4389c9bc6f1a6d887f056bf16c00510"
|
||||
revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:980fd2c6afd6c268284d46dd66671771184a4002f6d516492cc596d2ca003543"
|
||||
@@ -271,12 +278,12 @@
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
|
||||
digest = "1:996dd5b4e2def9c256d4aa732db9a2d16d00a5eaf264139fa4bce5a4d2dc5fde"
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
|
||||
version = "1.1.4"
|
||||
revision = "03217c3e97663914aec3faafde50d081f197a0a2"
|
||||
version = "v1.1.8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -343,15 +350,16 @@
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
|
||||
digest = "1:097cc61836050f45cbb712ae3bb45d66fba464c16b8fac09907fa3c1f753eff6"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/internal",
|
||||
"prometheus/promhttp",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
revision = "4ab88e80c249ed361d3299e2930427d9ac43ef8d"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -374,17 +382,15 @@
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c4a213a8d73fbb0b13f717ba7996116602ef18ecb42b91d77405877914cb0349"
|
||||
digest = "1:afe5de112e0ca26a37730f01bc4bac9aabe9843cbfd66034f0c16e5a1fbd045b"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
"internal/fs",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "94663424ae5ae9856b40a9f170762b4197024661"
|
||||
revision = "833678b5bb319f2d20a475cb165c6cc59c2cc77c"
|
||||
version = "v0.0.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f53493533f0689ff978122bb36801af47fe549828ce786af9166694394c3fa0d"
|
||||
@@ -393,14 +399,6 @@
|
||||
pruneopts = "NUT"
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca"
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:31c5d934770c8b0698c28eb8576cb39b14e2fcf3c5f2a6e8449116884cd92e3f"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
@@ -618,9 +616,10 @@
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:760e08df99c3c3b53764ef7c41c03ea9d90e8594d9df42364d9209e99a0352e1"
|
||||
digest = "1:6ebd87b9a08afc8894de3f5d6f1dbc56494c36c5136593ddaf8997d73fc1967c"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admissionregistration/v1",
|
||||
"admissionregistration/v1beta1",
|
||||
"apps/v1",
|
||||
"apps/v1beta1",
|
||||
@@ -640,8 +639,11 @@
|
||||
"coordination/v1",
|
||||
"coordination/v1beta1",
|
||||
"core/v1",
|
||||
"discovery/v1alpha1",
|
||||
"discovery/v1beta1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
"flowcontrol/v1alpha1",
|
||||
"networking/v1",
|
||||
"networking/v1beta1",
|
||||
"node/v1alpha1",
|
||||
@@ -659,23 +661,24 @@
|
||||
"storage/v1beta1",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "3544db3b9e4494309507e02eced5cd9dcff47e6a"
|
||||
version = "kubernetes-1.15.3"
|
||||
revision = "4c9a86741a7ab3890dd9e0777e85d8eee48bf59c"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d26e3d9d22801d0a4de9b20264eba64f962e637952c937b1afb745ecbe69b4cb"
|
||||
digest = "1:6a69bcc2370321cbb36a4afbfc1dfb73e95686aff4ee30993e5728007ab4f022"
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
packages = [
|
||||
"pkg/apis/apiextensions",
|
||||
"pkg/apis/apiextensions/install",
|
||||
"pkg/apis/apiextensions/v1",
|
||||
"pkg/apis/apiextensions/v1beta1",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "0dbe462fe92dfa8b56cc9facf0658a17d0c70fc5"
|
||||
version = "kubernetes-1.15.3"
|
||||
revision = "ea07e0496fdfc3befebe481bd6f850febc41cdae"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b1e28ead203a2ac2c1d82980760e26bdb34dbd47428cfe76f50c81b7d761b39e"
|
||||
digest = "1:fadc182dd0445c5ddcc81710e974a1f91ad9df994934638c239f080b2df2b079"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/equality",
|
||||
@@ -728,11 +731,19 @@
|
||||
"third_party/forked/golang/reflect",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "f2f3a405f61d6c2cdc0d00687c1b5d90de91e9f0"
|
||||
version = "kubernetes-1.15.3"
|
||||
revision = "79c2a76c473a20cdc4ce59cae4b72529b5d9d16b"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:07c45445bd6c49a1b12cef2def45a866a440c55dda0ef62e93a012bffaff22f8"
|
||||
digest = "1:ff42a0e8537ca86b2dfd227958477cd4c40a40b1d76838ddbcb89ba80a2b87f3"
|
||||
name = "k8s.io/cli-runtime"
|
||||
packages = ["pkg/printers"]
|
||||
pruneopts = "NUT"
|
||||
revision = "dbe977a4ce31c6d5f786c9394a3c8993095ad4fc"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6293fad3808cf7d6584cfeb8b9b27012aa3f7b02f9c28c3d64ac498181eedd0d"
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"discovery",
|
||||
@@ -741,6 +752,7 @@
|
||||
"dynamic/fake",
|
||||
"informers",
|
||||
"informers/admissionregistration",
|
||||
"informers/admissionregistration/v1",
|
||||
"informers/admissionregistration/v1beta1",
|
||||
"informers/apps",
|
||||
"informers/apps/v1",
|
||||
@@ -763,10 +775,15 @@
|
||||
"informers/coordination/v1beta1",
|
||||
"informers/core",
|
||||
"informers/core/v1",
|
||||
"informers/discovery",
|
||||
"informers/discovery/v1alpha1",
|
||||
"informers/discovery/v1beta1",
|
||||
"informers/events",
|
||||
"informers/events/v1beta1",
|
||||
"informers/extensions",
|
||||
"informers/extensions/v1beta1",
|
||||
"informers/flowcontrol",
|
||||
"informers/flowcontrol/v1alpha1",
|
||||
"informers/internalinterfaces",
|
||||
"informers/networking",
|
||||
"informers/networking/v1",
|
||||
@@ -793,6 +810,8 @@
|
||||
"kubernetes",
|
||||
"kubernetes/fake",
|
||||
"kubernetes/scheme",
|
||||
"kubernetes/typed/admissionregistration/v1",
|
||||
"kubernetes/typed/admissionregistration/v1/fake",
|
||||
"kubernetes/typed/admissionregistration/v1beta1",
|
||||
"kubernetes/typed/admissionregistration/v1beta1/fake",
|
||||
"kubernetes/typed/apps/v1",
|
||||
@@ -831,10 +850,16 @@
|
||||
"kubernetes/typed/coordination/v1beta1/fake",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/core/v1/fake",
|
||||
"kubernetes/typed/discovery/v1alpha1",
|
||||
"kubernetes/typed/discovery/v1alpha1/fake",
|
||||
"kubernetes/typed/discovery/v1beta1",
|
||||
"kubernetes/typed/discovery/v1beta1/fake",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/events/v1beta1/fake",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1/fake",
|
||||
"kubernetes/typed/flowcontrol/v1alpha1",
|
||||
"kubernetes/typed/flowcontrol/v1alpha1/fake",
|
||||
"kubernetes/typed/networking/v1",
|
||||
"kubernetes/typed/networking/v1/fake",
|
||||
"kubernetes/typed/networking/v1beta1",
|
||||
@@ -865,6 +890,7 @@
|
||||
"kubernetes/typed/storage/v1alpha1/fake",
|
||||
"kubernetes/typed/storage/v1beta1",
|
||||
"kubernetes/typed/storage/v1beta1/fake",
|
||||
"listers/admissionregistration/v1",
|
||||
"listers/admissionregistration/v1beta1",
|
||||
"listers/apps/v1",
|
||||
"listers/apps/v1beta1",
|
||||
@@ -880,8 +906,11 @@
|
||||
"listers/coordination/v1",
|
||||
"listers/coordination/v1beta1",
|
||||
"listers/core/v1",
|
||||
"listers/discovery/v1alpha1",
|
||||
"listers/discovery/v1beta1",
|
||||
"listers/events/v1beta1",
|
||||
"listers/extensions/v1beta1",
|
||||
"listers/flowcontrol/v1alpha1",
|
||||
"listers/networking/v1",
|
||||
"listers/networking/v1beta1",
|
||||
"listers/node/v1alpha1",
|
||||
@@ -933,8 +962,8 @@
|
||||
"util/workqueue",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e14f31a72a77f7aa82a95eaf542d1194fb027d04"
|
||||
version = "kubernetes-1.15.3"
|
||||
revision = "c68b62b1efa14564a47d67c07f013dc3553937b9"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2c16dda1c44c2564a7818fbacb701323c16d77c21b969987c1bec08d3ee0b050"
|
||||
@@ -952,17 +981,9 @@
|
||||
pruneopts = "NUT"
|
||||
revision = "d83b052f768a50a309c692a9c271da3f3276ff88"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:da84676239a79340c7ef4d8587097bf07ecde0d72cd339dd6a4d3f5fedce7cd3"
|
||||
name = "k8s.io/kubernetes"
|
||||
packages = ["pkg/printers"]
|
||||
pruneopts = "NUT"
|
||||
revision = "2d3c76f9091b6bec110a5e63777c332469e0cba2"
|
||||
version = "v1.15.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5011e453320ffdc30c5a06516f4de57df6e1c094b2923a1c7e699ea3f4364bc9"
|
||||
digest = "1:99dbf7ef753ca02399778c93a4ed4b689b28668317134ecd1fabeb07be70f354"
|
||||
name = "k8s.io/utils"
|
||||
packages = [
|
||||
"buffer",
|
||||
@@ -971,7 +992,7 @@
|
||||
"trace",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "21c4ce38f2a793ec01e925ddc31216500183b773"
|
||||
revision = "f07c713de88362aef7545072487d6118bd4a3d4a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
|
||||
@@ -995,6 +1016,7 @@
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"github.com/evanphx/json-patch",
|
||||
"github.com/gobwas/glob",
|
||||
"github.com/gofrs/uuid",
|
||||
"github.com/golang/protobuf/proto",
|
||||
"github.com/hashicorp/go-hclog",
|
||||
"github.com/hashicorp/go-plugin",
|
||||
@@ -1003,7 +1025,6 @@
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"github.com/robfig/cron",
|
||||
"github.com/satori/go.uuid",
|
||||
"github.com/sirupsen/logrus",
|
||||
"github.com/spf13/afero",
|
||||
"github.com/spf13/cobra",
|
||||
@@ -1043,6 +1064,7 @@
|
||||
"k8s.io/apimachinery/pkg/util/validation",
|
||||
"k8s.io/apimachinery/pkg/util/wait",
|
||||
"k8s.io/apimachinery/pkg/watch",
|
||||
"k8s.io/cli-runtime/pkg/printers",
|
||||
"k8s.io/client-go/discovery",
|
||||
"k8s.io/client-go/discovery/fake",
|
||||
"k8s.io/client-go/dynamic",
|
||||
@@ -1069,7 +1091,6 @@
|
||||
"k8s.io/client-go/util/flowcontrol",
|
||||
"k8s.io/client-go/util/workqueue",
|
||||
"k8s.io/klog",
|
||||
"k8s.io/kubernetes/pkg/printers",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
44
Gopkg.toml
44
Gopkg.toml
@@ -29,30 +29,35 @@
|
||||
# Kubernetes packages
|
||||
#
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "~1.15"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "kubernetes-1.15.3"
|
||||
version = "~0.17"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.15.3"
|
||||
version = "~0.17"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.15.3"
|
||||
version = "~0.17"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
version = "kubernetes-1.15.3"
|
||||
version = "~0.17"
|
||||
|
||||
# k8s.io/client-go kubernetes-1.15.3 uses v1.1.4
|
||||
[[constraint]]
|
||||
name = "k8s.io/cli-runtime"
|
||||
version = "~0.17"
|
||||
|
||||
# k8s.io/client-go v0.17 uses v1.1.8
|
||||
[[override]]
|
||||
name = "github.com/json-iterator/go"
|
||||
version = "=1.1.4"
|
||||
version = "=1.1.8"
|
||||
|
||||
# k8s.io/client-go v0.17 uses v1.2.1
|
||||
[[override]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
version = "=1.2.1"
|
||||
|
||||
#
|
||||
# Cloud provider packages
|
||||
@@ -78,8 +83,8 @@
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "~1.2.0"
|
||||
name = "github.com/gofrs/uuid"
|
||||
version = "~3.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/afero"
|
||||
@@ -101,6 +106,12 @@
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
revision = "a1bc61569a26c0f65865932c0d55743b0567c494"
|
||||
|
||||
# use the version specified by go.mod in the go-plugin repo
|
||||
# to make future migration to go modules easier
|
||||
[[override]]
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "~v1.3.1"
|
||||
@@ -120,3 +131,12 @@
|
||||
[[override]]
|
||||
name = "golang.org/x/sys"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "~1.0"
|
||||
|
||||
# needed by the prometheus golang client
|
||||
[[override]]
|
||||
name = "github.com/prometheus/procfs"
|
||||
version = "=0.0.2"
|
||||
|
||||
120
Makefile
120
Makefile
@@ -33,36 +33,53 @@ VERSION ?= master
|
||||
|
||||
TAG_LATEST ?= false
|
||||
|
||||
# The version of restic binary to be downloaded for power architecture
|
||||
RESTIC_VERSION ?= 0.9.6
|
||||
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 windows-amd64 linux-ppc64le
|
||||
CONTAINER_PLATFORMS ?= linux-amd64 linux-ppc64le linux-arm linux-arm64
|
||||
MANIFEST_PLATFORMS ?= amd64 ppc64le arm arm64
|
||||
|
||||
###
|
||||
### These variables should not need tweaking.
|
||||
###
|
||||
|
||||
CLI_PLATFORMS := linux-amd64 linux-arm linux-arm64 darwin-amd64 windows-amd64 linux-ppc64le
|
||||
CONTAINER_PLATFORMS := linux-amd64 linux-arm linux-arm64 linux-ppc64le
|
||||
|
||||
platform_temp = $(subst -, ,$(ARCH))
|
||||
GOOS = $(word 1, $(platform_temp))
|
||||
GOARCH = $(word 2, $(platform_temp))
|
||||
|
||||
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
|
||||
# Set default base image dynamically for each arch
|
||||
ifeq ($(GOARCH),amd64)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)
|
||||
local-arch:
|
||||
@echo "local environment for amd64 is up-to-date"
|
||||
endif
|
||||
ifeq ($(GOARCH),arm)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-arm
|
||||
local-arch:
|
||||
@mkdir -p _output/bin/linux/arm/
|
||||
@wget -q -O - https://github.com/restic/restic/releases/download/v$(RESTIC_VERSION)/restic_$(RESTIC_VERSION)_linux_arm.bz2 | bunzip2 > _output/bin/linux/arm/restic
|
||||
@chmod a+x _output/bin/linux/arm/restic
|
||||
endif
|
||||
ifeq ($(GOARCH),arm64)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-arm64
|
||||
local-arch:
|
||||
@mkdir -p _output/bin/linux/arm64/
|
||||
@wget -q -O - https://github.com/restic/restic/releases/download/v$(RESTIC_VERSION)/restic_$(RESTIC_VERSION)_linux_arm64.bz2 | bunzip2 > _output/bin/linux/arm64/restic
|
||||
@chmod a+x _output/bin/linux/arm64/restic
|
||||
endif
|
||||
#ifeq ($(GOARCH),arm)
|
||||
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
|
||||
#endif
|
||||
#ifeq ($(GOARCH),arm64)
|
||||
# DOCKERFILE ?= Dockerfile.arm64 #aarch64/busybox
|
||||
#endif
|
||||
ifeq ($(GOARCH),ppc64le)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)-ppc64le
|
||||
local-arch:
|
||||
RESTIC_VERSION=$(RESTIC_VERSION) \
|
||||
./hack/get-restic-ppc64le.sh
|
||||
endif
|
||||
|
||||
IMAGE = $(REGISTRY)/$(BIN)
|
||||
MULTIARCH_IMAGE = $(REGISTRY)/$(BIN)
|
||||
IMAGE ?= $(REGISTRY)/$(BIN)-$(GOARCH)
|
||||
|
||||
# If you want to build all binaries, see the 'all-build' rule.
|
||||
# If you want to build all containers, see the 'all-container' rule.
|
||||
# If you want to build all containers, see the 'all-containers' rule.
|
||||
# If you want to build AND push all containers, see the 'all-push' rule.
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@@ -70,18 +87,25 @@ all:
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restic-restore-helper
|
||||
|
||||
#container-%:
|
||||
# @$(MAKE) --no-print-directory ARCH=$* container
|
||||
container-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* container
|
||||
@$(MAKE) --no-print-directory ARCH=$* container BIN=velero-restic-restore-helper
|
||||
|
||||
#push-%:
|
||||
# @$(MAKE) --no-print-directory ARCH=$* push
|
||||
push-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* push
|
||||
@$(MAKE) --no-print-directory ARCH=$* push BIN=velero-restic-restore-helper
|
||||
|
||||
all-build: $(addprefix build-, $(CLI_PLATFORMS))
|
||||
|
||||
#all-container: $(addprefix container-, $(CONTAINER_PLATFORMS))
|
||||
all-containers: $(addprefix container-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
#all-push: $(addprefix push-, $(CONTAINER_PLATFORMS))
|
||||
all-push: $(addprefix push-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
all-manifests:
|
||||
@$(MAKE) manifest
|
||||
@$(MAKE) manifest BIN=velero-restic-restore-helper
|
||||
|
||||
local: build-dirs
|
||||
GOOS=$(GOOS) \
|
||||
@@ -131,28 +155,11 @@ shell: build-dirs build-image
|
||||
|
||||
DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
|
||||
|
||||
# Use a slightly customized build/push targets since we don't have a Go binary to build for the fsfreeze image
|
||||
build-fsfreeze: BIN = fsfreeze-pause
|
||||
build-fsfreeze:
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN).alpine
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN).alpine _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
push-fsfreeze: BIN = fsfreeze-pause
|
||||
push-fsfreeze:
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest
|
||||
docker push $(IMAGE):latest
|
||||
endif
|
||||
@docker images -q $(REGISTRY)/fsfreeze-pause:$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
all-containers:
|
||||
$(MAKE) container
|
||||
$(MAKE) container BIN=velero-restic-restore-helper
|
||||
$(MAKE) build-fsfreeze
|
||||
|
||||
container: .container-$(DOTFILE_IMAGE) container-name
|
||||
container: local-arch .container-$(DOTFILE_IMAGE) container-name
|
||||
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
|
||||
@@ -161,12 +168,6 @@ container: .container-$(DOTFILE_IMAGE) container-name
|
||||
container-name:
|
||||
@echo "container: $(IMAGE):$(VERSION)"
|
||||
|
||||
all-push:
|
||||
$(MAKE) push
|
||||
$(MAKE) push BIN=velero-restic-restore-helper
|
||||
$(MAKE) push-fsfreeze
|
||||
|
||||
|
||||
push: .push-$(DOTFILE_IMAGE) push-name
|
||||
.push-$(DOTFILE_IMAGE): .container-$(DOTFILE_IMAGE)
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
@@ -179,6 +180,20 @@ endif
|
||||
push-name:
|
||||
@echo "pushed: $(IMAGE):$(VERSION)"
|
||||
|
||||
manifest: .manifest-$(MULTIARCH_IMAGE) manifest-name
|
||||
.manifest-$(MULTIARCH_IMAGE):
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(MULTIARCH_IMAGE):$(VERSION) \
|
||||
$(foreach arch, $(MANIFEST_PLATFORMS), $(MULTIARCH_IMAGE)-$(arch):$(VERSION))
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(MULTIARCH_IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(MULTIARCH_IMAGE):latest \
|
||||
$(foreach arch, $(MANIFEST_PLATFORMS), $(MULTIARCH_IMAGE)-$(arch):latest)
|
||||
@DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(MULTIARCH_IMAGE):latest
|
||||
endif
|
||||
|
||||
manifest-name:
|
||||
@echo "pushed: $(MULTIARCH_IMAGE):$(VERSION)"
|
||||
|
||||
SKIP_TESTS ?=
|
||||
test: build-dirs
|
||||
ifneq ($(SKIP_TESTS), 1)
|
||||
@@ -215,8 +230,27 @@ ci: all verify test
|
||||
changelog:
|
||||
hack/changelog.sh
|
||||
|
||||
# release builds a GitHub release using goreleaser within the build container.
|
||||
#
|
||||
# To dry-run the release, which will build the binaries/artifacts locally but
|
||||
# will *not* create a GitHub release:
|
||||
# GITHUB_TOKEN=an-invalid-token-so-you-dont-accidentally-push-release \
|
||||
# RELEASE_NOTES_FILE=changelogs/CHANGELOG-1.2.md \
|
||||
# PUBLISH=false \
|
||||
# make release
|
||||
#
|
||||
# To run the release, which will publish a *DRAFT* GitHub release in github.com/vmware-tanzu/velero
|
||||
# (you still need to review/publish the GitHub release manually):
|
||||
# GITHUB_TOKEN=your-github-token \
|
||||
# RELEASE_NOTES_FILE=changelogs/CHANGELOG-1.2.md \
|
||||
# PUBLISH=true \
|
||||
# make release
|
||||
release:
|
||||
hack/goreleaser.sh
|
||||
$(MAKE) shell CMD="-c '\
|
||||
GITHUB_TOKEN=$(GITHUB_TOKEN) \
|
||||
RELEASE_NOTES_FILE=$(RELEASE_NOTES_FILE) \
|
||||
PUBLISH=$(PUBLISH) \
|
||||
./hack/goreleaser.sh'"
|
||||
|
||||
serve-docs:
|
||||
docker run \
|
||||
|
||||
@@ -17,7 +17,7 @@ Velero consists of:
|
||||
|
||||
## Documentation
|
||||
|
||||
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Velero, and more.
|
||||
[The documentation][29] provides a getting started guide and information about building from source, architecture, extending Velero, and more.
|
||||
|
||||
Please use the version selector at the top of the site to ensure you are using the appropriate documentation for your version of Velero.
|
||||
|
||||
@@ -44,7 +44,6 @@ See [the list of releases][6] to find out about feature changes.
|
||||
[14]: https://github.com/kubernetes/kubernetes
|
||||
[24]: https://groups.google.com/forum/#!forum/projectvelero
|
||||
[25]: https://kubernetes.slack.com/messages/velero
|
||||
[28]: https://velero.io/docs/install-overview
|
||||
[29]: https://velero.io/docs/
|
||||
[30]: https://velero.io/docs/troubleshooting
|
||||
[100]: https://velero.io/docs/master/img/velero.png
|
||||
@@ -1,5 +1,7 @@
|
||||
# Velero Support
|
||||
|
||||
Thanks for trying out Velero! We welcome all feedback, please consider joining our mailing list:
|
||||
Thanks for trying out Velero! We welcome all feedback, find all the ways to connect with us on our Community page:
|
||||
|
||||
- [Mailing List](https://groups.google.com/forum/#!forum/projectvelero)
|
||||
- [Velero Community](https://velero.io/community/)
|
||||
|
||||
You can find details on the Velero maintainers' support process [here](https://velero.io/docs/master/support-process/).
|
||||
|
||||
@@ -1,40 +1,60 @@
|
||||
## v1.2.0-beta.1
|
||||
#### 2019-10-24
|
||||
## v1.2.0
|
||||
#### 2019-11-07
|
||||
|
||||
### Download
|
||||
- https://github.com/vmware-tanzu/velero/releases/tag/v1.2.0-beta.1
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.2.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.2.0-beta.1`
|
||||
`velero/velero:v1.2.0`
|
||||
|
||||
Please note that as of this release we are no longer publishing new container images to `gcr.io/heptio-images`. The existing ones will remain there for the foreseeable future.
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.2.0-beta.1/
|
||||
https://velero.io/docs/v1.2.0/
|
||||
|
||||
### Upgrading
|
||||
|
||||
If you're upgrading from a previous version of Velero, there are several changes you'll need to be aware of:
|
||||
|
||||
- Container images are now published to Docker Hub. To upgrade your server, use the new image `velero/velero:v1.2.0-beta.1`.
|
||||
- The AWS, Microsoft Azure, and GCP provider plugins that were previously part of the Velero binary have been extracted to their own standalone repositories/plugin images. If you are using one of these three providers, you will need to explicitly add the appropriate plugin to your Velero install:
|
||||
- [AWS] `velero plugin add velero/velero-plugin-for-aws:v1.0.0-beta.1`
|
||||
- [Azure] `velero plugin add velero/velero-plugin-for-microsoft-azure:v1.0.0-beta.1`
|
||||
- [GCP] `velero plugin add velero/velero-plugin-for-gcp:v1.0.0-beta.1`
|
||||
https://velero.io/docs/v1.2.0/upgrade-to-1.2/
|
||||
|
||||
### Highlights
|
||||
## Moving Cloud Provider Plugins Out of Tree
|
||||
|
||||
- The AWS, Microsoft Azure, and GCP provider plugins that were previously part of the Velero binary have been extracted to their own standalone repositories/plugin images. They now function like any other provider plugin.
|
||||
- Container images are now published to Docker Hub: `velero/velero:v1.2.0-beta.1`.
|
||||
- Several improvements have been made to the restic integration:
|
||||
- Backup and restore progress is now updated on the `PodVolumeBackup` and `PodVolumeRestore` custom resources and viewable via `velero backup/restore describe` while operations are in progress.
|
||||
- Read-write-many PVCs are now only backed up once.
|
||||
- Backups of PVCs remain incremental across pod reschedules.
|
||||
- A structural schema has been added to the Velero CRDs that are created by `velero install` to enable validation of API fields.
|
||||
- During restores that use the `--namespace-mappings` flag to clone a namespace within a cluster, PVs will now be cloned as needed.
|
||||
Velero has had built-in support for AWS, Microsoft Azure, and Google Cloud Platform (GCP) since day 1. When Velero moved to a plugin architecture for object store providers and volume snapshotters in version 0.6, the code for these three providers was converted to use the plugin interface provided by this new architecture, but the cloud provider code still remained inside the Velero codebase. This put the AWS, Azure, and GCP plugins in a different position compared with other providers’ plugins, since they automatically shipped with the Velero binary and could include documentation in-tree.
|
||||
|
||||
With version 1.2, we’ve extracted the AWS, Azure, and GCP plugins into their own repositories, one per provider. We now also publish one plugin image per provider. This change brings these providers to parity with other providers’ plugin implementations, reduces the size of the core Velero binary by not requiring each provider’s SDK to be included, and opens the door for the plugins to be maintained and released independently of core Velero.
|
||||
|
||||
## Restic Integration Improvements
|
||||
|
||||
We’ve continued to work on improving Velero’s restic integration. With this release, we’ve made the following enhancements:
|
||||
|
||||
- Restic backup and restore progress is now captured during execution and visible to the user through the `velero backup/restore describe --details` command. The details are updated every 10 seconds. This provides a new level of visibility into restic operations for users.
|
||||
- Restic backups of persistent volume claims (PVCs) now remain incremental across the rescheduling of a pod. Previously, if the pod using a PVC was rescheduled, the next restic backup would require a full rescan of the volume’s contents. This improvement potentially makes such backups significantly faster.
|
||||
- Read-write-many volumes are no longer backed up once for every pod using the volume, but instead just once per Velero backup. This improvement speeds up backups and prevents potential restore issues due to multiple copies of the backup being processed simultaneously.
|
||||
|
||||
|
||||
## Clone PVs When Cloning a Namespace
|
||||
|
||||
Before version 1.2, you could clone a Kubernetes namespace by backing it up and then restoring it to a different namespace in the same cluster by using the `--namespace-mappings` flag with the `velero restore create` command. However, in this scenario, Velero was unable to clone persistent volumes used by the namespace, leading to errors for users.
|
||||
|
||||
In version 1.2, Velero automatically detects when you are trying to clone an existing namespace, and clones the persistent volumes used by the namespace as well. This doesn’t require the user to specify any additional flags for the `velero restore create` command. This change lets you fully achieve your goal of cloning namespaces using persistent storage within a cluster.
|
||||
|
||||
## Improved Server-Side Encryption Support
|
||||
|
||||
To help you secure your important backup data, we’ve added support for more forms of server-side encryption of backup data on both AWS and GCP. Specifically:
|
||||
- On AWS, Velero now supports Amazon S3-managed encryption keys (SSE-S3), which uses AES256 encryption, by specifying `serverSideEncryption: AES256` in a backup storage location’s config.
|
||||
- On GCP, Velero now supports using a specific Cloud KMS key for server-side encryption by specifying `kmsKeyName: <key name>` in a backup storage location’s config.
|
||||
|
||||
## CRD Structural Schema
|
||||
|
||||
In Kubernetes 1.16, custom resource definitions (CRDs) reached general availability. Structural schemas are required for CRDs created in the `apiextensions.k8s.io/v1` API group. Velero now defines a structural schema for each of its CRDs and automatically applies it the user runs the `velero install` command. The structural schemas enable the user to get quicker feedback when their backup, restore, or schedule request is invalid, so they can immediately remediate their request.
|
||||
|
||||
### All Changes
|
||||
* Ensure object store plugin processes are cleaned up after restore and after BSL validation during server start up (#2041, @betta1)
|
||||
* bug fix: don't try to restore pod volume backups that don't have a snapshot ID (#2031, @skriss)
|
||||
* Restore Documentation: Updated Restore Documentation with Clarification implications of removing restore object. (#1957, @nainav)
|
||||
* add `--allow-partially-failed` flag to `velero restore create` for use with `--from-schedule` to allow partially-failed backups to be restored (#1994, @skriss)
|
||||
* Allow backup storage locations to specify backup sync period or toggle off sync (#1936, @betta1)
|
||||
* Remove cloud provider code (#1985, @carlisia)
|
||||
* Restore action for cluster/namespace role bindings (#1974, @alexander)
|
||||
* Restore action for cluster/namespace role bindings (#1974, @alexander-demichev)
|
||||
* Add `--no-default-backup-location` flag to `velero install` (#1931, @Frank51)
|
||||
* If includeClusterResources is nil/auto, pull in necessary CRDs in backupResource (#1831, @sseago)
|
||||
* Azure: add support for Azure China/German clouds (#1938, @andyzhangx)
|
||||
@@ -61,4 +81,3 @@ If you're upgrading from a previous version of Velero, there are several changes
|
||||
* fix error formatting due interpreting % as printf formatted strings (#1781, @s12chung)
|
||||
* when using `velero restore create --namespace-mappings ...` to create a second copy of a namespace in a cluster, create copies of the PVs used (#1779, @skriss)
|
||||
* adds --from-schedule flag to the `velero create backup` command to create a Backup from an existing Schedule (#1734, @prydonius)
|
||||
* add `--allow-partially-failed` flag to `velero restore create` for use with `--from-schedule` to allow partially-failed backups to be restored (#1994, @skriss)
|
||||
|
||||
44
changelogs/CHANGELOG-1.3.md
Normal file
44
changelogs/CHANGELOG-1.3.md
Normal file
@@ -0,0 +1,44 @@
|
||||
## v1.3.0-beta.1
|
||||
#### 2020-02-04
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.3.0-beta.1
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.3.0-beta.1`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.3.0-beta.1/
|
||||
|
||||
### Upgrading
|
||||
```bash
|
||||
kubectl set image \
|
||||
--namespace velero \
|
||||
deployment/velero velero=velero/velero:v1.3.0-beta.1
|
||||
|
||||
# if using restic:
|
||||
kubectl set image \
|
||||
--namespace velero \
|
||||
daemonset/restic restic=velero/velero:v1.3.0-beta.1
|
||||
```
|
||||
|
||||
### All Changes
|
||||
* added support for arm and arm64 images (#2227, @shaneutt)
|
||||
* when restoring from a schedule, validate by checking for backup(s) labeled with the schedule name rather than existence of the schedule itself, to allow for restoring from deleted schedules and schedules in other clusters (#2218, @cpanato)
|
||||
* bug fix: back up server-preferred version of CRDs rather than always the `v1beta1` version (#2230, @skriss)
|
||||
* Wait for CustomResourceDefinitions to be ready before restoring CustomResources. Also refresh the resource list from the Kubernetes API server after restoring CRDs in order to properly restore CRs. (#1937, @nrb)
|
||||
* When restoring a v1 CRD with PreserveUnknownFields = True, make sure that the preservation behavior is maintained by copying the flag into the Open API V3 schema, but update the flag so as to allow the Kubernetes API server to accept the CRD without error. (#2197, @nrb)
|
||||
* Enable pruning unknown CRD fields (#2187, @jenting)
|
||||
* bump restic to 0.9.6 to fix some issues with non AWS standard regions (#2210, @Sh4d1)
|
||||
* bug fix: fix race condition resulting in restores sometimes succeeding despite restic restore failures (#2201, @skriss)
|
||||
* Bug fix: Check for nil LastMaintenanceTime in ResticRepository dueForMaintenance (#2200, @sseago)
|
||||
* repopulate backup_last_successful_timestamp metrics for each schedule after server restart (#2196, @skriss)
|
||||
* added support for ppc64le images and manifest lists (#1768, @prajyot)
|
||||
* bug fix: only prioritize restoring `replicasets.apps`, not `replicasets.extensions` (#2157, @skriss)
|
||||
* bug fix: restore both `replicasets.apps` *and* `replicasets.extensions` before `deployments` (#2120, @skriss)
|
||||
* bug fix: don't restore cluster-scoped resources when restoring specific namespaces and IncludeClusterResources is nil (#2118, @skriss)
|
||||
* Enableing Velero to switch credentials (`AWS_PROFILE`) if multiple s3-compatible backupLocations are present (#2096, @dinesh)
|
||||
* bug fix: deep-copy backup's labels when constructing snapshot tags, so the PV name isn't added as a label to the backup (#2075, @skriss)
|
||||
* remove the `fsfreeze-pause` image being published from this repo; replace it with `ubuntu:bionic` in the nginx example app (#2068, @skriss)
|
||||
* add support for a private registry with a custom port in a restic-helper image (#1999, @cognoz)
|
||||
* return better error message to user when cluster config can't be found via `--kubeconfig`, `$KUBECONFIG`, or in-cluster config (#2057, @skriss)
|
||||
@@ -1 +0,0 @@
|
||||
adds --from-schedule flag to the `velero create backup` command to create a Backup from an existing Schedule
|
||||
1
changelogs/unreleased/1768-prajyot-parab
Normal file
1
changelogs/unreleased/1768-prajyot-parab
Normal file
@@ -0,0 +1 @@
|
||||
added support for ppc64le images and manifest lists
|
||||
@@ -1 +0,0 @@
|
||||
when using `velero restore create --namespace-mappings ...` to create a second copy of a namespace in a cluster, create copies of the PVs used
|
||||
@@ -1 +0,0 @@
|
||||
fix error formatting due interpreting % as printf formatted strings
|
||||
@@ -1 +0,0 @@
|
||||
adds `insecureSkipTLSVerify` server config for AWS storage and `--insecure-skip-tls-verify` flag on client for self-signed certs
|
||||
@@ -1 +0,0 @@
|
||||
remove 'restic check' calls from before/after 'restic prune' since they're redundant
|
||||
@@ -1 +0,0 @@
|
||||
Add `--features` argument to all velero commands to provide feature flags that can control enablement of pre-release features.
|
||||
@@ -1 +0,0 @@
|
||||
when backing up PVCs with restic, specify --parent flag to prevent full volume rescans after pod reschedules
|
||||
@@ -1 +0,0 @@
|
||||
report backup progress in PodVolumeBackups and expose progress in the velero backup describe --details command. Also upgrades restic to v0.9.5
|
||||
@@ -1 +0,0 @@
|
||||
If includeClusterResources is nil/auto, pull in necessary CRDs in backupResource
|
||||
@@ -1 +0,0 @@
|
||||
fix excluding additional items with the velero.io/exclude-from-backup=true label
|
||||
@@ -1 +0,0 @@
|
||||
Jeckyll Site updates - modifies documentation to use a wider layout; adds better markdown table formatting
|
||||
@@ -1 +0,0 @@
|
||||
report restore progress in PodVolumeRestores and expose progress in the velero restore describe --details command
|
||||
@@ -1 +0,0 @@
|
||||
velero install: if `--use-restic` and `--wait` are specified, wait up to a minute for restic daemonset to be ready
|
||||
@@ -1 +0,0 @@
|
||||
change default `restic prune` interval to 7 days, add `velero server/install` flags for specifying an alternate default value.
|
||||
@@ -1 +0,0 @@
|
||||
AWS: add support for SSE-S3 AES256 encryption via `serverSideEncryption` config field in BackupStorageLocation
|
||||
@@ -1 +0,0 @@
|
||||
GCP: add support for specifying a Cloud KMS key name to use for encrypting backups in a storage location.
|
||||
@@ -1 +0,0 @@
|
||||
backup sync controller: stop using `metadata/revision` file, do a full diff of bucket contents vs. cluster contents each sync interval
|
||||
@@ -1 +0,0 @@
|
||||
Add LD_LIBRARY_PATH (=/plugins) to the env variables of velero deployment.
|
||||
@@ -1 +0,0 @@
|
||||
Azure: add support for cross-subscription backups
|
||||
@@ -1 +0,0 @@
|
||||
restic: only backup read-write-many PVCs at most once, even if they're annotated for backup from multiple pods.
|
||||
@@ -1 +0,0 @@
|
||||
adds structural schema to Velero CRDs created on Velero install, enabling validation of Velero API fields
|
||||
@@ -1 +0,0 @@
|
||||
Add check to update resource field during backupItem
|
||||
@@ -1 +0,0 @@
|
||||
bug fix: during restore, check item's original namespace, not the remapped one, for inclusion/exclusion
|
||||
@@ -1 +0,0 @@
|
||||
Add a new required --plugins flag for velero install command. --plugins takes a list of container images to add as initcontainers.
|
||||
@@ -1 +0,0 @@
|
||||
Add --no-default-backup-location flag to velero install
|
||||
@@ -1 +0,0 @@
|
||||
Allow backup storage locations to specify backup sync period or toggle off sync
|
||||
1
changelogs/unreleased/1937-nrb
Normal file
1
changelogs/unreleased/1937-nrb
Normal file
@@ -0,0 +1 @@
|
||||
Wait for CustomResourceDefinitions to be ready before restoring CustomResources. Also refresh the resource list from the Kubernetes API server after restoring CRDs in order to properly restore CRs.
|
||||
@@ -1 +0,0 @@
|
||||
Azure: add support for Azure China/German clouds
|
||||
@@ -1 +0,0 @@
|
||||
Restore action for cluster/namespace role bindings
|
||||
@@ -1 +0,0 @@
|
||||
Remove cloud provider code
|
||||
@@ -1 +0,0 @@
|
||||
add `--allow-partially-failed` flag to `velero restore create` for use with `--from-schedule` to allow partially-failed backups to be restored
|
||||
1
changelogs/unreleased/1999-cognoz
Normal file
1
changelogs/unreleased/1999-cognoz
Normal file
@@ -0,0 +1 @@
|
||||
add support for a private registry with a custom port in a restic-helper image
|
||||
1
changelogs/unreleased/2057-skriss
Normal file
1
changelogs/unreleased/2057-skriss
Normal file
@@ -0,0 +1 @@
|
||||
return better error message to user when cluster config can't be found via `--kubeconfig`, `$KUBECONFIG`, or in-cluster config
|
||||
1
changelogs/unreleased/2068-skriss
Normal file
1
changelogs/unreleased/2068-skriss
Normal file
@@ -0,0 +1 @@
|
||||
remove the `fsfreeze-pause` image being published from this repo; replace it with `ubuntu:bionic` in the nginx example app
|
||||
1
changelogs/unreleased/2075-skriss
Normal file
1
changelogs/unreleased/2075-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: deep-copy backup's labels when constructing snapshot tags, so the PV name isn't added as a label to the backup
|
||||
1
changelogs/unreleased/2096-dinesh
Normal file
1
changelogs/unreleased/2096-dinesh
Normal file
@@ -0,0 +1 @@
|
||||
Enableing Velero to switch credentials (`AWS_PROFILE`) if multiple s3-compatible backupLocations are present
|
||||
1
changelogs/unreleased/2118-skriss
Normal file
1
changelogs/unreleased/2118-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: don't restore cluster-scoped resources when restoring specific namespaces and IncludeClusterResources is nil
|
||||
1
changelogs/unreleased/2120-skriss
Normal file
1
changelogs/unreleased/2120-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: restore both `replicasets.apps` *and* `replicasets.extensions` before `deployments`
|
||||
1
changelogs/unreleased/2157-skriss
Normal file
1
changelogs/unreleased/2157-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: only prioritize restoring `replicasets.apps`, not `replicasets.extensions`
|
||||
1
changelogs/unreleased/2187-jenting
Normal file
1
changelogs/unreleased/2187-jenting
Normal file
@@ -0,0 +1 @@
|
||||
Enable pruning unknown CRD fields
|
||||
1
changelogs/unreleased/2196-skriss
Normal file
1
changelogs/unreleased/2196-skriss
Normal file
@@ -0,0 +1 @@
|
||||
repopulate backup_last_successful_timestamp metrics for each schedule after server restart
|
||||
1
changelogs/unreleased/2197-nrb
Normal file
1
changelogs/unreleased/2197-nrb
Normal file
@@ -0,0 +1 @@
|
||||
When restoring a v1 CRD with PreserveUnknownFields = True, make sure that the preservation behavior is maintained by copying the flag into the Open API V3 schema, but update the flag so as to allow the Kubernetes API server to accept the CRD without error.
|
||||
1
changelogs/unreleased/2200-sseago
Normal file
1
changelogs/unreleased/2200-sseago
Normal file
@@ -0,0 +1 @@
|
||||
Bug fix: Check for nil LastMaintenanceTime in ResticRepository dueForMaintenance
|
||||
1
changelogs/unreleased/2201-skriss
Normal file
1
changelogs/unreleased/2201-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: fix race condition resulting in restores sometimes succeeding despite restic restore failures
|
||||
1
changelogs/unreleased/2210-Sh4d1
Normal file
1
changelogs/unreleased/2210-Sh4d1
Normal file
@@ -0,0 +1 @@
|
||||
bump restic to 0.9.6 to fix some issues with non AWS standard regions
|
||||
1
changelogs/unreleased/2218-cpanato
Normal file
1
changelogs/unreleased/2218-cpanato
Normal file
@@ -0,0 +1 @@
|
||||
remove the schedule validation; because we might be in another cluster, instead of checking the schedule itself we check whether a backup exists with the schedule name.
|
||||
1
changelogs/unreleased/2227-shaneutt
Normal file
1
changelogs/unreleased/2227-shaneutt
Normal file
@@ -0,0 +1 @@
|
||||
added support for arm and arm64 images
|
||||
1
changelogs/unreleased/2230-skriss
Normal file
1
changelogs/unreleased/2230-skriss
Normal file
@@ -0,0 +1 @@
|
||||
bug fix: back up server-preferred version of CRDs rather than always the `v1beta1` version
|
||||
324
design/csi-snapshots.md
Normal file
324
design/csi-snapshots.md
Normal file
@@ -0,0 +1,324 @@
|
||||
# CSI Snapshot Support
|
||||
|
||||
The Container Storage Interface (CSI) [introduced an alpha snapshot API in Kubernetes v1.12][1].
|
||||
It will reach beta support in Kubernetes v1.17, scheduled for release in December 2019.
|
||||
This proposal documents an approach for integrating support for this snapshot API within Velero, augmenting its existing capabilities.
|
||||
|
||||
## Goals
|
||||
|
||||
- Enable Velero to backup and restore CSI-backed volumes using the Kubernetes CSI CustomResourceDefinition API
|
||||
|
||||
## Non Goals
|
||||
|
||||
- Replacing Velero's existing [VolumeSnapshotter][7] API
|
||||
- Replacing Velero's Restic support
|
||||
|
||||
## Background
|
||||
|
||||
Velero has had support for performing persistent volume snapshots since its inception.
|
||||
However, support has been limited to a handful of providers.
|
||||
The plugin API introduced in Velero v0.7 enabled the community to expand the number of supported providers.
|
||||
In the meantime, the Kubernetes sig-storage advanced the CSI spec to allow for a generic storage interface, opening up the possibility of moving storage code out of the core Kubernetes code base.
|
||||
The CSI working group has also developed a generic snapshotting API that any CSI driver developer may implement, giving users the ability to snapshot volumes from a standard interface.
|
||||
|
||||
By supporting the CSI snapshot API, Velero can extend its support to any CSI driver, without requiring a Velero-specific plugin be written, easing the development burden on providers while also reaching more end users.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
In order to support CSI's snapshot API, Velero must interact with the [`VolumeSnapshot`][2] and [`VolumeSnapshotContent`][3] CRDs.
|
||||
These act as requests to the CSI driver to perform a snapshot on the underlying provider's volume.
|
||||
This can largely be accomplished with Velero `BackupItemAction` and `RestoreItemAction` plugins that operate on these CRDs.
|
||||
|
||||
Additionally, changes to the Velero server and client code are necessary to track `VolumeSnapshot`s that are associated with a given backup, similarly to how Velero tracks its own [`volume.Snapshot`][4] type.
|
||||
Tracking these is important for allowing users to see what is in their backup, and provides parity for the existing `volume.Snapshot` and [`PodVolumeBackup`][5] types.
|
||||
This is also done to retain the object store as Velero's source of truth, without having to query the Kubernetes API server for associated `VolumeSnapshot`s.
|
||||
|
||||
`velero backup describe --details` will use the stored VolumeSnapshots to list CSI snapshots included in the backup to the user.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Resource Plugins
|
||||
|
||||
A set of [prototype][6] plugins was developed that informed this design.
|
||||
|
||||
The plugins will be as follows:
|
||||
|
||||
|
||||
#### A `BackupItemAction` for `PersistentVolumeClaim`s, named `velero.io/csi-pvc`
|
||||
|
||||
This plugin will act directly on PVCs, since an implementation of Velero's VolumeSnapshotter does not have enough information about the StorageClass to properly create the `VolumeSnapshot` objects.
|
||||
|
||||
The associated PV will be queried and checked for the presence of `PersistentVolume.Spec.PersistentVolumeSource.CSI`. (See the "Snapshot Mechanism Selection" section below).
|
||||
If this field is `nil`, then the plugin will return early without taking action.
|
||||
If the `Backup.Spec.SnapshotVolumes` value is `false`, the plugin will return early without taking action.
|
||||
|
||||
Additionally, to prevent creating CSI snapshots for volumes backed up by restic, the plugin will query for all pods in the `PersistentVolumeClaim`'s namespace.
|
||||
It will then filter out the pods that have the PVC mounted, and inspect the `backup.velero.io/backup-volumes` annotation for the associated volume's name.
|
||||
If the name is found in the list, then the plugin will return early without taking further action.
|
||||
|
||||
Create a `VolumeSnapshot.snapshot.storage.k8s.io` object from the PVC.
|
||||
Label the `VolumeSnapshot` object with the [`velero.io/backup-name`][10] label for ease of lookup later.
|
||||
Also set an ownerRef on the `VolumeSnapshot` so that cascading deletion of the Velero `Backup` will delete associated `VolumeSnapshots`.
|
||||
|
||||
The CSI controllers will create a `VolumeSnapshotContent.snapshot.storage.k8s.io` object associated with the `VolumeSnapshot`.
|
||||
|
||||
Associated `VolumeSnapshotContent` objects will be retrieved and updated with the [`velero.io/backup-name`][10] label for ease of lookup later.
|
||||
`velero.io/volume-snapshot-name` will be applied as a label to the PVC so that the `VolumeSnapshot` can be found easily for restore.
|
||||
|
||||
`VolumeSnapshot`, `VolumeSnapshotContent`, and `VolumeSnapshotClass` objects would be returned as additional items to be backed up. GitHub issue [1566][18] represents this work.
|
||||
|
||||
The `VolumeSnapshotContent.Spec.VolumeSnapshotSource.SnapshotHandle` field is the link to the underlying platform's on-disk snapshot, and must be preserved for restoration.
|
||||
|
||||
The plugin will _not_ wait for the `VolumeSnapshot.Status.readyToUse` field to be `true` before returning.
|
||||
This field indicates that the snapshot is ready to use for restoration, and for different vendors can indicate that the snapshot has been made durable.
|
||||
However, the applications can proceed as soon as `VolumeSnapshot.Status.CreationTime` is set.
|
||||
This also maintains current Velero behavior, which allows applications to quiesce and resume quickly, with minimal interruption.
|
||||
|
||||
Any sort of monitoring or waiting for durable snapshots, either Velero-native or CSI snapshots, are not covered by this proposal.
|
||||
|
||||
```
|
||||
K8s object relationships inside of the backup tarball
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------+-----------+ +-----------+-----------+
|
||||
^ ^
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
+-----------+-----------+ +-----------+-----------+
|
||||
| VolumeSnapshot +<------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `VolumeSnapshotContent` objects, named `velero.io/csi-vsc`
|
||||
|
||||
On restore, `VolumeSnapshotContent` objects are cleaned so that they may be properly associated with IDs assigned by the target cluster.
|
||||
|
||||
Only `VolumeSnapshotContent` objects with the `velero.io/backup-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
|
||||
The metadata (excluding labels), `PersistentVolumeClaim.UUID`, and `VolumeSnapshotRef.UUID` fields will be cleared.
|
||||
The reference fields are cleared because the associated objects will get new UUIDs in the cluster.
|
||||
This also maps to the "import" case of [the snapshot API][1].
|
||||
|
||||
This means the relationship between the `VolumeSnapshot` and `VolumeSnapshotContent` is
|
||||
one way until the CSI controllers rebind them.
|
||||
|
||||
|
||||
```
|
||||
K8s objects after the velero.io/csi-vsc plugin has run
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------------------+ +-----------------------+
|
||||
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `VolumeSnapshot` objects, named `velero.io/csi-vs`
|
||||
|
||||
`VolumeSnapshot` objects must be prepared for importing into the target cluster by removing IDs and metadata associated with their origin cluster.
|
||||
|
||||
Only `VolumeSnapshot` objects with the `velero.io/backup-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
|
||||
Metadata (excluding labels) and `Source` (that is, the pointer to the `PersistentVolumeClaim`) fields on the object will be cleared.
|
||||
The `VolumeSnapshot.Spec.SnapshotContentName` is the link back to the `VolumeSnapshotContent` object, and thus the actual snapshot.
|
||||
The `Source` field indicates that a new CSI snapshot operation should be performed, which isn't relevant on restore.
|
||||
This follows the "import" case of [the snapshot API][1].
|
||||
|
||||
The `Backup` associated with the `VolumeSnapshot` will be queried, and set as an ownerRef on the `VolumeSnapshot` so that deletion can cascade.
|
||||
|
||||
```
|
||||
+-----------------------+ +-----------------------+
|
||||
| PersistentVolumeClaim +-------------->+ PersistentVolume |
|
||||
+-----------------------+ +-----------------------+
|
||||
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### A `RestoreItemAction` for `PersistentVolumeClaim`s named `velero.io/csi-pvc`
|
||||
|
||||
On restore, `PersistentVolumeClaims` will need to be created from the snapshot, and thus will require editing before submission.
|
||||
|
||||
Only `PersistentVolumeClaim` objects with the `velero.io/volume-snapshot-name` label will be processed, using the plugin's `AppliesTo` function.
|
||||
Metadata (excluding labels) will be cleared, and the `velero.io/volume-snapshot-name` label will be used to find the relevant `VolumeSnapshot`.
|
||||
A reference to the `VolumeSnapshot` will be added to the `PersistentVolumeClaim.DataSource` field.
|
||||
|
||||
```
|
||||
+-----------------------+
|
||||
| PersistentVolumeClaim |
|
||||
+-----------------------+
|
||||
|
||||
+-----------------------+ +-----------------------+
|
||||
| VolumeSnapshot +-------------->+ VolumeSnapshotContent |
|
||||
+-----------------------+ +-----------------------+
|
||||
```
|
||||
|
||||
#### VolumeSnapshotClasses
|
||||
|
||||
No special logic is required to restore `VolumeSnapshotClass` objects.
|
||||
|
||||
These plugins should be provided with Velero, as there will also be some changes to core Velero code to enable association of a `Backup` to the included `VolumeSnapshot`s.
|
||||
|
||||
|
||||
|
||||
### Velero server changes
|
||||
|
||||
Any non-plugin code changes must be behind a `EnableCSI` feature flag and the behavior will be opt-in until it's exited beta status.
|
||||
This will allow the development to continue on the feature while it's in pre-production state, while also reducing the need for long-lived feature branches.
|
||||
|
||||
[`persistBackup`][8] will be extended to query for all `VolumeSnapshot`s associated with the backup, and persist the list to JSON.
|
||||
|
||||
[`BackupStore.PutBackup`][9] will receive an additional argument, `volumeSnapshots io.Reader`, that contains the JSON representation of `VolumeSnapshots`.
|
||||
This will be written to a file named `csi-snapshots.json.gz`.
|
||||
|
||||
[`defaultRestorePriorities`][11] should be rewritten to the following to accommodate proper association between the CSI objects and PVCs. `CustomResourceDefinition`s are moved up because they're necessary for creating the CSI CRDs. The CSI CRDs are created before `PersistentVolume`s and `PersistentVolumeClaim`s so that they may be used as data sources.
|
||||
GitHub issue [1565][17] represents this work.
|
||||
|
||||
```go
|
||||
var defaultRestorePriorities = []string{
|
||||
"namespaces",
|
||||
"storageclasses",
|
||||
"customresourcedefinitions",
|
||||
"volumesnapshotclass.snapshot.storage.k8s.io",
|
||||
"volumesnapshotcontents.snapshot.storage.k8s.io",
|
||||
"volumesnapshots.snapshot.storage.k8s.io",
|
||||
"persistentvolumes",
|
||||
"persistentvolumeclaims",
|
||||
"secrets",
|
||||
"configmaps",
|
||||
"serviceaccounts",
|
||||
"limitranges",
|
||||
"pods",
|
||||
"replicaset",
|
||||
}
|
||||
```
|
||||
### Restic and CSI interaction
|
||||
|
||||
Volumes found in a `Pod`'s `backup.velero.io/backup-volumes` list will use Velero's current Restic code path.
|
||||
This also means Velero will continue to offer Restic as an option for CSI volumes.
|
||||
|
||||
The `velero.io/csi-pvc` BackupItemAction plugin will inspect pods in the namespace to ensure that it does not act on PVCs already being backed up by restic.
|
||||
|
||||
This is preferred to modifying the PVC due to the fact that Velero's current backup process backs up PVCs and PVs mounted to pods at the same time as the pod.
|
||||
|
||||
A drawback to this approach is that we're querying all pods in the namespace per PVC, which could be a large number.
|
||||
In the future, the plugin interface could be improved to have some sort of context argument, so that additional data such as our existing `resticSnapshotTracker` could be passed to plugins and reduce work.
|
||||
|
||||
### Garbage collection and deletion
|
||||
|
||||
To ensure that all created resources are deleted when a backup expires or is deleted, `VolumeSnapshot`s will have an `ownerRef` defined pointing to the Velero backup that created them.
|
||||
|
||||
In order to fully delete these objects, each `VolumeSnapshotContent`s object will need to be edited to ensure the associated provider snapshot is deleted.
|
||||
This will be done by editing the object and setting `VolumeSnapshotContent.Spec.DeletionPolicy` to `Delete`, regardless of whether or not the default policy for the class is `Retain`.
|
||||
See the Deletion Policies section below.
|
||||
The edit will happen before making Kubernetes API deletion calls to ensure that the cascade works as expected.
|
||||
|
||||
Deleting a Velero `Backup` or any associated CSI object via `kubectl` is unsupported; data will be lost or orphaned if this is done.
|
||||
|
||||
### Other snapshots included in the backup
|
||||
|
||||
Since `VolumeSnapshot` and `VolumeSnapshotContent` objects are contained within a Velero backup tarball, it is possible that all CRDs and on-disk provider snapshots have been deleted, yet the CRDs are still within other Velero backup tarballs.
|
||||
Thus, when a Velero backup that contains these CRDs is restored, the `VolumeSnapshot` and `VolumeSnapshotContent` objects are restored into the cluster, the CSI controllers will attempt to reconcile their state, and there are two possible states when the on-disk snapshot has been deleted:
|
||||
|
||||
1) If the driver _does not_ support the `ListSnapshots` gRPC method, then the CSI controllers have no way of knowing how to find it, and set the `VolumeSnapshot.Status.readyToUse` field to `true`.
|
||||
2) If the driver _does_ support the `ListSnapshots` gRPC method, then the CSI controllers will query the state of the on-disk snapshot, see it is missing, and set `VolumeSnapshot.Status.readyToUse` and `VolumeSnapshotContent.Status.readyToUse` fields to `false`.
|
||||
|
||||
## Velero client changes
|
||||
|
||||
To use CSI features, the Velero client must use the `EnableCSI` feature flag.
|
||||
|
||||
[`DescribeBackupStatus`][13] will be extended to download the `csi-snapshots.json.gz` file for processing. GitHub Issue [1568][19] captures this work.
|
||||
|
||||
A new `describeCSIVolumeSnapshots` function should be added to the [output][12] package that knows how to render the included `VolumeSnapshot` names referenced in the `csi-snapshots.json.gz` file.
|
||||
|
||||
### Snapshot selection mechanism
|
||||
|
||||
The most accurate, reliable way to detect if a PersistentVolume is a CSI volume is to check for a non-`nil` [`PersistentVolume.Spec.PersistentVolumeSource.CSI`][16] field.
|
||||
Using the [`volume.beta.kubernetes.io/storage-provisioner`][14] is not viable, since the usage is for any PVC that should be dynamically provisioned, and is _not_ limited to CSI implementations.
|
||||
It was [introduced with dynamic provisioning support][15] in 2016, predating CSI.
|
||||
|
||||
In the `BackupItemAction` for PVCs, the associated PV will be queried and checked for the presence of `PersistentVolume.Spec.PersistentVolumeSource.CSI`.
|
||||
Volumes with any other `PersistentVolumeSource` set will use Velero's current VolumeSnapshotter plugin code path.
|
||||
|
||||
### VolumeSnapshotLocations and VolumeSnapshotClasses
|
||||
|
||||
Velero uses its own `VolumeSnapshotLocation` CRDs to specify configuration options for a given storage system.
|
||||
In Velero, this often includes topology information such as regions or availability zones, as well as credential information.
|
||||
|
||||
CSI volume snapshotting has a `VolumeSnapshotClass` CRD which also contains configuration options for a given storage system, but these options are not the same as those that Velero would use.
|
||||
Since CSI volume snapshotting is operating within the same storage system that manages the volumes already, it does not need the same topology or credential information that Velero does.
|
||||
|
||||
As such, when used with CSI volumes, Velero's `VolumeSnapshotLocation` CRDs are not relevant, and could be omitted.
|
||||
|
||||
This will create a separate path in our documentation for the time being, and should be called out explicitly.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
* Implementing similar logic in a Velero VolumeSnapshotter plugin was considered.
|
||||
However, this is inappropriate given CSI's data model, which requires a PVC/PV's StorageClass.
|
||||
Given the arguments to the VolumeSnapshotter interface, the plugin would have to instantiate its own client and do queries against the Kubernetes API server to get the necessary information.
|
||||
|
||||
This is unnecessary given the fact that the `BackupItemAction` and `RestoreItemAction` APIs can act directly on the appropriate objects.
|
||||
|
||||
Additionally, the VolumeSnapshotter plugins and CSI volume snapshot drivers overlap - both produce a snapshot on backup and a PersistentVolume on restore.
|
||||
Thus, there's not a logical place to fit the creation of VolumeSnapshot creation in the VolumeSnapshotter interface.
|
||||
|
||||
* Implement CSI logic directly in Velero core code.
|
||||
The plugins could be packaged separately, but that doesn't necessarily make sense with server and client changes being made to accommodate CSI snapshot lookup.
|
||||
|
||||
* Implementing the CSI logic entirely in external plugins.
|
||||
As mentioned above, the necessary plugins for `PersistentVolumeClaim`, `VolumeSnapshot`, and `VolumeSnapshotContent` could be hosted out-of-tree from Velero.
|
||||
In fact, much of the logic for creating the CSI objects will be driven entirely inside of the plugin implementation.
|
||||
|
||||
However, Velero currently has no way for plugins to communicate that some arbitrary data should be stored in or retrieved from object storage, such as list of all `VolumeSnapshot` objects associated with a given `Backup`.
|
||||
This is important, because to display snapshots included in a backup, whether as native snapshots or Restic backups, separate JSON-encoded lists are stored within the backup on object storage.
|
||||
Snapshots are not listed directly on the `Backup` to fit within the etcd size limitations.
|
||||
Additionally, there are no client-side Velero plugin mechanisms, which means that the `velero describe backup --details` command would have no way of displaying the objects to the user, even if they were stored.
|
||||
|
||||
## Deletion Policies
|
||||
|
||||
In order for underlying, provider-level snapshots to be retained similarly to Velero's current functionality, the `VolumeSnapshotContent.Spec.DeletionPolicy` field must be set to `Retain`.
|
||||
|
||||
This is most easily accomplished by setting the `VolumeSnapshotClass.DeletionPolicy` field to `Retain`, which will be inherited by all `VolumeSnapshotContent` objects associated with the `VolumeSnapshotClass`.
|
||||
|
||||
The current default for dynamically provisioned `VolumeSnapshotContent` objects is `Delete`, which will delete the provider-level snapshot when the `VolumeSnapshotContent` object representing it is deleted.
|
||||
Additionally, the `Delete` policy will cascade a deletion of a `VolumeSnapshot`, removing the associated `VolumeSnapshotContent` object.
|
||||
|
||||
It is not currently possible to define a deletion policy on a `VolumeSnapshot` that gets passed to a `VolumeSnapshotContent` object on an individual basis.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
This proposal does not significantly change Velero's security implications within a cluster.
|
||||
|
||||
If a deployment is using solely CSI volumes, Velero will no longer need privileges to interact with volumes or snapshots, as these will be handled by the CSI driver.
|
||||
This reduces the provider permissions footprint of Velero.
|
||||
|
||||
Velero must still be able to access cluster-scoped resources in order to back up `VolumeSnapshotContent` objects.
|
||||
Without these objects, the provider-level snapshots cannot be located in order to re-associate them with volumes in the event of a restore.
|
||||
|
||||
|
||||
|
||||
[1]: https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/
|
||||
[2]: https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/apis/volumesnapshot/v1alpha1/types.go#L41
|
||||
[3]: https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/apis/volumesnapshot/v1alpha1/types.go#L161
|
||||
[4]: https://github.com/heptio/velero/blob/master/pkg/volume/snapshot.go#L21
|
||||
[5]: https://github.com/heptio/velero/blob/master/pkg/apis/velero/v1/pod_volume_backup.go#L88
|
||||
[6]: https://github.com/heptio/velero-csi-plugin/
|
||||
[7]: https://github.com/heptio/velero/blob/master/pkg/plugin/velero/volume_snapshotter.go#L26
|
||||
[8]: https://github.com/heptio/velero/blob/master/pkg/controller/backup_controller.go#L560
|
||||
[9]: https://github.com/heptio/velero/blob/master/pkg/persistence/object_store.go#L46
|
||||
[10]: https://github.com/heptio/velero/blob/master/pkg/apis/velero/v1/labels_annotations.go#L21
|
||||
[11]: https://github.com/heptio/velero/blob/master/pkg/cmd/server/server.go#L471
|
||||
[12]: https://github.com/heptio/velero/blob/master/pkg/cmd/util/output/backup_describer.go
|
||||
[13]: https://github.com/heptio/velero/blob/master/pkg/cmd/util/output/backup_describer.go#L214
|
||||
[14]: https://github.com/kubernetes/kubernetes/blob/8ea9edbb0290e9de1e6d274e816a4002892cca6f/pkg/controller/volume/persistentvolume/util/util.go#L69
|
||||
[15]: https://github.com/kubernetes/kubernetes/pull/30285
|
||||
[16]: https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/types.go#L237
|
||||
[17]: https://github.com/heptio/velero/issues/1565
|
||||
[18]: https://github.com/heptio/velero/issues/1566
|
||||
[19]: https://github.com/heptio/velero/issues/1568
|
||||
@@ -37,7 +37,7 @@ spec:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx:1.7.9
|
||||
- image: nginx:1.17.6
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
@@ -29,6 +29,7 @@ metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
# Optional:
|
||||
# storageClassName: <YOUR_STORAGE_CLASS_NAME>
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
@@ -62,7 +63,7 @@ spec:
|
||||
persistentVolumeClaim:
|
||||
claimName: nginx-logs
|
||||
containers:
|
||||
- image: nginx:1.7.9
|
||||
- image: nginx:1.17.6
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -70,7 +71,7 @@ spec:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: nginx-logs
|
||||
readOnly: false
|
||||
- image: velero/fsfreeze-pause:latest
|
||||
- image: ubuntu:bionic
|
||||
name: fsfreeze
|
||||
securityContext:
|
||||
privileged: true
|
||||
@@ -78,7 +79,12 @@ spec:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: nginx-logs
|
||||
readOnly: false
|
||||
command:
|
||||
- "/bin/bash"
|
||||
- "-c"
|
||||
- "sleep infinity"
|
||||
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018, 2019 the Velero contributors.
|
||||
# Copyright 2018, 2019, 2020 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,15 +17,17 @@ FROM golang:1.12
|
||||
RUN mkdir -p /go/src/k8s.io
|
||||
WORKDIR /go/src/k8s.io
|
||||
RUN git config --global advice.detachedHead false
|
||||
RUN git clone -b kubernetes-1.15.3 https://github.com/kubernetes/code-generator
|
||||
RUN git clone -b kubernetes-1.15.3 https://github.com/kubernetes/apimachinery
|
||||
RUN git clone -b kubernetes-1.17.0 https://github.com/kubernetes/code-generator
|
||||
RUN git clone -b kubernetes-1.17.0 https://github.com/kubernetes/apimachinery
|
||||
# Use a proxy for go modules to reduce the likelihood of various hosts being down and breaking the build
|
||||
ENV GOPROXY=https://proxy.golang.org
|
||||
# vendor code-generator go modules to be compatible with pre-1.15
|
||||
WORKDIR /go/src/k8s.io/code-generator
|
||||
# Don't use ENV here because we don't want to disable modules for subsequent commands
|
||||
RUN GO111MODULE=on go mod vendor
|
||||
RUN mkdir -p /go/src/sigs.k8s.io
|
||||
WORKDIR /go/src/sigs.k8s.io
|
||||
RUN git clone -b v0.2.2 https://github.com/kubernetes-sigs/controller-tools
|
||||
RUN git clone -b v0.2.4 https://github.com/kubernetes-sigs/controller-tools
|
||||
WORKDIR /go/src/sigs.k8s.io/controller-tools
|
||||
RUN GO111MODULE=on go mod vendor
|
||||
RUN go get golang.org/x/tools/cmd/goimports
|
||||
@@ -44,3 +46,7 @@ WORKDIR /go/src/github.com/golang
|
||||
RUN git clone -b v1.0.0 https://github.com/golang/protobuf
|
||||
WORKDIR /go/src/github.com/golang/protobuf
|
||||
RUN go install ./protoc-gen-go
|
||||
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \
|
||||
tar xvf goreleaser_Linux_x86_64.tar.gz && \
|
||||
mv goreleaser /usr/bin/goreleaser && \
|
||||
chmod +x /usr/bin/goreleaser
|
||||
|
||||
@@ -80,4 +80,4 @@ unset GIT_HTTP_USER_AGENT
|
||||
|
||||
echo "Building and pushing container images."
|
||||
|
||||
VERSION="$VERSION" TAG_LATEST="$TAG_LATEST" make all-containers all-push
|
||||
VERSION="$VERSION" TAG_LATEST="$TAG_LATEST" make all-containers all-push all-manifests
|
||||
|
||||
@@ -1,85 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# gcr-push is invoked by the CI/CD system to deploy docker images to Google Container Registry.
|
||||
# It will build images for all commits to master and all git tags.
|
||||
# The highest, non-prerelease semantic version will also be given the `latest` tag.
|
||||
|
||||
set +x
|
||||
|
||||
if [[ -z "$TRAVIS" ]]; then
|
||||
echo "This script is intended to be run only on Travis." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Return value is written into HIGHEST
|
||||
HIGHEST=""
|
||||
function highest_release() {
|
||||
# Loop through the tags since pre-release versions come before the actual versions.
|
||||
# Iterate til we find the first non-pre-release
|
||||
|
||||
# This is not necessarily the most recently made tag; instead, we want it to be the highest semantic version.
|
||||
# The most recent tag could potentially be a lower semantic version, made as a point release for a previous series.
|
||||
# As an example, if v1.3.0 exists and we create v1.2.2, v1.3.0 should still be `latest`.
|
||||
# `git describe --tags $(git rev-list --tags --max-count=1)` would return the most recently made tag.
|
||||
|
||||
for t in $(git tag -l --sort=-v:refname);
|
||||
do
|
||||
# If the tag has alpha, beta or rc in it, it's not "latest"
|
||||
if [[ "$t" == *"beta"* || "$t" == *"alpha"* || "$t" == *"rc"* ]]; then
|
||||
continue
|
||||
fi
|
||||
HIGHEST="$t"
|
||||
break
|
||||
done
|
||||
}
|
||||
|
||||
if [[ "$BRANCH" == "master" ]]; then
|
||||
VERSION="$BRANCH"
|
||||
elif [[ ! -z "$TRAVIS_TAG" ]]; then
|
||||
# Tags aren't fetched by Travis on checkout, and we don't need them for master
|
||||
git fetch --tags
|
||||
# Calculate the latest release if there's a tag.
|
||||
highest_release
|
||||
VERSION="$TRAVIS_TAG"
|
||||
else
|
||||
# If we're not on master and we're not building a tag, exit early.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
# Assume we're not tagging `latest` by default, and never on master.
|
||||
TAG_LATEST=false
|
||||
if [[ "$BRANCH" == "master" ]]; then
|
||||
echo "Building master, not tagging latest."
|
||||
elif [[ "$TRAVIS_TAG" == "$HIGHEST" ]]; then
|
||||
TAG_LATEST=true
|
||||
fi
|
||||
|
||||
# Debugging info
|
||||
echo "Highest tag found: $HIGHEST"
|
||||
echo "BRANCH: $BRANCH"
|
||||
echo "TRAVIS_TAG: $TRAVIS_TAG"
|
||||
echo "TAG_LATEST: $TAG_LATEST"
|
||||
|
||||
openssl aes-256-cbc -K $encrypted_f58ab4413c21_key -iv $encrypted_f58ab4413c21_iv -in heptio-images-fac92d2303ac.json.enc -out heptio-images-fac92d2303ac.json -d
|
||||
gcloud auth activate-service-account --key-file heptio-images-fac92d2303ac.json
|
||||
gcloud auth configure-docker -q
|
||||
unset GIT_HTTP_USER_AGENT
|
||||
|
||||
echo "Building and pushing container images."
|
||||
|
||||
VERSION="$VERSION" TAG_LATEST="$TAG_LATEST" make all-containers all-push
|
||||
34
hack/get-restic-ppc64le.sh
Executable file
34
hack/get-restic-ppc64le.sh
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [ -z "${RESTIC_VERSION}" ]; then
|
||||
echo "RESTIC_VERSION must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -d "_output/bin/linux/ppc64le/" ]; then
|
||||
mkdir -p _output/bin/linux/ppc64le/
|
||||
fi
|
||||
|
||||
wget --quiet https://oplab9.parqtec.unicamp.br/pub/ppc64el/restic/restic-${RESTIC_VERSION}
|
||||
mv restic-${RESTIC_VERSION} _output/bin/linux/ppc64le/restic
|
||||
chmod +x _output/bin/linux/ppc64le/restic
|
||||
|
||||
@@ -42,7 +42,7 @@ ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
|
||||
$@
|
||||
|
||||
go run ${GOPATH}/src/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \
|
||||
crd \
|
||||
crd:crdVersions=v1beta1,preserveUnknownFields=false \
|
||||
output:dir=pkg/generated/crds/manifests \
|
||||
paths=./pkg/apis/velero/v1/...
|
||||
|
||||
|
||||
Binary file not shown.
@@ -25,7 +25,7 @@ type DownloadRequestSpec struct {
|
||||
}
|
||||
|
||||
// DownloadTargetKind represents what type of file to download.
|
||||
// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshot;BackupResourceList;RestoreLog;RestoreResults
|
||||
// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupResourceList;RestoreLog;RestoreResults
|
||||
type DownloadTargetKind string
|
||||
|
||||
const (
|
||||
|
||||
@@ -1741,7 +1741,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with zone annotation creates a snapshot",
|
||||
name: "persistent volume with deprecated zone annotation creates a snapshot",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
@@ -1774,6 +1774,74 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with GA zone annotation creates a snapshot",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("topology.kubernetes.io/zone", "zone-1")).Result(),
|
||||
),
|
||||
},
|
||||
snapshotterGetter: map[string]velero.VolumeSnapshotter{
|
||||
"default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false),
|
||||
},
|
||||
want: []*volume.Snapshot{
|
||||
{
|
||||
Spec: volume.SnapshotSpec{
|
||||
BackupName: "backup-1",
|
||||
Location: "default",
|
||||
PersistentVolumeName: "pv-1",
|
||||
ProviderVolumeID: "vol-1",
|
||||
VolumeAZ: "zone-1",
|
||||
VolumeType: "type-1",
|
||||
VolumeIOPS: int64Ptr(100),
|
||||
},
|
||||
Status: volume.SnapshotStatus{
|
||||
Phase: volume.SnapshotPhaseCompleted,
|
||||
ProviderSnapshotID: "vol-1-snapshot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "persistent volume with both GA and deprecated zone annotation creates a snapshot and should use the GA",
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabelsMap(map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone-1-deprecated", "topology.kubernetes.io/zone": "zone-1-ga"})).Result(),
|
||||
),
|
||||
},
|
||||
snapshotterGetter: map[string]velero.VolumeSnapshotter{
|
||||
"default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1-ga", "type-1", 100, false),
|
||||
},
|
||||
want: []*volume.Snapshot{
|
||||
{
|
||||
Spec: volume.SnapshotSpec{
|
||||
BackupName: "backup-1",
|
||||
Location: "default",
|
||||
PersistentVolumeName: "pv-1",
|
||||
ProviderVolumeID: "vol-1",
|
||||
VolumeAZ: "zone-1-ga",
|
||||
VolumeType: "type-1",
|
||||
VolumeIOPS: int64Ptr(100),
|
||||
},
|
||||
Status: volume.SnapshotStatus{
|
||||
Phase: volume.SnapshotPhaseCompleted,
|
||||
ProviderSnapshotID: "vol-1-snapshot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error returned from CreateSnapshot results in a failed snapshot",
|
||||
req: &Request{
|
||||
|
||||
@@ -384,9 +384,14 @@ func (ib *defaultItemBackupper) volumeSnapshotter(snapshotLocation *api.VolumeSn
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
// zoneLabelDeprecated is the label that stores availability-zone info
|
||||
// on PVs this is deprecated on Kubernetes >= 1.17.0
|
||||
// zoneLabel is the label that stores availability-zone info
|
||||
// on PVs
|
||||
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
|
||||
const (
|
||||
zoneLabelDeprecated = "failure-domain.beta.kubernetes.io/zone"
|
||||
zoneLabel = "topology.kubernetes.io/zone"
|
||||
)
|
||||
|
||||
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
|
||||
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
|
||||
@@ -415,9 +420,15 @@ func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log log
|
||||
}
|
||||
}
|
||||
|
||||
pvFailureDomainZone := pv.Labels[zoneLabel]
|
||||
if pvFailureDomainZone == "" {
|
||||
log.Infof("label %q is not present on PersistentVolume", zoneLabel)
|
||||
// TODO: -- once failure-domain.beta.kubernetes.io/zone is no longer
|
||||
// supported in any velero-supported version of Kubernetes, remove fallback checking of it
|
||||
pvFailureDomainZone, labelFound := pv.Labels[zoneLabel]
|
||||
if !labelFound {
|
||||
log.Infof("label %q is not present on PersistentVolume, checking deprecated label...", zoneLabel)
|
||||
pvFailureDomainZone, labelFound = pv.Labels[zoneLabelDeprecated]
|
||||
if !labelFound {
|
||||
log.Infof("label %q is not present on PersistentVolume", zoneLabelDeprecated)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -456,9 +467,10 @@ func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log log
|
||||
|
||||
log = log.WithField("volumeID", volumeID)
|
||||
|
||||
tags := ib.backupRequest.GetLabels()
|
||||
if tags == nil {
|
||||
tags = map[string]string{}
|
||||
// create tags from the backup's labels
|
||||
tags := map[string]string{}
|
||||
for k, v := range ib.backupRequest.GetLabels() {
|
||||
tags[k] = v
|
||||
}
|
||||
tags["velero.io/backup"] = ib.backupRequest.Name
|
||||
tags["velero.io/pv"] = pv.Name
|
||||
|
||||
@@ -19,7 +19,6 @@ package backup
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -297,34 +296,42 @@ func (rb *defaultResourceBackupper) backupItem(
|
||||
return backedUpItem
|
||||
}
|
||||
|
||||
// Adds CRD to the backup if one is found corresponding to this resource
|
||||
func (rb *defaultResourceBackupper) backupCRD(
|
||||
log logrus.FieldLogger,
|
||||
gr schema.GroupResource,
|
||||
itemBackupper ItemBackupper,
|
||||
) {
|
||||
crdGr := schema.GroupResource{Group: apiextv1beta1.GroupName, Resource: "customresourcedefinitions"}
|
||||
crdClient, err := rb.dynamicFactory.ClientForGroupVersionResource(apiextv1beta1.SchemeGroupVersion,
|
||||
metav1.APIResource{
|
||||
Name: "customresourcedefinitions",
|
||||
Namespaced: false,
|
||||
},
|
||||
"",
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error getting dynamic client for CRDs")
|
||||
return
|
||||
}
|
||||
// backupCRD checks if the resource is a custom resource, and if so, backs up the custom resource definition
|
||||
// associated with it.
|
||||
func (rb *defaultResourceBackupper) backupCRD(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper ItemBackupper) {
|
||||
crdGroupResource := kuberesource.CustomResourceDefinitions
|
||||
|
||||
unstructured, err := crdClient.Get(gr.String(), metav1.GetOptions{})
|
||||
log.Debugf("Getting server preferred API version for %s", crdGroupResource)
|
||||
gvr, apiResource, err := rb.discoveryHelper.ResourceFor(crdGroupResource.WithVersion(""))
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
log.WithError(errors.WithStack(err)).Error("Error getting CRD")
|
||||
}
|
||||
log.WithError(errors.WithStack(err)).Errorf("Error getting resolved resource for %s", crdGroupResource)
|
||||
return
|
||||
}
|
||||
log.Infof("Found associated CRD to add to backup %d", gr.String())
|
||||
_ = rb.backupItem(log, crdGr, itemBackupper, unstructured)
|
||||
log.Debugf("Got server preferred API version %s for %s", gvr.Version, crdGroupResource)
|
||||
|
||||
log.Debugf("Getting dynamic client for %s", gvr.String())
|
||||
crdClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), apiResource, "")
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Errorf("Error getting dynamic client for %s", crdGroupResource)
|
||||
return
|
||||
}
|
||||
log.Debugf("Got dynamic client for %s", gvr.String())
|
||||
|
||||
// try to get a CRD whose name matches the provided GroupResource
|
||||
unstructured, err := crdClient.Get(gr.String(), metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
// not found: this means the GroupResource provided was not a
|
||||
// custom resource, so there's no CRD to back up.
|
||||
log.Debugf("No CRD found for GroupResource %s", gr.String())
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Errorf("Error getting CRD %s", gr.String())
|
||||
return
|
||||
}
|
||||
log.Infof("Found associated CRD %s to add to backup", gr.String())
|
||||
|
||||
rb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured)
|
||||
}
|
||||
|
||||
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
|
||||
|
||||
@@ -164,6 +164,12 @@ func (b *BackupBuilder) StartTimestamp(val time.Time) *BackupBuilder {
|
||||
return b
|
||||
}
|
||||
|
||||
// CompletionTimestamp sets the Backup's completion timestamp.
|
||||
func (b *BackupBuilder) CompletionTimestamp(val time.Time) *BackupBuilder {
|
||||
b.object.Status.CompletionTimestamp = &metav1.Time{Time: val}
|
||||
return b
|
||||
}
|
||||
|
||||
// Hooks sets the Backup's hooks.
|
||||
func (b *BackupBuilder) Hooks(hooks velerov1api.BackupHooks) *BackupBuilder {
|
||||
b.object.Spec.Hooks = hooks
|
||||
|
||||
@@ -41,12 +41,18 @@ func ForCustomResourceDefinition(name string) *CustomResourceDefinitionBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
// Condition adds a CustomResourceDefinitionCondition objects to a CustomResourceDefinitionBuilder.
|
||||
func (c *CustomResourceDefinitionBuilder) Condition(cond apiextv1beta1.CustomResourceDefinitionCondition) *CustomResourceDefinitionBuilder {
|
||||
c.object.Status.Conditions = append(c.object.Status.Conditions, cond)
|
||||
return c
|
||||
}
|
||||
|
||||
// Result returns the built CustomResourceDefinition.
|
||||
func (b *CustomResourceDefinitionBuilder) Result() *apiextv1beta1.CustomResourceDefinition {
|
||||
return b.object
|
||||
}
|
||||
|
||||
// ObjectMeta applies functional options to the Namespace's ObjectMeta.
|
||||
// ObjectMeta applies functional options to the CustomResourceDefinition's ObjectMeta.
|
||||
func (b *CustomResourceDefinitionBuilder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionBuilder {
|
||||
for _, opt := range opts {
|
||||
opt(b.object)
|
||||
@@ -54,3 +60,32 @@ func (b *CustomResourceDefinitionBuilder) ObjectMeta(opts ...ObjectMetaOpt) *Cus
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// CustomResourceDefinitionConditionBuilder builds CustomResourceDefinitionCondition objects.
|
||||
type CustomResourceDefinitionConditionBuilder struct {
|
||||
object apiextv1beta1.CustomResourceDefinitionCondition
|
||||
}
|
||||
|
||||
// ForCustomResourceDefinitionConditionBuilder is the construction for a CustomResourceDefinitionConditionBuilder.
|
||||
func ForCustomResourceDefinitionCondition() *CustomResourceDefinitionConditionBuilder {
|
||||
return &CustomResourceDefinitionConditionBuilder{
|
||||
object: apiextv1beta1.CustomResourceDefinitionCondition{},
|
||||
}
|
||||
}
|
||||
|
||||
// Type sets the Condition's type.
|
||||
func (c *CustomResourceDefinitionConditionBuilder) Type(t apiextv1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionBuilder {
|
||||
c.object.Type = t
|
||||
return c
|
||||
}
|
||||
|
||||
// Status sets the Condition's status.
|
||||
func (c *CustomResourceDefinitionConditionBuilder) Status(cs apiextv1beta1.ConditionStatus) *CustomResourceDefinitionConditionBuilder {
|
||||
c.object.Status = cs
|
||||
return c
|
||||
}
|
||||
|
||||
// Results returns the built CustomResourceDefinitionCondition.
|
||||
func (b *CustomResourceDefinitionConditionBuilder) Result() apiextv1beta1.CustomResourceDefinitionCondition {
|
||||
return b.object
|
||||
}
|
||||
|
||||
@@ -34,19 +34,19 @@ func Config(kubeconfig, kubecontext, baseName string, qps float32, burst int) (*
|
||||
loadingRules.ExplicitPath = kubeconfig
|
||||
configOverrides := &clientcmd.ConfigOverrides{CurrentContext: kubecontext}
|
||||
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
|
||||
|
||||
clientConfig, err := kubeConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error finding Kubernetes API server config in --kubeconfig, $KUBECONFIG, or in-cluster configuration")
|
||||
}
|
||||
|
||||
if qps > 0.0 {
|
||||
clientConfig.QPS = qps
|
||||
}
|
||||
|
||||
if burst > 0 {
|
||||
clientConfig.Burst = burst
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
clientConfig.UserAgent = buildUserAgent(
|
||||
baseName,
|
||||
buildinfo.Version,
|
||||
|
||||
@@ -69,11 +69,11 @@ func SaveConfig(config VeleroConfig) error {
|
||||
|
||||
// Try to make the directory in case it doesn't exist
|
||||
dir := filepath.Dir(fileName)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
|
||||
configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
@@ -66,6 +67,7 @@ type InstallOptions struct {
|
||||
DefaultResticMaintenanceFrequency time.Duration
|
||||
Plugins flag.StringArray
|
||||
NoDefaultBackupLocation bool
|
||||
CRDsOnly bool
|
||||
}
|
||||
|
||||
// BindFlags adds command line values to the options struct.
|
||||
@@ -96,6 +98,7 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.BoolVar(&o.Wait, "wait", o.Wait, "wait for Velero deployment to be ready. Optional.")
|
||||
flags.DurationVar(&o.DefaultResticMaintenanceFrequency, "default-restic-prune-frequency", o.DefaultResticMaintenanceFrequency, "how often 'restic prune' is run for restic repositories by default. Optional.")
|
||||
flags.Var(&o.Plugins, "plugins", "Plugin container images to install into the Velero Deployment")
|
||||
flags.BoolVar(&o.CRDsOnly, "crds-only", o.CRDsOnly, "only generate CustomResourceDefinition resources. Useful for updating CRDs for an existing Velero install.")
|
||||
}
|
||||
|
||||
// NewInstallOptions instantiates a new, default InstallOptions struct.
|
||||
@@ -118,6 +121,7 @@ func NewInstallOptions() *InstallOptions {
|
||||
// Default to creating a VSL unless we're told otherwise
|
||||
UseVolumeSnapshots: true,
|
||||
NoDefaultBackupLocation: false,
|
||||
CRDsOnly: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,19 +195,22 @@ Use '--wait' to wait for the Velero Deployment to be ready before proceeding.
|
||||
Use '-o yaml' or '-o json' with '--dry-run' to output all generated resources as text instead of sending the resources to the server.
|
||||
This is useful as a starting point for more customized installations.
|
||||
`,
|
||||
Example: ` # velero install --bucket mybucket --provider gcp --secret-file ./gcp-service-account.json
|
||||
Example: ` # velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket mybucket --secret-file ./gcp-service-account.json
|
||||
|
||||
# velero install --bucket backups --provider aws --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2
|
||||
# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2
|
||||
|
||||
# velero install --bucket backups --provider aws --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-restic
|
||||
# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-restic
|
||||
|
||||
# velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --wait
|
||||
# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --wait
|
||||
|
||||
# velero install --bucket backups --provider aws --backup-location-config region=us-west-2 --snapshot-location-config region=us-west-2 --no-secret --pod-annotations iam.amazonaws.com/role=arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
|
||||
# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --backup-location-config region=us-west-2 --snapshot-location-config region=us-west-2 --no-secret --pod-annotations iam.amazonaws.com/role=arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
|
||||
|
||||
# velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --velero-pod-cpu-request=1000m --velero-pod-cpu-limit=5000m --velero-pod-mem-request=512Mi --velero-pod-mem-limit=1024Mi
|
||||
# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --velero-pod-cpu-request=1000m --velero-pod-cpu-limit=5000m --velero-pod-mem-request=512Mi --velero-pod-mem-limit=1024Mi
|
||||
|
||||
# velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --restic-pod-cpu-request=1000m --restic-pod-cpu-limit=5000m --restic-pod-mem-request=512Mi --restic-pod-mem-limit=1024Mi
|
||||
# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --restic-pod-cpu-request=1000m --restic-pod-cpu-limit=5000m --restic-pod-mem-request=512Mi --restic-pod-mem-limit=1024Mi
|
||||
|
||||
# velero install --provider azure --plugins velero/velero-plugin-for-microsoft-azure:v1.0.0 --bucket $BLOB_CONTAINER --secret-file ./credentials-velero \
|
||||
--backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] --snapshot-location-config apiTimeout=<YOUR_TIMEOUT>[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID]
|
||||
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
@@ -222,14 +229,19 @@ This is useful as a starting point for more customized installations.
|
||||
|
||||
// Run executes a command in the context of the provided arguments.
|
||||
func (o *InstallOptions) Run(c *cobra.Command, f client.Factory) error {
|
||||
vo, err := o.AsVeleroOptions()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resources *unstructured.UnstructuredList
|
||||
if o.CRDsOnly {
|
||||
resources = install.AllCRDs()
|
||||
} else {
|
||||
vo, err := o.AsVeleroOptions()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resources, err := install.AllResources(vo)
|
||||
if err != nil {
|
||||
return err
|
||||
resources, err = install.AllResources(vo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := output.PrintWithFormat(c, resources); err != nil {
|
||||
@@ -289,6 +301,11 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact
|
||||
return err
|
||||
}
|
||||
|
||||
// If we're only installing CRDs, we can skip the rest of the validation.
|
||||
if o.CRDsOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Our main 3 providers don't support bucket names starting with a dash, and a bucket name starting with one
|
||||
// can indicate that an environment variable was left blank.
|
||||
// This case will help catch that error
|
||||
|
||||
@@ -171,9 +171,13 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
|
||||
return err
|
||||
}
|
||||
case o.ScheduleName != "":
|
||||
if _, err := o.client.VeleroV1().Schedules(f.Namespace()).Get(o.ScheduleName, metav1.GetOptions{}); err != nil {
|
||||
backupItems, err := o.client.VeleroV1().Backups(f.Namespace()).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", api.ScheduleNameLabel, o.ScheduleName)})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(backupItems.Items) == 0 {
|
||||
return errors.Errorf("No backups found for the schedule %s", o.ScheduleName)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -48,6 +48,7 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
RegisterRestoreItemAction("velero.io/change-storage-class", newChangeStorageClassRestoreItemAction(f)).
|
||||
RegisterRestoreItemAction("velero.io/role-bindings", newRoleBindingItemAction).
|
||||
RegisterRestoreItemAction("velero.io/cluster-role-bindings", newClusterRoleBindingItemAction).
|
||||
RegisterRestoreItemAction("velero.io/crd-preserve-fields", newCRDV1PreserveUnknownFieldsItemAction).
|
||||
Serve()
|
||||
},
|
||||
}
|
||||
@@ -130,6 +131,10 @@ func newAddPVFromPVCRestoreItemAction(logger logrus.FieldLogger) (interface{}, e
|
||||
return restore.NewAddPVFromPVCAction(logger), nil
|
||||
}
|
||||
|
||||
func newCRDV1PreserveUnknownFieldsItemAction(logger logrus.FieldLogger) (interface{}, error) {
|
||||
return restore.NewCRDV1PreserveUnknownFieldsAction(logger), nil
|
||||
}
|
||||
|
||||
func newChangeStorageClassRestoreItemAction(f client.Factory) veleroplugin.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
client, err := f.KubeClient()
|
||||
|
||||
@@ -226,7 +226,6 @@ type server struct {
|
||||
logger logrus.FieldLogger
|
||||
logLevel logrus.Level
|
||||
pluginRegistry clientmgmt.Registry
|
||||
pluginManager clientmgmt.Manager
|
||||
resticManager restic.RepositoryManager
|
||||
metrics *metrics.ServerMetrics
|
||||
config serverConfig
|
||||
@@ -262,10 +261,6 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
|
||||
if err := pluginRegistry.DiscoverPlugins(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pluginManager := clientmgmt.NewManager(logger, logger.Level, pluginRegistry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
|
||||
@@ -288,7 +283,6 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
|
||||
logger: logger,
|
||||
logLevel: logger.Level,
|
||||
pluginRegistry: pluginRegistry,
|
||||
pluginManager: pluginManager,
|
||||
config: config,
|
||||
}
|
||||
|
||||
@@ -296,8 +290,6 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
|
||||
}
|
||||
|
||||
func (s *server) run() error {
|
||||
defer s.pluginManager.CleanupClients()
|
||||
|
||||
signals.CancelOnShutdown(s.cancelFunc, s.logger)
|
||||
|
||||
if s.config.profilerAddress != "" {
|
||||
@@ -420,6 +412,9 @@ func (s *server) veleroResourcesExist() error {
|
||||
func (s *server) validateBackupStorageLocations() error {
|
||||
s.logger.Info("Checking that all backup storage locations are valid")
|
||||
|
||||
pluginManager := clientmgmt.NewManager(s.logger, s.logLevel, s.pluginRegistry)
|
||||
defer pluginManager.CleanupClients()
|
||||
|
||||
locations, err := s.veleroClient.VeleroV1().BackupStorageLocations(s.namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
@@ -427,7 +422,7 @@ func (s *server) validateBackupStorageLocations() error {
|
||||
|
||||
var invalid []string
|
||||
for _, location := range locations.Items {
|
||||
backupStore, err := persistence.NewObjectBackupStore(&location, s.pluginManager, s.logger)
|
||||
backupStore, err := persistence.NewObjectBackupStore(&location, pluginManager, s.logger)
|
||||
if err != nil {
|
||||
invalid = append(invalid, errors.Wrapf(err, "error getting backup store for location %q", location.Name).Error())
|
||||
continue
|
||||
@@ -445,7 +440,9 @@ func (s *server) validateBackupStorageLocations() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// - Namespaces go first because all namespaced resources depend on them.
|
||||
// - Custom Resource Definitions come before Custom Resource so that they can be
|
||||
// restored with their corresponding CRD.
|
||||
// - Namespaces go second because all namespaced resources depend on them.
|
||||
// - Storage Classes are needed to create PVs and PVCs correctly.
|
||||
// - PVs go before PVCs because PVCs depend on them.
|
||||
// - PVCs go before pods or controllers so they can be mounted as volumes.
|
||||
@@ -455,9 +452,10 @@ func (s *server) validateBackupStorageLocations() error {
|
||||
// - Limit ranges go before pods or controllers so pods can use them.
|
||||
// - Pods go before controllers so they can be explicitly restored and potentially
|
||||
// have restic restores run before controllers adopt the pods.
|
||||
// - Custom Resource Definitions come before Custom Resource so that they can be
|
||||
// restored with their corresponding CRD.
|
||||
// - Replica sets go before deployments/other controllers so they can be explicitly
|
||||
// restored and be adopted by controllers.
|
||||
var defaultRestorePriorities = []string{
|
||||
"customresourcedefinitions",
|
||||
"namespaces",
|
||||
"storageclasses",
|
||||
"persistentvolumes",
|
||||
@@ -467,8 +465,11 @@ var defaultRestorePriorities = []string{
|
||||
"serviceaccounts",
|
||||
"limitranges",
|
||||
"pods",
|
||||
"replicaset",
|
||||
"customresourcedefinitions",
|
||||
// we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also
|
||||
// existed in the extensions API group, but we back up replicasets from "apps" so we want
|
||||
// to ensure that we prioritize restoring from "apps" too, since this is how they're stored
|
||||
// in the backup.
|
||||
"replicasets.apps",
|
||||
}
|
||||
|
||||
func (s *server) initRestic() error {
|
||||
|
||||
@@ -60,9 +60,7 @@ func Stream(client velerov1client.DownloadRequestsGetter, namespace, name string
|
||||
defer client.DownloadRequests(namespace).Delete(req.Name, nil)
|
||||
|
||||
listOptions := metav1.ListOptions{
|
||||
// TODO: once the minimum supported Kubernetes version is v1.9.0, uncomment the following line.
|
||||
// See http://issue.k8s.io/51046 for details.
|
||||
//FieldSelector: "metadata.name=" + req.Name
|
||||
FieldSelector: "metadata.name=" + req.Name,
|
||||
ResourceVersion: req.ResourceVersion,
|
||||
}
|
||||
watcher, err := client.DownloadRequests(namespace).Watch(listOptions)
|
||||
@@ -85,12 +83,6 @@ Loop:
|
||||
return errors.Errorf("unexpected type %T", e.Object)
|
||||
}
|
||||
|
||||
// TODO: once the minimum supported Kubernetes version is v1.9.0, remove the following check.
|
||||
// See http://issue.k8s.io/51046 for details.
|
||||
if updated.Name != req.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
switch e.Type {
|
||||
case watch.Deleted:
|
||||
errors.New("download request was unexpectedly deleted")
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017, 2019 the Velero contributors.
|
||||
Copyright 2017, 2019, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/duration"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -43,18 +42,14 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printBackupList(list *velerov1api.BackupList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printBackupList(list *velerov1api.BackupList) []metav1.TableRow {
|
||||
sortBackupsByPrefixAndTimestamp(list)
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printBackup(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printBackup(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
// sort by default alphabetically, but if backups stem from a common schedule
|
||||
@@ -83,7 +78,7 @@ func sortBackupsByPrefixAndTimestamp(list *velerov1api.BackupList) {
|
||||
})
|
||||
}
|
||||
|
||||
func printBackup(backup *velerov1api.Backup, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printBackup(backup *velerov1api.Backup) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: backup},
|
||||
}
|
||||
@@ -116,7 +111,7 @@ func printBackup(backup *velerov1api.Backup, options printers.PrintOptions) ([]m
|
||||
|
||||
row.Cells = append(row.Cells, backup.Name, status, backup.Status.StartTimestamp, humanReadableTimeFromNow(expiration), location, metav1.FormatLabelSelector(backup.Spec.LabelSelector))
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
func humanReadableTimeFromNow(when time.Time) string {
|
||||
|
||||
@@ -19,7 +19,6 @@ package output
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -35,20 +34,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printBackupStorageLocationList(list *v1.BackupStorageLocationList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printBackupStorageLocationList(list *v1.BackupStorageLocationList) []metav1.TableRow {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printBackupStorageLocation(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printBackupStorageLocation(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func printBackupStorageLocation(location *v1.BackupStorageLocation, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printBackupStorageLocation(location *v1.BackupStorageLocation) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: location},
|
||||
}
|
||||
@@ -70,5 +65,5 @@ func printBackupStorageLocation(location *v1.BackupStorageLocation, options prin
|
||||
accessMode,
|
||||
)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 the Velero contributors.
|
||||
Copyright 2017, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -25,9 +25,11 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/encode"
|
||||
)
|
||||
@@ -141,26 +143,90 @@ func printEncoded(obj runtime.Object, format string) (bool, error) {
|
||||
}
|
||||
|
||||
func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
|
||||
printer, err := NewPrinter(cmd)
|
||||
// 1. generate table
|
||||
var table *metav1.Table
|
||||
|
||||
switch obj.(type) {
|
||||
case *velerov1api.Backup:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: backupColumns,
|
||||
Rows: printBackup(obj.(*velerov1api.Backup)),
|
||||
}
|
||||
case *velerov1api.BackupList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: backupColumns,
|
||||
Rows: printBackupList(obj.(*velerov1api.BackupList)),
|
||||
}
|
||||
case *velerov1api.Restore:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: restoreColumns,
|
||||
Rows: printRestore(obj.(*velerov1api.Restore)),
|
||||
}
|
||||
case *velerov1api.RestoreList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: restoreColumns,
|
||||
Rows: printRestoreList(obj.(*velerov1api.RestoreList)),
|
||||
}
|
||||
case *velerov1api.Schedule:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: scheduleColumns,
|
||||
Rows: printSchedule(obj.(*velerov1api.Schedule)),
|
||||
}
|
||||
case *velerov1api.ScheduleList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: scheduleColumns,
|
||||
Rows: printScheduleList(obj.(*velerov1api.ScheduleList)),
|
||||
}
|
||||
case *velerov1api.ResticRepository:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: resticRepoColumns,
|
||||
Rows: printResticRepo(obj.(*velerov1api.ResticRepository)),
|
||||
}
|
||||
case *velerov1api.ResticRepositoryList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: resticRepoColumns,
|
||||
Rows: printResticRepoList(obj.(*velerov1api.ResticRepositoryList)),
|
||||
}
|
||||
case *velerov1api.BackupStorageLocation:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: backupStorageLocationColumns,
|
||||
Rows: printBackupStorageLocation(obj.(*velerov1api.BackupStorageLocation)),
|
||||
}
|
||||
case *velerov1api.BackupStorageLocationList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: backupStorageLocationColumns,
|
||||
Rows: printBackupStorageLocationList(obj.(*velerov1api.BackupStorageLocationList)),
|
||||
}
|
||||
case *velerov1api.VolumeSnapshotLocation:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: volumeSnapshotLocationColumns,
|
||||
Rows: printVolumeSnapshotLocation(obj.(*velerov1api.VolumeSnapshotLocation)),
|
||||
}
|
||||
case *velerov1api.VolumeSnapshotLocationList:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: volumeSnapshotLocationColumns,
|
||||
Rows: printVolumeSnapshotLocationList(obj.(*velerov1api.VolumeSnapshotLocationList)),
|
||||
}
|
||||
case *velerov1api.ServerStatusRequest:
|
||||
table = &metav1.Table{
|
||||
ColumnDefinitions: pluginColumns,
|
||||
Rows: printPluginList(obj.(*velerov1api.ServerStatusRequest)),
|
||||
}
|
||||
default:
|
||||
return false, errors.Errorf("type %T is not supported", obj)
|
||||
}
|
||||
|
||||
if table == nil {
|
||||
return false, errors.Errorf("error generating table for type %T", obj)
|
||||
}
|
||||
|
||||
// 2. print table
|
||||
tablePrinter, err := NewPrinter(cmd)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
printer.TableHandler(backupColumns, printBackup)
|
||||
printer.TableHandler(backupColumns, printBackupList)
|
||||
printer.TableHandler(restoreColumns, printRestore)
|
||||
printer.TableHandler(restoreColumns, printRestoreList)
|
||||
printer.TableHandler(scheduleColumns, printSchedule)
|
||||
printer.TableHandler(scheduleColumns, printScheduleList)
|
||||
printer.TableHandler(resticRepoColumns, printResticRepo)
|
||||
printer.TableHandler(resticRepoColumns, printResticRepoList)
|
||||
printer.TableHandler(backupStorageLocationColumns, printBackupStorageLocation)
|
||||
printer.TableHandler(backupStorageLocationColumns, printBackupStorageLocationList)
|
||||
printer.TableHandler(volumeSnapshotLocationColumns, printVolumeSnapshotLocation)
|
||||
printer.TableHandler(volumeSnapshotLocationColumns, printVolumeSnapshotLocationList)
|
||||
printer.TableHandler(pluginColumns, printPluginList)
|
||||
|
||||
err = printer.PrintObj(obj, os.Stdout)
|
||||
err = tablePrinter.PrintObj(table, os.Stdout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -170,7 +236,7 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
|
||||
|
||||
// NewPrinter returns a printer for doing human-readable table printing of
|
||||
// Velero objects.
|
||||
func NewPrinter(cmd *cobra.Command) (*printers.HumanReadablePrinter, error) {
|
||||
func NewPrinter(cmd *cobra.Command) (printers.ResourcePrinter, error) {
|
||||
options := printers.PrintOptions{
|
||||
ShowLabels: GetShowLabelsValue(cmd),
|
||||
ColumnLabels: GetLabelColumnsValues(cmd),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
Copyright 2019, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"sort"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -34,20 +33,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printPluginList(list *velerov1api.ServerStatusRequest, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printPluginList(list *velerov1api.ServerStatusRequest) []metav1.TableRow {
|
||||
plugins := list.Status.Plugins
|
||||
sortByKindAndName(plugins)
|
||||
|
||||
rows := make([]metav1.TableRow, 0, len(plugins))
|
||||
|
||||
for _, plugin := range plugins {
|
||||
r, err := printPlugin(plugin, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printPlugin(plugin)...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func sortByKindAndName(plugins []velerov1api.PluginInfo) {
|
||||
@@ -59,10 +54,10 @@ func sortByKindAndName(plugins []velerov1api.PluginInfo) {
|
||||
})
|
||||
}
|
||||
|
||||
func printPlugin(plugin velerov1api.PluginInfo, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printPlugin(plugin velerov1api.PluginInfo) []metav1.TableRow {
|
||||
row := metav1.TableRow{}
|
||||
|
||||
row.Cells = append(row.Cells, plugin.Name, plugin.Kind)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 the Velero contributors.
|
||||
Copyright 2018, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -19,7 +19,6 @@ package output
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -34,20 +33,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printResticRepoList(list *v1.ResticRepositoryList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printResticRepoList(list *v1.ResticRepositoryList) []metav1.TableRow {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printResticRepo(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printResticRepo(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func printResticRepo(repo *v1.ResticRepository, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printResticRepo(repo *v1.ResticRepository) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: repo},
|
||||
}
|
||||
@@ -68,5 +63,5 @@ func printResticRepo(repo *v1.ResticRepository, options printers.PrintOptions) (
|
||||
lastMaintenance,
|
||||
)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 the Velero contributors.
|
||||
Copyright 2017, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -19,7 +19,6 @@ package output
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -38,20 +37,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printRestoreList(list *v1.RestoreList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printRestoreList(list *v1.RestoreList) []metav1.TableRow {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printRestore(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printRestore(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func printRestore(restore *v1.Restore, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printRestore(restore *v1.Restore) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: restore},
|
||||
}
|
||||
@@ -71,5 +66,5 @@ func printRestore(restore *v1.Restore, options printers.PrintOptions) ([]metav1.
|
||||
metav1.FormatLabelSelector(restore.Spec.LabelSelector),
|
||||
)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 the Velero contributors.
|
||||
Copyright 2017, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -40,20 +39,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printScheduleList(list *v1.ScheduleList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printScheduleList(list *v1.ScheduleList) []metav1.TableRow {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printSchedule(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printSchedule(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func printSchedule(schedule *v1.Schedule, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printSchedule(schedule *v1.Schedule) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: schedule},
|
||||
}
|
||||
@@ -78,5 +73,5 @@ func printSchedule(schedule *v1.Schedule, options printers.PrintOptions) ([]meta
|
||||
metav1.FormatLabelSelector(schedule.Spec.Template.LabelSelector),
|
||||
)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 the Velero contributors.
|
||||
Copyright 2018, 2020 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -19,7 +19,6 @@ package output
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
|
||||
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
@@ -33,20 +32,16 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func printVolumeSnapshotLocationList(list *v1.VolumeSnapshotLocationList, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printVolumeSnapshotLocationList(list *v1.VolumeSnapshotLocationList) []metav1.TableRow {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
|
||||
for i := range list.Items {
|
||||
r, err := printVolumeSnapshotLocation(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
rows = append(rows, printVolumeSnapshotLocation(&list.Items[i])...)
|
||||
}
|
||||
return rows, nil
|
||||
return rows
|
||||
}
|
||||
|
||||
func printVolumeSnapshotLocation(location *v1.VolumeSnapshotLocation, options printers.PrintOptions) ([]metav1.TableRow, error) {
|
||||
func printVolumeSnapshotLocation(location *v1.VolumeSnapshotLocation) []metav1.TableRow {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: location},
|
||||
}
|
||||
@@ -56,5 +51,5 @@ func printVolumeSnapshotLocation(location *v1.VolumeSnapshotLocation, options pr
|
||||
location.Spec.Provider,
|
||||
)
|
||||
|
||||
return []metav1.TableRow{row}, nil
|
||||
return []metav1.TableRow{row}
|
||||
}
|
||||
|
||||
@@ -148,12 +148,44 @@ func NewBackupController(
|
||||
}
|
||||
|
||||
func (c *backupController) resync() {
|
||||
// recompute backup_total metric
|
||||
backups, err := c.lister.List(labels.Everything())
|
||||
if err != nil {
|
||||
c.logger.Error(err, "Error computing backup_total metric")
|
||||
} else {
|
||||
c.metrics.SetBackupTotal(int64(len(backups)))
|
||||
}
|
||||
|
||||
// recompute backup_last_successful_timestamp metric for each
|
||||
// schedule (including the empty schedule, i.e. ad-hoc backups)
|
||||
for schedule, timestamp := range getLastSuccessBySchedule(backups) {
|
||||
c.metrics.SetBackupLastSuccessfulTimestamp(schedule, timestamp)
|
||||
}
|
||||
}
|
||||
|
||||
// getLastSuccessBySchedule finds the most recent completed backup for each schedule
|
||||
// and returns a map of schedule name -> completion time of the most recent completed
|
||||
// backup. This map includes an entry for ad-hoc/non-scheduled backups, where the key
|
||||
// is the empty string.
|
||||
func getLastSuccessBySchedule(backups []*velerov1api.Backup) map[string]time.Time {
|
||||
lastSuccessBySchedule := map[string]time.Time{}
|
||||
for _, backup := range backups {
|
||||
if backup.Status.Phase != velerov1api.BackupPhaseCompleted {
|
||||
continue
|
||||
}
|
||||
if backup.Status.CompletionTimestamp == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
schedule := backup.Labels[velerov1api.ScheduleNameLabel]
|
||||
timestamp := backup.Status.CompletionTimestamp.Time
|
||||
|
||||
if timestamp.After(lastSuccessBySchedule[schedule]) {
|
||||
lastSuccessBySchedule[schedule] = timestamp
|
||||
}
|
||||
}
|
||||
|
||||
return lastSuccessBySchedule
|
||||
}
|
||||
|
||||
func (c *backupController) processBackup(key string) error {
|
||||
|
||||
@@ -738,3 +738,119 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test_getLastSuccessBySchedule verifies that the getLastSuccessBySchedule helper function correctly returns
|
||||
// the completion timestamp of the most recent completed backup for each schedule, including an entry for ad-hoc
|
||||
// or non-scheduled backups.
|
||||
func Test_getLastSuccessBySchedule(t *testing.T) {
|
||||
buildBackup := func(phase velerov1api.BackupPhase, completion time.Time, schedule string) *velerov1api.Backup {
|
||||
b := builder.ForBackup("", "").
|
||||
ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, schedule)).
|
||||
Phase(phase)
|
||||
|
||||
if !completion.IsZero() {
|
||||
b.CompletionTimestamp(completion)
|
||||
}
|
||||
|
||||
return b.Result()
|
||||
}
|
||||
|
||||
// create a static "base time" that can be used to easily construct completion timestamps
|
||||
// by using the .Add(...) method.
|
||||
baseTime, err := time.Parse(time.RFC1123, time.RFC1123)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
backups []*velerov1api.Backup
|
||||
want map[string]time.Time
|
||||
}{
|
||||
{
|
||||
name: "when backups is nil, an empty map is returned",
|
||||
backups: nil,
|
||||
want: map[string]time.Time{},
|
||||
},
|
||||
{
|
||||
name: "when backups is empty, an empty map is returned",
|
||||
backups: []*velerov1api.Backup{},
|
||||
want: map[string]time.Time{},
|
||||
},
|
||||
{
|
||||
name: "when multiple completed backups for a schedule exist, the latest one is returned",
|
||||
backups: []*velerov1api.Backup{
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime, "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(time.Second), "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(-time.Second), "schedule-1"),
|
||||
},
|
||||
want: map[string]time.Time{
|
||||
"schedule-1": baseTime.Add(time.Second),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when the most recent backup for a schedule is Failed, the timestamp of the most recent Completed one is returned",
|
||||
backups: []*velerov1api.Backup{
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime, "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseFailed, baseTime.Add(time.Second), "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(-time.Second), "schedule-1"),
|
||||
},
|
||||
want: map[string]time.Time{
|
||||
"schedule-1": baseTime,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when there are no Completed backups for a schedule, it's not returned",
|
||||
backups: []*velerov1api.Backup{
|
||||
buildBackup(velerov1api.BackupPhaseInProgress, baseTime, "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseFailed, baseTime.Add(time.Second), "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhasePartiallyFailed, baseTime.Add(-time.Second), "schedule-1"),
|
||||
},
|
||||
want: map[string]time.Time{},
|
||||
},
|
||||
{
|
||||
name: "when backups exist without a schedule, the most recent Completed one is returned",
|
||||
backups: []*velerov1api.Backup{
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime, ""),
|
||||
buildBackup(velerov1api.BackupPhaseFailed, baseTime.Add(time.Second), ""),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(-time.Second), ""),
|
||||
},
|
||||
want: map[string]time.Time{
|
||||
"": baseTime,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when backups exist for multiple schedules, the most recent Completed timestamp for each schedule is returned",
|
||||
backups: []*velerov1api.Backup{
|
||||
// ad-hoc backups (no schedule)
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(30*time.Minute), ""),
|
||||
buildBackup(velerov1api.BackupPhaseFailed, baseTime.Add(time.Hour), ""),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(-time.Second), ""),
|
||||
|
||||
// schedule-1
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime, "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseFailed, baseTime.Add(time.Second), "schedule-1"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(-time.Second), "schedule-1"),
|
||||
|
||||
// schedule-2
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(24*time.Hour), "schedule-2"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(48*time.Hour), "schedule-2"),
|
||||
buildBackup(velerov1api.BackupPhaseCompleted, baseTime.Add(72*time.Hour), "schedule-2"),
|
||||
|
||||
// schedule-3
|
||||
buildBackup(velerov1api.BackupPhaseNew, baseTime, "schedule-3"),
|
||||
buildBackup(velerov1api.BackupPhaseInProgress, baseTime.Add(time.Minute), "schedule-3"),
|
||||
buildBackup(velerov1api.BackupPhasePartiallyFailed, baseTime.Add(2*time.Minute), "schedule-3"),
|
||||
},
|
||||
want: map[string]time.Time{
|
||||
"": baseTime.Add(30 * time.Minute),
|
||||
"schedule-1": baseTime,
|
||||
"schedule-2": baseTime.Add(72 * time.Hour),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.Equal(t, tc.want, getLastSuccessBySchedule(tc.backups))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user