mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-03-27 12:05:05 +00:00
Compare commits
289 Commits
v0.10.1
...
v1.0.0-rc.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d05f8e53d8 | ||
|
|
a1eb8411f9 | ||
|
|
e52b04dfa3 | ||
|
|
8c2c96adeb | ||
|
|
a27e1a4c02 | ||
|
|
757a9862a5 | ||
|
|
363748667b | ||
|
|
d4f9c62449 | ||
|
|
e0bc14d56b | ||
|
|
f27ff115e2 | ||
|
|
b82e221310 | ||
|
|
c96b7b3e40 | ||
|
|
58d34700da | ||
|
|
05b8edf894 | ||
|
|
e5e3bc6b89 | ||
|
|
90c89f764e | ||
|
|
cdd583b8bf | ||
|
|
e0b3e6fa5f | ||
|
|
24d28c8633 | ||
|
|
0462217c9b | ||
|
|
e35cf8845d | ||
|
|
7a04e987ea | ||
|
|
4352158435 | ||
|
|
71f358c160 | ||
|
|
884e512f93 | ||
|
|
26d86f514c | ||
|
|
c0e105f5cd | ||
|
|
6ebedf6b25 | ||
|
|
de79f4d0b7 | ||
|
|
2406994740 | ||
|
|
ac9e0173e8 | ||
|
|
c95abf69b2 | ||
|
|
f3d36afd3a | ||
|
|
0205a43028 | ||
|
|
4e12b08953 | ||
|
|
721d19c7bf | ||
|
|
28612afa27 | ||
|
|
987ce55894 | ||
|
|
4514c2e27d | ||
|
|
660080e6f3 | ||
|
|
dea81bbe15 | ||
|
|
3b5de11c74 | ||
|
|
4ed63edea0 | ||
|
|
8392e6d83f | ||
|
|
42f351b000 | ||
|
|
bf19623e82 | ||
|
|
f2b4e73e2e | ||
|
|
9e19ab8d8b | ||
|
|
26b940c81c | ||
|
|
011db15f1c | ||
|
|
dda76b05a8 | ||
|
|
c7025b98e4 | ||
|
|
58bb7ed3aa | ||
|
|
6f496a8921 | ||
|
|
3155bb159f | ||
|
|
dcd663b8cf | ||
|
|
0471c6ee35 | ||
|
|
15aaa7bb9d | ||
|
|
fc25f0ae89 | ||
|
|
c201a2c103 | ||
|
|
1a55964326 | ||
|
|
eb30ec0666 | ||
|
|
e7e666306c | ||
|
|
8cd46b8a0c | ||
|
|
db9f8e16d9 | ||
|
|
9cd2862c8e | ||
|
|
7e949080cf | ||
|
|
7d28f82540 | ||
|
|
b6cfce2dd9 | ||
|
|
64dd3ed7ad | ||
|
|
e9c131df71 | ||
|
|
662a36df90 | ||
|
|
38ccb40ca1 | ||
|
|
c59d03dfb1 | ||
|
|
c7bb288d87 | ||
|
|
05a88345e5 | ||
|
|
01d0b026e9 | ||
|
|
134323fbf7 | ||
|
|
8870281afc | ||
|
|
6dd007b507 | ||
|
|
e85c367ce5 | ||
|
|
b01b12472f | ||
|
|
475cf2ab60 | ||
|
|
f8f0d15da2 | ||
|
|
6020823aaf | ||
|
|
ff642d739d | ||
|
|
0750b2c789 | ||
|
|
975bec692b | ||
|
|
38604e88fe | ||
|
|
c475108345 | ||
|
|
f2418052e4 | ||
|
|
2cef9d26ec | ||
|
|
5bc6695109 | ||
|
|
8c9ae491f0 | ||
|
|
39bab5ada9 | ||
|
|
316e6cc67e | ||
|
|
6f474016a6 | ||
|
|
bc8f07f963 | ||
|
|
9470983d5f | ||
|
|
94f014101d | ||
|
|
c38def0849 | ||
|
|
66c6d7a026 | ||
|
|
9b9b4f666e | ||
|
|
373e4c9abe | ||
|
|
ce374584c4 | ||
|
|
c59544cb79 | ||
|
|
6ed4e1f147 | ||
|
|
b04d6b02f3 | ||
|
|
7f36f78aee | ||
|
|
892673816b | ||
|
|
c8c03a38e9 | ||
|
|
ede9a8f5b4 | ||
|
|
b87de94723 | ||
|
|
77e648eafa | ||
|
|
d49008dec0 | ||
|
|
b03da3c0ed | ||
|
|
49cb4cd5c3 | ||
|
|
3ed97db550 | ||
|
|
44acdcbc60 | ||
|
|
5d06bd4ab9 | ||
|
|
0328a70ff0 | ||
|
|
8d61cb0384 | ||
|
|
f879670906 | ||
|
|
7251c8ca81 | ||
|
|
02cbb77dea | ||
|
|
d679498c8a | ||
|
|
c326f59627 | ||
|
|
bc93b2bbac | ||
|
|
3116185e5b | ||
|
|
abee09aa2d | ||
|
|
0e0f357cef | ||
|
|
23c0d3f612 | ||
|
|
4beb8aab3c | ||
|
|
b444d3c2f1 | ||
|
|
13eaad0e64 | ||
|
|
956152d6e1 | ||
|
|
bca21a1ec0 | ||
|
|
2f47ca62ad | ||
|
|
a519547efc | ||
|
|
0167539a14 | ||
|
|
985479094f | ||
|
|
a611658436 | ||
|
|
0f442b002d | ||
|
|
a774b54ae7 | ||
|
|
2e3f00f64d | ||
|
|
c3a933d3e3 | ||
|
|
bbd28a9fb9 | ||
|
|
23b1098950 | ||
|
|
1d3d66aa77 | ||
|
|
40c7fbce09 | ||
|
|
6bf29e17aa | ||
|
|
7298a4eda0 | ||
|
|
2a36cdcbf6 | ||
|
|
dcee310745 | ||
|
|
a696cd09f2 | ||
|
|
be42ea782d | ||
|
|
9b635c0e14 | ||
|
|
477e42286c | ||
|
|
21f3169ad3 | ||
|
|
59e0ef4524 | ||
|
|
86293b68b3 | ||
|
|
e4e0ed68a6 | ||
|
|
bb9c3f6a1a | ||
|
|
3f2c28f6bb | ||
|
|
60460f6920 | ||
|
|
7b0d8217de | ||
|
|
f8baf4f4f0 | ||
|
|
b1c0e9c49b | ||
|
|
4d7add1782 | ||
|
|
7af9f8d74e | ||
|
|
ff2db31b32 | ||
|
|
bd662ab613 | ||
|
|
01f2ae76e2 | ||
|
|
a111eed2af | ||
|
|
4c73e23ce8 | ||
|
|
a71e43b2b7 | ||
|
|
1eac10ca9f | ||
|
|
7dfe58d37f | ||
|
|
78bf8fb868 | ||
|
|
7d66fc31bd | ||
|
|
183bea369d | ||
|
|
de09fd7cdc | ||
|
|
f64b37289d | ||
|
|
73514a003b | ||
|
|
7674332313 | ||
|
|
409116fce8 | ||
|
|
503b112638 | ||
|
|
b286c652ec | ||
|
|
89ca2571f3 | ||
|
|
394548afcd | ||
|
|
4ee41a13a0 | ||
|
|
4041044a93 | ||
|
|
5e12a921b5 | ||
|
|
1354e2b6ff | ||
|
|
e29aa74a23 | ||
|
|
ce3f43e876 | ||
|
|
5912fe66e5 | ||
|
|
c006d9246f | ||
|
|
1b031f0cc4 | ||
|
|
88e6a740f2 | ||
|
|
0fec56f488 | ||
|
|
e21940bee1 | ||
|
|
421b64b1fa | ||
|
|
81e741ebfc | ||
|
|
fcf21813a5 | ||
|
|
8dd1cbf62b | ||
|
|
65f3926caa | ||
|
|
31501b79b2 | ||
|
|
6bf837b233 | ||
|
|
f908d5f8c0 | ||
|
|
f8548e1ca1 | ||
|
|
58e471bda0 | ||
|
|
61eab7dca3 | ||
|
|
efc490138c | ||
|
|
80fe640b98 | ||
|
|
21c57c46b3 | ||
|
|
7353294b7f | ||
|
|
7e736ab79d | ||
|
|
5468ccf5cb | ||
|
|
032aaac508 | ||
|
|
ab2fc65c02 | ||
|
|
03b8f5397f | ||
|
|
431602e852 | ||
|
|
cb0a9281f6 | ||
|
|
783c7d850c | ||
|
|
e3e76c2067 | ||
|
|
e4771f582b | ||
|
|
4e0b0c87bb | ||
|
|
3724af259c | ||
|
|
522ee9ad36 | ||
|
|
1b3c444720 | ||
|
|
3d2b031ee4 | ||
|
|
8be6f03ef0 | ||
|
|
e2f84a1242 | ||
|
|
49eeeb04f0 | ||
|
|
e1d414338c | ||
|
|
0ffaeb949d | ||
|
|
ed73be44fd | ||
|
|
988ce573c0 | ||
|
|
780dc4551f | ||
|
|
32835c63f6 | ||
|
|
86c5c25d13 | ||
|
|
250f109c41 | ||
|
|
d8e9b772ff | ||
|
|
88fc6e2141 | ||
|
|
38ad7d71f5 | ||
|
|
e91c841c59 | ||
|
|
902c0f797f | ||
|
|
296dd6617e | ||
|
|
4cd8170386 | ||
|
|
551aaa646d | ||
|
|
0df30c1e89 | ||
|
|
378011baf6 | ||
|
|
b2b1ee44ea | ||
|
|
4583aa7078 | ||
|
|
b15970d3ef | ||
|
|
9df3947745 | ||
|
|
2364393b7c | ||
|
|
ee2b352489 | ||
|
|
890202f2e4 | ||
|
|
3c7737c8b1 | ||
|
|
ca8e951ac6 | ||
|
|
52ecc45ec8 | ||
|
|
8ee406b4bd | ||
|
|
46e87661c0 | ||
|
|
723cda2697 | ||
|
|
5f0ff026b0 | ||
|
|
0a810ced54 | ||
|
|
c1a817b4e9 | ||
|
|
478d12b4ff | ||
|
|
328bc361be | ||
|
|
7913ae1867 | ||
|
|
c0a55e136b | ||
|
|
3054a38bd6 | ||
|
|
381149cedf | ||
|
|
db9dacae54 | ||
|
|
77327db062 | ||
|
|
1675943f44 | ||
|
|
43714caaec | ||
|
|
bbc6caf7fe | ||
|
|
25299513c1 | ||
|
|
e61d3c6ca0 | ||
|
|
c56e3e5af3 | ||
|
|
78cb813210 | ||
|
|
f90b8f9473 | ||
|
|
8a58b217be | ||
|
|
5847dcabba | ||
|
|
ad5146b9b1 | ||
|
|
d08c2e1b9c |
12
.github/ISSUE_TEMPLATE/bug_report.md
vendored
12
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -14,11 +14,11 @@ about: Tell us about a problem you are experiencing
|
||||
**The output of the following commands will help us better understand what's going on**:
|
||||
(Pasting long output into a [GitHub gist](https://gist.github.com) or other pastebin is fine.)
|
||||
|
||||
* `kubectl logs deployment/ark -n heptio-ark`
|
||||
* `ark backup describe <backupname>` or `kubectl get backup/<backupname> -n heptio-ark -o yaml`
|
||||
* `ark backup logs <backupname>`
|
||||
* `ark restore describe <restorename>` or `kubectl get restore/<restorename> -n heptio-ark -o yaml`
|
||||
* `ark restore logs <restorename>`
|
||||
* `kubectl logs deployment/velero -n velero`
|
||||
* `velero backup describe <backupname>` or `kubectl get backup/<backupname> -n velero -o yaml`
|
||||
* `velero backup logs <backupname>`
|
||||
* `velero restore describe <restorename>` or `kubectl get restore/<restorename> -n velero -o yaml`
|
||||
* `velero restore logs <restorename>`
|
||||
|
||||
|
||||
**Anything else you would like to add:**
|
||||
@@ -27,7 +27,7 @@ about: Tell us about a problem you are experiencing
|
||||
|
||||
**Environment:**
|
||||
|
||||
- Ark version (use `ark version`):
|
||||
- Velero version (use `velero version`):
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- Kubernetes installer & version:
|
||||
- Cloud provider or hardware configuration:
|
||||
|
||||
@@ -14,7 +14,7 @@ about: Suggest an idea for this project
|
||||
|
||||
**Environment:**
|
||||
|
||||
- Ark version (use `ark version`):
|
||||
- Velero version (use `velero version`):
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- Kubernetes installer & version:
|
||||
- Cloud provider or hardware configuration:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -7,7 +7,6 @@
|
||||
_obj
|
||||
_test
|
||||
_output
|
||||
config
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
@@ -27,7 +26,7 @@ _testmain.go
|
||||
|
||||
debug
|
||||
|
||||
/ark
|
||||
/velero
|
||||
.idea/
|
||||
|
||||
.container-*
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018 the Heptio Ark contributors.
|
||||
# Copyright 2018 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -13,11 +13,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
dist: _output
|
||||
before:
|
||||
hooks:
|
||||
- ./hack/set-example-tags.sh
|
||||
builds:
|
||||
- main: ./cmd/ark/main.go
|
||||
- main: ./cmd/velero/main.go
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
@@ -39,16 +36,17 @@ builds:
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
ldflags:
|
||||
- -X "github.com/heptio/ark/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/ark/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/ark/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
|
||||
- -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
|
||||
archive:
|
||||
name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}"
|
||||
wrap_in_directory: true
|
||||
files:
|
||||
- LICENSE
|
||||
- config/**/*
|
||||
- examples/**/*
|
||||
checksum:
|
||||
name_template: 'CHECKSUM'
|
||||
release:
|
||||
github:
|
||||
owner: heptio
|
||||
name: ark
|
||||
name: velero
|
||||
draft: true
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
|
||||
sudo: required
|
||||
|
||||
|
||||
36
CHANGELOG.md
36
CHANGELOG.md
@@ -1,21 +1,11 @@
|
||||
## Development release:
|
||||
* [Unreleased Changes][0]
|
||||
|
||||
* [Unreleased Changes][9]
|
||||
|
||||
### Bug Fixes / Other Changes
|
||||
* add multizone/regional support to gcp (#765, @wwitzel3)
|
||||
* Delete spec.priority in pod restore action (#879, @mwieczorek)
|
||||
* Added brew reference (#1051, @omerlh)
|
||||
* Update to go 1.11 (#1069, @gliptak)
|
||||
* Initialize empty schedule metrics on server init (#1054, @cbeneke)
|
||||
* Update CHANGELOGs (#1063, @wwitzel3)
|
||||
* Remove default token from all service accounts (#1048, @ncdc)
|
||||
* Allow to use AWS Signature v1 for creating signed AWS urls (#811, @bashofmann)
|
||||
|
||||
## Current release:
|
||||
* [CHANGELOG-0.10.md][8]
|
||||
* [CHANGELOG-0.11.md][9]
|
||||
|
||||
## Older releases:
|
||||
* [CHANGELOG-0.10.md][8]
|
||||
* [CHANGELOG-0.9.md][7]
|
||||
* [CHANGELOG-0.8.md][6]
|
||||
* [CHANGELOG-0.7.md][5]
|
||||
@@ -24,12 +14,14 @@
|
||||
* [CHANGELOG-0.4.md][2]
|
||||
* [CHANGELOG-0.3.md][1]
|
||||
|
||||
[9]: https://github.com/heptio/ark/blob/master/changelogs/unreleased
|
||||
[8]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.10.md
|
||||
[7]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.9.md
|
||||
[6]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.8.md
|
||||
[5]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.7.md
|
||||
[4]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.6.md
|
||||
[3]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.5.md
|
||||
[2]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.4.md
|
||||
[1]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.3.md
|
||||
|
||||
[9]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.11.md
|
||||
[8]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.10.md
|
||||
[7]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.9.md
|
||||
[6]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.8.md
|
||||
[5]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.7.md
|
||||
[4]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.6.md
|
||||
[3]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.5.md
|
||||
[2]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.4.md
|
||||
[1]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.3.md
|
||||
[0]: https://github.com/heptio/velero/blob/master/changelogs/unreleased
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Heptio Ark Community Code of Conduct
|
||||
# Velero Community Code of Conduct
|
||||
|
||||
## Contributor Code of Conduct
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ should be a new file created in the `changelogs/unreleased` folder. The file sho
|
||||
naming convention of `pr-username` and the contents of the file should be your text for the
|
||||
changelog.
|
||||
|
||||
ark/changelogs/unreleased <- folder
|
||||
velero/changelogs/unreleased <- folder
|
||||
000-username <- file
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ All authors to the project retain copyright to their work. However, to ensure
|
||||
that they are only submitting work that they have rights to, we are requiring
|
||||
everyone to acknowledge this by signing their work.
|
||||
|
||||
Any copyright notices in this repo should specify the authors as "the Heptio Ark project contributors".
|
||||
Any copyright notices in this repo should specify the authors as "the Velero contributors".
|
||||
|
||||
To sign your work, just add a line like this at the end of your commit message:
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018 the Heptio Ark contributors.
|
||||
# Copyright 2018, 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,11 +12,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: gcp-default
|
||||
namespace: heptio-ark
|
||||
spec:
|
||||
provider: gcp
|
||||
FROM debian:stretch-slim
|
||||
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"]
|
||||
@@ -1,22 +0,0 @@
|
||||
# Copyright 2018 the Heptio Ark contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine:3.8
|
||||
|
||||
MAINTAINER Wayne Witzel III <wayne@heptio.com>
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
RUN apk add --update --no-cache busybox util-linux
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c", "while true; do sleep 10000; done"]
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2017 the Heptio Ark contributors.
|
||||
# Copyright 2017, 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,20 +12,22 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine:3.8
|
||||
FROM debian:stretch-slim
|
||||
|
||||
MAINTAINER Andy Goldstein <andy@heptio.com>
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ca-certificates wget bzip2 && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.4_linux_amd64.bz2 && \
|
||||
mv restic_0.9.4_linux_amd64 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic && \
|
||||
apt-get remove -y wget bzip2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN apk add --update --no-cache bzip2 && \
|
||||
wget --quiet https://github.com/restic/restic/releases/download/v0.9.3/restic_0.9.3_linux_amd64.bz2 && \
|
||||
bunzip2 restic_0.9.3_linux_amd64.bz2 && \
|
||||
mv restic_0.9.3_linux_amd64 /usr/bin/restic && \
|
||||
chmod +x /usr/bin/restic
|
||||
|
||||
ADD /bin/linux/amd64/ark /ark
|
||||
ADD /bin/linux/amd64/velero /velero
|
||||
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT ["/ark"]
|
||||
ENTRYPOINT ["/velero"]
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2018 the Heptio Ark contributors.
|
||||
# Copyright 2018, 2019 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine:3.8
|
||||
FROM debian:stretch-slim
|
||||
|
||||
MAINTAINER Steve Kriss <steve@heptio.com>
|
||||
LABEL maintainer="Steve Kriss <krisss@vmware.com>"
|
||||
|
||||
ADD /bin/linux/amd64/ark-restic-restore-helper .
|
||||
ADD /bin/linux/amd64/velero-restic-restore-helper .
|
||||
|
||||
USER nobody:nobody
|
||||
|
||||
ENTRYPOINT [ "/ark-restic-restore-helper" ]
|
||||
ENTRYPOINT [ "/velero-restic-restore-helper" ]
|
||||
424
Gopkg.lock
generated
424
Gopkg.lock
generated
@@ -2,6 +2,7 @@
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:769af0c7dbdc19798e013900cfa855af9a7fda89912e019330a1dbd80a1e9a8c"
|
||||
name = "cloud.google.com/go"
|
||||
packages = [
|
||||
"compute/metadata",
|
||||
@@ -9,22 +10,27 @@
|
||||
"internal",
|
||||
"internal/optional",
|
||||
"internal/version",
|
||||
"storage"
|
||||
"storage",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "44bcd0b2078ba5e7fedbeb36808d1ed893534750"
|
||||
version = "v0.11.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5b71d15be52cbb93f5115f51ace93798204f6b4a3df0992d0b6da8644f505984"
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"arm/disk",
|
||||
"services/storage/mgmt/2017-10-01/storage",
|
||||
"storage"
|
||||
"services/compute/mgmt/2018-04-01/compute",
|
||||
"services/storage/mgmt/2018-02-01/storage",
|
||||
"storage",
|
||||
"version",
|
||||
]
|
||||
revision = "2d1d76c9013c4feb6695a2346f0e66ea0ef77aa6"
|
||||
version = "v11.3.0-beta"
|
||||
pruneopts = "NUT"
|
||||
revision = "520918e6c8e8e1064154f51d13e02fad92b287b8"
|
||||
version = "v19.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b825d8578481c8877ff3b9a3654d77a48577cc33e65f33c3678d7e3f134bf73d"
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
@@ -32,11 +38,15 @@
|
||||
"autorest/azure",
|
||||
"autorest/date",
|
||||
"autorest/to",
|
||||
"autorest/validation"
|
||||
"autorest/validation",
|
||||
"version",
|
||||
]
|
||||
revision = "1ff28809256a84bb6966640ff3d0371af82ccba4"
|
||||
pruneopts = "NUT"
|
||||
revision = "bca49d5b51a50dc5bb17bbf6204c711c6dbded06"
|
||||
version = "v10.14.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f41188abdb95b92995643a927f5bdd208389822a8e1aba00d85633ae51b85c85"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
@@ -69,73 +79,92 @@
|
||||
"service/s3",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts"
|
||||
"service/sts",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "1f8fb9d0919e5a58992207db9512a03f76ab0274"
|
||||
version = "v1.13.12"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = "NUT"
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = "NUT"
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2"
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:da25cf063072a10461c19320e82117d85f9d60be4c95a62bc8d5a49acf7d0ca5"
|
||||
name = "github.com/docker/spdystream"
|
||||
packages = [
|
||||
".",
|
||||
"spdy"
|
||||
"spdy",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e8ffe2fb7368f65afaaf39769207bee2a7aeddf694e94f5bc05cffd750d4d98d"
|
||||
name = "github.com/evanphx/json-patch"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "944e07253867aacae43c04b2e6a239005443f33a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:021d6ee454d87208dd1cd731cd702d3521aa8a51ad2072fa7beffbb3d677d8bb"
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
|
||||
version = "v1.28.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f"
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys"
|
||||
"sortkeys",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a98a0b00720dc3149bf3d0c8d5726188899e5bab2f5072b9a7ef82958fbc98b2"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
@@ -143,235 +172,331 @@
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
"ptypes/timestamp",
|
||||
]
|
||||
revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
|
||||
pruneopts = "NUT"
|
||||
revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:139e03a0b4ef05098c2acb7c081b2d84d9478cae11ac777f7c1f6d550efab1ca"
|
||||
name = "github.com/googleapis/gax-go"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "84ed26760e7f6f80887a2fbfb50db3cc415d2cea"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3d7c1446fc5c710351b246c0dc6700fae843ca27f5294d0bd9f68bab2a810c44"
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = [
|
||||
"OpenAPIv2",
|
||||
"compiler",
|
||||
"extensions"
|
||||
"extensions",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache"
|
||||
"diskcache",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:32e5a56c443b5581e4bf6e74cdc78b5826d7e4c5df43883e2dc31e4d7f4ae98a"
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "ca137eb4b4389c9bc6f1a6d887f056bf16c00510"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:143aae8d04a6133eea9c6400b90a1f47ae1100b48a1636160aba861d1b26c5b2"
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
packages = ["."]
|
||||
revision = "e2fbc6864d18d3c37b6cde4297ec9fca266d28f1"
|
||||
packages = [
|
||||
".",
|
||||
"internal/plugin",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "3f118e8ee104b6f22aeb12453fab56aed1356186"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:892e13370cbfcda090d8f7676ef67b50cb2ead5460b72f3a1c2bb1c19e9a57de"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [
|
||||
".",
|
||||
"simplelru"
|
||||
"simplelru",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:73d3d2f8f2bcf510db08576eca6c1d2b87bcea348de26bf1386b291ad1b52296"
|
||||
name = "github.com/hashicorp/yamux"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "f5742cb6b85602e7fa834e9d5d91a7d7fa850824"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:36480ab1ebec17489013e8a69d15451f47d0edbf8a54a45284857d13a0ebf692"
|
||||
name = "github.com/imdario/mergo"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc"
|
||||
version = "0.2.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/json-iterator/go"
|
||||
digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a"
|
||||
name = "github.com/joho/godotenv"
|
||||
packages = ["."]
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
pruneopts = "NUT"
|
||||
revision = "23d116af351c84513e1946b527c88823e476be13"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728"
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "1624edc4454b8682399def8740d46db5e4362ba4"
|
||||
version = "v1.1.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:13ada91f079028d1b4ca88e10a16439dcfa6541d26ed2e61e770f56d06301933"
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:18b773b92ac82a451c1276bd2776c1e55ce057ee202691ab33c8d6690efcc048"
|
||||
name = "github.com/mitchellh/go-testing-interface"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
|
||||
name = "github.com/modern-go/concurrent"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
|
||||
version = "1.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
|
||||
name = "github.com/modern-go/reflect2"
|
||||
packages = ["."]
|
||||
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
|
||||
version = "1.0.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3b517122f3aad1ecce45a630ea912b3092b4729f25532a911d0cb2935a1f9352"
|
||||
name = "github.com/oklog/run"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
pruneopts = "NUT"
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = "NUT"
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
"prometheus/promhttp",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
pruneopts = "NUT"
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:768b555b86742de2f28beb37f1dedce9a75f91f871d75b5717c96399c1a78c08"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
"model",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c4a213a8d73fbb0b13f717ba7996116602ef18ecb42b91d77405877914cb0349"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "94663424ae5ae9856b40a9f170762b4197024661"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f53493533f0689ff978122bb36801af47fe549828ce786af9166694394c3fa0d"
|
||||
name = "github.com/robfig/cron"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/uuid"
|
||||
digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca"
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
|
||||
version = "v1.1.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:31c5d934770c8b0698c28eb8576cb39b14e2fcf3c5f2a6e8449116884cd92e3f"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7e6f7748181bd6004ace3f6ccd389a088bac357714364152fde0e5f9e0b588d7"
|
||||
name = "github.com/spf13/afero"
|
||||
packages = [
|
||||
".",
|
||||
"mem"
|
||||
"mem",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "9be650865eab0c12963d8753212f4f9c66cdcf12"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:60a46e2410edbf02b419f833372dd1d24d7aa1b916a990a7370e792fada1eadd"
|
||||
name = "github.com/stretchr/objx"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:72cea38d2957d95d18be2287ef9d4b06b89796d2e3070bc7f796bea3a4844381"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
"require"
|
||||
"require",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2f977d7025e73f05091f406514f1d2cca36cc649d2af08d5f5223ebc6c475863"
|
||||
name = "go.opencensus.io"
|
||||
packages = [
|
||||
".",
|
||||
@@ -386,19 +511,23 @@
|
||||
"trace",
|
||||
"trace/internal",
|
||||
"trace/propagation",
|
||||
"trace/tracestate"
|
||||
"trace/tracestate",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:624a05c7c6ed502bf77364cd3d54631383dafc169982fddd8ee77b53c3d9cccf"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = "NUT"
|
||||
revision = "eb71ad9bd329b5ac0fd0148dd99bd62e8be8e035"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ce8a4c0642d5e3881d1970f39008477671a2a5157d051c36d1618cf6bb669556"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
@@ -408,33 +537,39 @@
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"lex/httplex",
|
||||
"trace"
|
||||
"trace",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b0fef33b00740f7eeb5198f67ee1642d8d2560e9b428df7fb5f69fb140f5c4d0"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt"
|
||||
"jwt",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:240624e43a0897823c99c74d446ec6de88134e6920b759815189be1a619113e6"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
"windows",
|
||||
]
|
||||
revision = "43e60d72a8e2bd92ee98319ba9a384a0e9837c08"
|
||||
pruneopts = "NUT"
|
||||
revision = "6c81ef8f67ca3f42fc9cd71dfbd5f35b0c4b5771"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:0f6792185947c44cd78bc6a2f4399c44c7e85d406b3229a27d41f6cd0a8e982b"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"encoding",
|
||||
@@ -451,18 +586,21 @@
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e56139fd9c5bc7244c76116c68e500765bb6db6b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:51a479a09b7ed06b7be5a854e27fcc328718ae0e5ad159f9ddeef12d0326c2e7"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
pruneopts = "NUT"
|
||||
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cb42335b4b5606082a8e93f437a8962702e7875d157ccb03fba8cdd1ca70e8c3"
|
||||
name = "google.golang.org/api"
|
||||
packages = [
|
||||
"compute/v1",
|
||||
@@ -475,11 +613,14 @@
|
||||
"option",
|
||||
"storage/v1",
|
||||
"transport/http",
|
||||
"transport/http/internal/propagation"
|
||||
"transport/http/internal/propagation",
|
||||
]
|
||||
revision = "3f6e8463aa1d824abe11b439d178c02220079da5"
|
||||
pruneopts = "NUT"
|
||||
revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7"
|
||||
version = "v0.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9"
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
".",
|
||||
@@ -491,58 +632,84 @@
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch"
|
||||
"urlfetch",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a2059631b54cdc40db08f8c4dfb39d3c5ec442003506327df2c675a9384b7115"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = [
|
||||
"googleapis/api/annotations",
|
||||
"googleapis/iam/v1",
|
||||
"googleapis/rpc/status"
|
||||
"googleapis/rpc/status",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "ee236bd376b077c7a89f260c026c4735b195e459"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8274473795baa9e1fc3b36fae1d8af131a03a7ae2456a8e87a6fda86af019f70"
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"binarylog/grpc_binarylog_v1",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"grpclb/grpc_lb_v1",
|
||||
"credentials/internal",
|
||||
"encoding",
|
||||
"encoding/proto",
|
||||
"grpclog",
|
||||
"health",
|
||||
"health/grpc_health_v1",
|
||||
"internal",
|
||||
"internal/backoff",
|
||||
"internal/binarylog",
|
||||
"internal/channelz",
|
||||
"internal/envconfig",
|
||||
"internal/grpcrand",
|
||||
"internal/grpcsync",
|
||||
"internal/syscall",
|
||||
"internal/transport",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"resolver",
|
||||
"resolver/dns",
|
||||
"resolver/passthrough",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport"
|
||||
]
|
||||
revision = "b3ddf786825de56a4178401b7e174ee332173b66"
|
||||
version = "v1.5.2"
|
||||
pruneopts = "NUT"
|
||||
revision = "2fdaae294f38ed9a121193c51ec99fecd3b13eb7"
|
||||
version = "v1.19.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185"
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:c85dc78b3426641ebf2a0bbf5b731b5c4613ddb5987dbe218f7e75468dcd56f5"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:93e9a6515f47aaaf7f1c84617fc8c82db9216f7290c4d4149afeaf6936d9aa5e"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admission/v1beta1",
|
||||
@@ -557,10 +724,12 @@
|
||||
"authorization/v1beta1",
|
||||
"autoscaling/v1",
|
||||
"autoscaling/v2beta1",
|
||||
"autoscaling/v2beta2",
|
||||
"batch/v1",
|
||||
"batch/v1beta1",
|
||||
"batch/v2alpha1",
|
||||
"certificates/v1beta1",
|
||||
"coordination/v1beta1",
|
||||
"core/v1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
@@ -575,22 +744,25 @@
|
||||
"settings/v1alpha1",
|
||||
"storage/v1",
|
||||
"storage/v1alpha1",
|
||||
"storage/v1beta1"
|
||||
"storage/v1beta1",
|
||||
]
|
||||
revision = "072894a440bdee3a891dea811fe42902311cd2a3"
|
||||
version = "kubernetes-1.11.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "fd83cbc87e7632ccd8bbab63d2b673d4e0c631cc"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b8a1dcc5f4e559b7af185ba12dd341cb8c175ea3d36227a02699b251ae5fde05"
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
packages = [
|
||||
"pkg/apis/apiextensions",
|
||||
"pkg/apis/apiextensions/v1beta1"
|
||||
"pkg/apis/apiextensions/v1beta1",
|
||||
]
|
||||
revision = "07bbbb7a28a34c56bf9d1b192a88cc9b2350095e"
|
||||
pruneopts = "NUT"
|
||||
revision = "1748dfb29e8a4432b78514bc88a1b07937a9805a"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "release-1.11"
|
||||
digest = "1:ca279c0bb7a72618aff5b77440d5a5e2f92857fdb7e0e4c7a1a77a7895929c49"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/equality",
|
||||
@@ -627,6 +799,7 @@
|
||||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/mergepatch",
|
||||
"pkg/util/naming",
|
||||
"pkg/util/net",
|
||||
"pkg/util/remotecommand",
|
||||
"pkg/util/runtime",
|
||||
@@ -640,11 +813,26 @@
|
||||
"pkg/watch",
|
||||
"third_party/forked/golang/json",
|
||||
"third_party/forked/golang/netutil",
|
||||
"third_party/forked/golang/reflect"
|
||||
"third_party/forked/golang/reflect",
|
||||
]
|
||||
revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
|
||||
pruneopts = "NUT"
|
||||
revision = "6dd46049f39503a1fc8d65de4bd566829e95faff"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "release-1.12"
|
||||
digest = "1:7991e5074de01462e0cf6ef77060895b50e9026d16152a6e925cb99b67a1f8ae"
|
||||
name = "k8s.io/cli-runtime"
|
||||
packages = [
|
||||
"pkg/genericclioptions",
|
||||
"pkg/genericclioptions/printers",
|
||||
"pkg/genericclioptions/resource",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "11047e25a94a7eaa541b92a8bbfd3e1243607219"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5d9f76731330e62bede1e4eb9d519b282a26621a5368e5db1a18a8eb1ccda1ff"
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"discovery",
|
||||
@@ -661,12 +849,15 @@
|
||||
"informers/autoscaling",
|
||||
"informers/autoscaling/v1",
|
||||
"informers/autoscaling/v2beta1",
|
||||
"informers/autoscaling/v2beta2",
|
||||
"informers/batch",
|
||||
"informers/batch/v1",
|
||||
"informers/batch/v1beta1",
|
||||
"informers/batch/v2alpha1",
|
||||
"informers/certificates",
|
||||
"informers/certificates/v1beta1",
|
||||
"informers/coordination",
|
||||
"informers/coordination/v1beta1",
|
||||
"informers/core",
|
||||
"informers/core/v1",
|
||||
"informers/events",
|
||||
@@ -704,10 +895,12 @@
|
||||
"kubernetes/typed/authorization/v1beta1",
|
||||
"kubernetes/typed/autoscaling/v1",
|
||||
"kubernetes/typed/autoscaling/v2beta1",
|
||||
"kubernetes/typed/autoscaling/v2beta2",
|
||||
"kubernetes/typed/batch/v1",
|
||||
"kubernetes/typed/batch/v1beta1",
|
||||
"kubernetes/typed/batch/v2alpha1",
|
||||
"kubernetes/typed/certificates/v1beta1",
|
||||
"kubernetes/typed/coordination/v1beta1",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
@@ -729,10 +922,12 @@
|
||||
"listers/apps/v1beta2",
|
||||
"listers/autoscaling/v1",
|
||||
"listers/autoscaling/v2beta1",
|
||||
"listers/autoscaling/v2beta2",
|
||||
"listers/batch/v1",
|
||||
"listers/batch/v1beta1",
|
||||
"listers/batch/v2alpha1",
|
||||
"listers/certificates/v1beta1",
|
||||
"listers/coordination/v1beta1",
|
||||
"listers/core/v1",
|
||||
"listers/events/v1beta1",
|
||||
"listers/extensions/v1beta1",
|
||||
@@ -781,32 +976,129 @@
|
||||
"util/integer",
|
||||
"util/jsonpath",
|
||||
"util/retry",
|
||||
"util/workqueue"
|
||||
"util/workqueue",
|
||||
]
|
||||
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
|
||||
version = "v8.0.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "1638f8970cefaa404ff3a62950f88b08292b2696"
|
||||
version = "v9.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
|
||||
name = "k8s.io/kube-openapi"
|
||||
packages = ["pkg/util/proto"]
|
||||
pruneopts = "NUT"
|
||||
revision = "d83b052f768a50a309c692a9c271da3f3276ff88"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8a9b1e755afd7ea778cd451a955977eb3fe0abcc4e32079644b6b7afc42d7ff8"
|
||||
name = "k8s.io/kubernetes"
|
||||
packages = [
|
||||
"pkg/kubectl/genericclioptions",
|
||||
"pkg/kubectl/genericclioptions/printers",
|
||||
"pkg/kubectl/genericclioptions/resource",
|
||||
"pkg/kubectl/scheme",
|
||||
"pkg/printers"
|
||||
"pkg/printers",
|
||||
]
|
||||
revision = "91e7b4fd31fcd3d5f436da26c980becec37ceefe"
|
||||
version = "v1.11.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "51dd616cdd25d6ee22c83a858773b607328a18ec"
|
||||
version = "v1.12.5"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "7979aebee2c67e7fa68bddf050ef32b75a2f51145d26d00a54f6bf489af635a2"
|
||||
input-imports = [
|
||||
"cloud.google.com/go/storage",
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute",
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage",
|
||||
"github.com/Azure/azure-sdk-for-go/storage",
|
||||
"github.com/Azure/go-autorest/autorest",
|
||||
"github.com/Azure/go-autorest/autorest/adal",
|
||||
"github.com/Azure/go-autorest/autorest/azure",
|
||||
"github.com/Azure/go-autorest/autorest/to",
|
||||
"github.com/aws/aws-sdk-go/aws",
|
||||
"github.com/aws/aws-sdk-go/aws/awserr",
|
||||
"github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints",
|
||||
"github.com/aws/aws-sdk-go/aws/request",
|
||||
"github.com/aws/aws-sdk-go/aws/session",
|
||||
"github.com/aws/aws-sdk-go/aws/signer/v4",
|
||||
"github.com/aws/aws-sdk-go/service/ec2",
|
||||
"github.com/aws/aws-sdk-go/service/s3",
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"github.com/evanphx/json-patch",
|
||||
"github.com/golang/glog",
|
||||
"github.com/golang/protobuf/proto",
|
||||
"github.com/hashicorp/go-hclog",
|
||||
"github.com/hashicorp/go-plugin",
|
||||
"github.com/joho/godotenv",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"github.com/robfig/cron",
|
||||
"github.com/satori/go.uuid",
|
||||
"github.com/sirupsen/logrus",
|
||||
"github.com/spf13/afero",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/spf13/pflag",
|
||||
"github.com/stretchr/testify/assert",
|
||||
"github.com/stretchr/testify/mock",
|
||||
"github.com/stretchr/testify/require",
|
||||
"golang.org/x/net/context",
|
||||
"golang.org/x/oauth2",
|
||||
"golang.org/x/oauth2/google",
|
||||
"google.golang.org/api/compute/v1",
|
||||
"google.golang.org/api/googleapi",
|
||||
"google.golang.org/api/iterator",
|
||||
"google.golang.org/api/option",
|
||||
"google.golang.org/grpc",
|
||||
"google.golang.org/grpc/codes",
|
||||
"google.golang.org/grpc/status",
|
||||
"k8s.io/api/apps/v1",
|
||||
"k8s.io/api/apps/v1beta1",
|
||||
"k8s.io/api/batch/v1",
|
||||
"k8s.io/api/core/v1",
|
||||
"k8s.io/api/rbac/v1",
|
||||
"k8s.io/api/rbac/v1beta1",
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
|
||||
"k8s.io/apimachinery/pkg/api/equality",
|
||||
"k8s.io/apimachinery/pkg/api/errors",
|
||||
"k8s.io/apimachinery/pkg/api/meta",
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
|
||||
"k8s.io/apimachinery/pkg/labels",
|
||||
"k8s.io/apimachinery/pkg/runtime",
|
||||
"k8s.io/apimachinery/pkg/runtime/schema",
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer",
|
||||
"k8s.io/apimachinery/pkg/types",
|
||||
"k8s.io/apimachinery/pkg/util/clock",
|
||||
"k8s.io/apimachinery/pkg/util/duration",
|
||||
"k8s.io/apimachinery/pkg/util/errors",
|
||||
"k8s.io/apimachinery/pkg/util/runtime",
|
||||
"k8s.io/apimachinery/pkg/util/sets",
|
||||
"k8s.io/apimachinery/pkg/util/validation",
|
||||
"k8s.io/apimachinery/pkg/util/wait",
|
||||
"k8s.io/apimachinery/pkg/watch",
|
||||
"k8s.io/client-go/discovery",
|
||||
"k8s.io/client-go/discovery/fake",
|
||||
"k8s.io/client-go/dynamic",
|
||||
"k8s.io/client-go/informers",
|
||||
"k8s.io/client-go/informers/core/v1",
|
||||
"k8s.io/client-go/kubernetes",
|
||||
"k8s.io/client-go/kubernetes/scheme",
|
||||
"k8s.io/client-go/kubernetes/typed/core/v1",
|
||||
"k8s.io/client-go/kubernetes/typed/rbac/v1",
|
||||
"k8s.io/client-go/kubernetes/typed/rbac/v1beta1",
|
||||
"k8s.io/client-go/listers/core/v1",
|
||||
"k8s.io/client-go/plugin/pkg/client/auth/azure",
|
||||
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
|
||||
"k8s.io/client-go/plugin/pkg/client/auth/oidc",
|
||||
"k8s.io/client-go/rest",
|
||||
"k8s.io/client-go/restmapper",
|
||||
"k8s.io/client-go/testing",
|
||||
"k8s.io/client-go/tools/cache",
|
||||
"k8s.io/client-go/tools/clientcmd",
|
||||
"k8s.io/client-go/tools/remotecommand",
|
||||
"k8s.io/client-go/util/flowcontrol",
|
||||
"k8s.io/client-go/util/workqueue",
|
||||
"k8s.io/kubernetes/pkg/printers",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
58
Gopkg.toml
58
Gopkg.toml
@@ -31,31 +31,28 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "~1.11"
|
||||
version = "~1.12"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "~8.0"
|
||||
version = "~9.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.11.0"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.11.0"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
# vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:104:16:
|
||||
# unknown field 'CaseSensitive' in struct literal of type jsoniter.Config
|
||||
[[constraint]]
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
# k8s.io/client-go v9.0 uses f2b4162afba35581b6d4a50d3b8f34e33c144682 (released in v1.1.4)
|
||||
[[override]]
|
||||
name = "github.com/json-iterator/go"
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
|
||||
# vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go:300:25:
|
||||
# cannot call non-function spt.Token (type adal.Token)
|
||||
[[override]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
revision = "1ff28809256a84bb6966640ff3d0371af82ccba4"
|
||||
version = "~1.1.4"
|
||||
|
||||
#
|
||||
# Cloud provider packages
|
||||
@@ -66,7 +63,12 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
version = "~11.3.0-beta"
|
||||
version = "~19.0.0"
|
||||
|
||||
# k8s.io/client-go v9.0 uses bca49d5b51a50dc5bb17bbf6204c711c6dbded06 (v10.14.0)
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
version = "~10.14.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
@@ -74,7 +76,7 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/api"
|
||||
branch = "master"
|
||||
version = "~v0.3.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/oauth2"
|
||||
@@ -91,15 +93,9 @@
|
||||
name = "github.com/robfig/cron"
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
# TODO(1.0) this repo is a redirect to github.com/satori/go.uuid. Our
|
||||
# current version of azure-sdk-for-go references this redirect, so
|
||||
# use it so we don't get a duplicate copy of this dependency.
|
||||
# Once our azure-sdk-for-go is updated to a newer version (where
|
||||
# their dependency has changed to .../go.uuid), switch this to
|
||||
# github.com/satori/go.uuid
|
||||
[[constraint]]
|
||||
name = "github.com/satori/uuid"
|
||||
version = "1.1.0"
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "~1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/afero"
|
||||
@@ -119,4 +115,20 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
revision = "3f118e8ee104b6f22aeb12453fab56aed1356186"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "~v1.3.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
version = "~v1.19.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/joho/godotenv"
|
||||
version = "~v1.3.0"
|
||||
|
||||
[[override]]
|
||||
name = "golang.org/x/sys"
|
||||
branch = "master"
|
||||
|
||||
22
Makefile
22
Makefile
@@ -1,6 +1,6 @@
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Modifications Copyright 2017 the Heptio Ark contributors.
|
||||
# Modifications Copyright 2017 the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,10 +15,10 @@
|
||||
# limitations under the License.
|
||||
|
||||
# The binary to build (just the basename).
|
||||
BIN ?= ark
|
||||
BIN ?= velero
|
||||
|
||||
# This repo's root import path (under GOPATH).
|
||||
PKG := github.com/heptio/ark
|
||||
PKG := github.com/heptio/velero
|
||||
|
||||
# Where to push the docker image.
|
||||
REGISTRY ?= gcr.io/heptio-images
|
||||
@@ -47,7 +47,7 @@ GOARCH = $(word 2, $(platform_temp))
|
||||
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
|
||||
# Set default base image dynamically for each arch
|
||||
ifeq ($(GOARCH),amd64)
|
||||
DOCKERFILE ?= Dockerfile-$(BIN).alpine
|
||||
DOCKERFILE ?= Dockerfile-$(BIN)
|
||||
endif
|
||||
#ifeq ($(GOARCH),arm)
|
||||
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
|
||||
@@ -63,7 +63,7 @@ IMAGE = $(REGISTRY)/$(BIN)
|
||||
# If you want to build AND push all containers, see the 'all-push' rule.
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@$(MAKE) build BIN=ark-restic-restore-helper
|
||||
@$(MAKE) build BIN=velero-restic-restore-helper
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@@ -104,7 +104,7 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
|
||||
|
||||
TTY := $(shell tty -s && echo "-t")
|
||||
|
||||
BUILDER_IMAGE := ark-builder
|
||||
BUILDER_IMAGE := velero-builder
|
||||
|
||||
# Example: make shell CMD="date > datefile"
|
||||
shell: build-dirs build-image
|
||||
@@ -132,7 +132,7 @@ DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
|
||||
build-fsfreeze: BIN = fsfreeze-pause
|
||||
build-fsfreeze:
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN).alpine
|
||||
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN).alpine _output
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN).alpine _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
push-fsfreeze: BIN = fsfreeze-pause
|
||||
@@ -146,13 +146,13 @@ endif
|
||||
|
||||
all-containers:
|
||||
$(MAKE) container
|
||||
$(MAKE) container BIN=ark-restic-restore-helper
|
||||
$(MAKE) container BIN=velero-restic-restore-helper
|
||||
$(MAKE) build-fsfreeze
|
||||
|
||||
container: verify test .container-$(DOTFILE_IMAGE) container-name
|
||||
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
|
||||
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
|
||||
@docker build --pull -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > $@
|
||||
|
||||
container-name:
|
||||
@@ -160,7 +160,7 @@ container-name:
|
||||
|
||||
all-push:
|
||||
$(MAKE) push
|
||||
$(MAKE) push BIN=ark-restic-restore-helper
|
||||
$(MAKE) push BIN=velero-restic-restore-helper
|
||||
$(MAKE) push-fsfreeze
|
||||
|
||||
|
||||
@@ -200,7 +200,7 @@ build-dirs:
|
||||
@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH) .go/go-build
|
||||
|
||||
build-image:
|
||||
cd hack/build-image && docker build -t $(BUILDER_IMAGE) .
|
||||
cd hack/build-image && docker build --pull -t $(BUILDER_IMAGE) .
|
||||
|
||||
clean:
|
||||
rm -rf .container-* _output/.dockerfile-* .push-*
|
||||
|
||||
50
README.md
50
README.md
@@ -1,35 +1,39 @@
|
||||
# Heptio Ark
|
||||
![100]
|
||||
|
||||
**Maintainers:** [Heptio][0]
|
||||
|
||||
[![Build Status][1]][2] <a href="https://zenhub.com"><img src="https://raw.githubusercontent.com/ZenHubIO/support/master/zenhub-badge.png"></a>
|
||||
[![Build Status][1]][2]
|
||||
|
||||
## Overview
|
||||
|
||||
Ark gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Ark lets you:
|
||||
Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Velero lets you:
|
||||
|
||||
* Take backups of your cluster and restore in case of loss.
|
||||
* Copy cluster resources to other clusters.
|
||||
* Replicate your production environment for development and testing environments.
|
||||
|
||||
Ark consists of:
|
||||
Velero consists of:
|
||||
|
||||
* A server that runs on your cluster
|
||||
* A command-line client that runs locally
|
||||
|
||||
You can run Ark in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].
|
||||
You can run Velero in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].
|
||||
|
||||
## Breaking changes
|
||||
## Installation
|
||||
|
||||
Ark version 0.10.0 introduces a number of breaking changes. Before you upgrade to version 0.10.0, make sure to read [the documentation on upgrading][98].
|
||||
We strongly recommend that you use an [official release][6] of Velero. The tarballs for each release contain the
|
||||
command-line client **and** version-specific sample YAML files for deploying Velero to your cluster.
|
||||
Follow the instructions under the **Install** section of [our documentation][29] to get started.
|
||||
|
||||
_The code and sample YAML files in the master branch of the Velero repository are under active development and are not guaranteed to be stable. Use them at your own risk!_
|
||||
|
||||
## More information
|
||||
|
||||
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Ark, and more.
|
||||
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Velero, and more.
|
||||
|
||||
Please use the version selector at the top of the site to ensure you are using the appropriate documentation for your version of Velero.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.
|
||||
If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#velero channel][25] on the Kubernetes Slack server.
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -51,29 +55,27 @@ Feedback and discussion are available on [the mailing list][24].
|
||||
|
||||
See [the list of releases][6] to find out about feature changes.
|
||||
|
||||
[0]: https://github.com/heptio
|
||||
[1]: https://travis-ci.org/heptio/ark.svg?branch=master
|
||||
[2]: https://travis-ci.org/heptio/ark
|
||||
[1]: https://travis-ci.org/heptio/velero.svg?branch=master
|
||||
[2]: https://travis-ci.org/heptio/velero
|
||||
|
||||
[4]: https://github.com/heptio/ark/issues
|
||||
[5]: https://github.com/heptio/ark/blob/master/CONTRIBUTING.md
|
||||
[6]: https://github.com/heptio/ark/releases
|
||||
[4]: https://github.com/heptio/velero/issues
|
||||
[5]: https://github.com/heptio/velero/blob/master/CONTRIBUTING.md
|
||||
[6]: https://github.com/heptio/velero/releases
|
||||
|
||||
[8]: https://github.com/heptio/ark/blob/master/CODE_OF_CONDUCT.md
|
||||
[8]: https://github.com/heptio/velero/blob/master/CODE_OF_CONDUCT.md
|
||||
[9]: https://kubernetes.io/docs/setup/
|
||||
[10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos
|
||||
[11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1
|
||||
[12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md
|
||||
[14]: https://github.com/kubernetes/kubernetes
|
||||
|
||||
|
||||
[24]: http://j.hept.io/ark-list
|
||||
[25]: https://kubernetes.slack.com/messages/ark-dr
|
||||
[26]: https://github.com/heptio/ark/blob/master/docs/zenhub.md
|
||||
[24]: https://groups.google.com/forum/#!forum/projectvelero
|
||||
[25]: https://kubernetes.slack.com/messages/velero
|
||||
[26]: https://github.com/heptio/velero/blob/master/docs/zenhub.md
|
||||
|
||||
|
||||
[29]: https://heptio.github.io/ark/
|
||||
[29]: https://heptio.github.io/velero/
|
||||
[30]: /docs/troubleshooting.md
|
||||
|
||||
[98]: /docs/upgrading-to-v0.10.md
|
||||
[99]: /docs/support-matrix.md
|
||||
[100]: /docs/img/velero.png
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Ark Support
|
||||
# Velero Support
|
||||
|
||||
Thanks for trying out Ark! We welcome all feedback, please consider joining our mailing list:
|
||||
Thanks for trying out Velero! We welcome all feedback, please consider joining our mailing list:
|
||||
|
||||
- [Mailing List](http://j.hept.io/ark-list)
|
||||
- [Mailing List](https://groups.google.com/forum/#!forum/projectvelero)
|
||||
|
||||
@@ -1,6 +1,18 @@
|
||||
- [v0.10.2](#v0102)
|
||||
- [v0.10.1](#v0101)
|
||||
- [v0.10.0](#v0100)
|
||||
|
||||
## v0.10.2
|
||||
#### 2019-02-28
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.10.2
|
||||
|
||||
### Changes
|
||||
* upgrade restic to v0.9.4 & replace --hostname flag with --host (#1156, @skriss)
|
||||
* use 'restic stats' instead of 'restic check' to determine if repo exists (#1171, @skriss)
|
||||
* Fix concurrency bug in code ensuring restic repository exists (#1235, @skriss)
|
||||
|
||||
## v0.10.1
|
||||
#### 2019-01-10
|
||||
|
||||
@@ -245,5 +257,5 @@ need to be updated for v0.10.
|
||||
- [eabef085](https://github.com/heptio/ark/commit/eabef085) Update generated Ark code based on the 1.11 k8s.io/code-generator script
|
||||
- [f5eac0b4](https://github.com/heptio/ark/commit/f5eac0b4) Update vendored library code for Kubernetes 1.11
|
||||
|
||||
[1]: https://github.com/heptio/ark/blob/master/docs/upgrading-to-v0.10.md
|
||||
[1]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10
|
||||
[2]: locations.md
|
||||
|
||||
32
changelogs/CHANGELOG-0.11.md
Normal file
32
changelogs/CHANGELOG-0.11.md
Normal file
@@ -0,0 +1,32 @@
|
||||
## v0.11.1-beta.1
|
||||
#### 2019-05-03
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v0.11.1-beta.1
|
||||
|
||||
### Highlights
|
||||
* Added the `velero migrate-backups` command to migrate legacy Ark backup metadata to the current Velero format in object storage. This command needs to be run in preparation for upgrading to v1.0, **if** you have backups that were originally created prior to v0.11 (i.e. when the project was named Ark).
|
||||
|
||||
## v0.11.0
|
||||
#### 2019-02-28
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v0.11.0
|
||||
|
||||
### Highlights
|
||||
* Heptio Ark is now Velero! This release is the first one to use the new name. For details on the changes and how to migrate to v0.11, see the [migration instructions][1]. **Please follow the instructions to ensure a successful upgrade to v0.11.**
|
||||
* Restic has been upgraded to v0.9.4, which brings significantly faster restores thanks to a new multi-threaded restorer.
|
||||
* Velero now waits for terminating namespaces and persistent volumes to delete before attempting to restore them, rather than trying and failing to restore them while they're being deleted.
|
||||
|
||||
### All Changes
|
||||
* Fix concurrency bug in code ensuring restic repository exists (#1235, @skriss)
|
||||
* Wait for PVs and namespaces to delete before attempting to restore them. (#826, @nrb)
|
||||
* Set the zones for GCP regional disks on restore. This requires the `compute.zones.get` permission on the GCP serviceaccount in order to work correctly. (#1200, @nrb)
|
||||
* Renamed Heptio Ark to Velero. Changed internal imports, environment variables, and binary name. (#1184, @nrb)
|
||||
* use 'restic stats' instead of 'restic check' to determine if repo exists (#1171, @skriss)
|
||||
* upgrade restic to v0.9.4 & replace --hostname flag with --host (#1156, @skriss)
|
||||
* Clarify restore log when object unchanged (#1153, @daved)
|
||||
* Add backup-version file in backup tarball. (#1117, @wwitzel3)
|
||||
* add ServerStatusRequest CRD and show server version in `ark version` output (#1116, @skriss)
|
||||
|
||||
[1]: https://heptio.github.io/velero/v0.11.0/migrating-to-velero
|
||||
@@ -77,9 +77,9 @@
|
||||
here are the steps you can take to upgrade:
|
||||
|
||||
1. Execute the steps from the **Credentials and configuration** section for your cloud:
|
||||
* [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#credentials-and-configuration)
|
||||
* [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#credentials-and-configuration)
|
||||
* [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#credentials-and-configuration)
|
||||
* [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#credentials-and-configuration)
|
||||
* [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#credentials-and-configuration)
|
||||
* [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#credentials-and-configuration)
|
||||
|
||||
When you get to the secret creation step, if you don't have your `credentials-ark` file handy,
|
||||
you can copy the existing secret from your `heptio-ark-server` namespace into the `heptio-ark` namespace:
|
||||
@@ -95,6 +95,6 @@
|
||||
```
|
||||
|
||||
3. Execute the commands from the **Start the server** section for your cloud:
|
||||
* [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#start-the-server)
|
||||
* [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#start-the-server)
|
||||
* [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#start-the-server)
|
||||
* [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#start-the-server)
|
||||
* [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#start-the-server)
|
||||
* [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#start-the-server)
|
||||
|
||||
189
changelogs/CHANGELOG-1.0.md
Normal file
189
changelogs/CHANGELOG-1.0.md
Normal file
@@ -0,0 +1,189 @@
|
||||
## v1.0.0-rc.1
|
||||
#### 2019-05-10
|
||||
|
||||
This is our first release candidate for v1.0. See the **All Changes** section below for details of changes since `v1.0.0-beta.1`. We expect this to be the final release prior to the general availability of `v1.0.0`. Please try it out in your non-critical environments!
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v1.0.0-rc.1
|
||||
|
||||
### Container Image
|
||||
`gcr.io/heptio-images/velero:v1.0.0-rc.1`
|
||||
|
||||
### Documentation
|
||||
https://heptio.github.io/velero/v1.0.0-rc.1/
|
||||
|
||||
### All Changes
|
||||
* `velero backup download`: check for backup existence before attempting download (#1447, @fabito)
|
||||
* use the discovery helper's cached resources list when resolving resource short names (#1457, @skriss)
|
||||
* add `backup_last_successful_timestamp` prometheus metric (#1448, @fabito)
|
||||
* `velero install`: add `--use-volume-snapshots` flag to optionally turn off creation of volume snapshot locations (#1462, @nrb)
|
||||
|
||||
## v1.0.0-beta.1
|
||||
#### 2019-05-03
|
||||
|
||||
We're excited to release our first beta for v1.0! This beta includes all key features for v1.0 plus a number of bug fixes and documentation updates. See the **All Changes** section below for details. Please test it out in your non-critical environments!
|
||||
|
||||
We'll continue to fix bugs and make minor changes, and we expect to ship at least one more beta or release candidate prior to the general availability of v1.0.0.
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v1.0.0-beta.1
|
||||
|
||||
### Container Image
|
||||
`gcr.io/heptio-images/velero:v1.0.0-beta.1`
|
||||
|
||||
### Documentation
|
||||
https://heptio.github.io/velero/v1.0.0-beta.1/
|
||||
|
||||
### All Changes
|
||||
* Add PartiallyFailed phase for restores (#1389, @skriss)
|
||||
* Add PartiallyFailed phase for backups, log + continue on errors during backup process (#1386, @skriss)
|
||||
* Switch from `restic stats` to `restic snapshots` for checking restic repository existence (#1416, @skriss)
|
||||
* Disallow bucket names starting with '-' (#1407, @nrb)
|
||||
* Shorten label values when they're longer than 63 characters (#1392, @anshulc)
|
||||
* Fail backup if it already exists in object storage. (#1390, @ncdc,carlisia)
|
||||
* Install command: Use `latest` image tag if no version information is provided at build time (#1439, @nrb)
|
||||
* GCP: add optional 'project' config to volume snapshot location for if snapshots are in a different project than the IAM account (#1405, @skriss)
|
||||
* Azure: restore disks with zone information if it exists (#1298, @sylr)
|
||||
* Replace config/ with examples/ in release tarball (#1406, @skriss)
|
||||
|
||||
|
||||
## v1.0.0-alpha.2
|
||||
#### 2019-04-24
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v1.0.0-alpha.2
|
||||
|
||||
### Container Image
|
||||
`gcr.io/heptio-images/velero:v1.0.0-alpha.2`
|
||||
|
||||
### Highlights
|
||||
Our second v1.0 alpha is ready for testing! Please try it out in your non-critical environments. This alpha contains a bunch of bug fixes and smaller enhancements. See the **All Changes** section below for details.
|
||||
|
||||
We expect that our next release will be `v1.0.0-beta.1`, meaning that all key features for v1.0.0 will be included. Following that release, we'll continue to fix
|
||||
bugs and make minor improvements, and we expect to ship at least one more beta and/or release candidate prior to the general availability of v1.0.0.
|
||||
|
||||
### All Changes
|
||||
* restic repo ensurer: return error if new repository does not become ready within a minute, and fix channel closing/deletion (#1367, @skriss)
|
||||
* remove deprecated "hooks" for backups (they've been replaced by "pre hooks") (#1384, @skriss)
|
||||
* fix setting up restic identifiers when fully-qualified plugin names are used (#1377, @jmontleon)
|
||||
* add `--namespace` flag to `velero install` (@1380, @nrb)
|
||||
* GCP: allow `storageLocation` to be specified as a config parameter for VolumeSnapshotLocations (#1375, @ctrox)
|
||||
* add new prometheus gauge metrics `backup_total` and `restore_total` (#1353, @fabito)
|
||||
* update install docs to use `velero install` (#1376 #1393 #1394, @nrb and @skriss)
|
||||
* fix panic in API discovery when 1+ API groups cannot be reached (#1399, @skriss)
|
||||
* fail backup if it already exists in object storage (#1390, @carlisia and @ncdc)
|
||||
|
||||
## v1.0.0-alpha.1
|
||||
#### 2019-04-15
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/velero/releases/tag/v1.0.0-alpha.1
|
||||
|
||||
### Highlights
|
||||
We're excited to release our first alpha for v1.0! Please take it for a spin in your non-critical environments. Although we've finished the majority of the planned development work for v1.0, we are still working on a handful of items, so don't consider this alpha release to be fully feature-complete. Here's a quick rundown of the major changes in this release:
|
||||
|
||||
- We've added a new command, `velero install`, to make it easier to get up and running with Velero
|
||||
- We've made a bunch of improvements to the plugin framework:
|
||||
- we've reorganized the relevant packages to minimize the import surface for plugin authors
|
||||
- all plugins are now wrapped in panic handlers that will report information on panics back to Velero
|
||||
- Velero's `--log-level` flag is now passed to plugin implementations
|
||||
- Errors logged within plugins are now annotated with the file/line of where the error occurred
|
||||
- Restore item actions can now optionally return a list of additional related items that should be restored
|
||||
- Restore item actions can now indicate that an item *should not* be restored
|
||||
- The restic restore helper image used by Velero can now optionally be overridden via config map
|
||||
|
||||
### Breaking & Notable Changes
|
||||
|
||||
#### API
|
||||
* All legacy Ark data types and pre-1.0 compatibility code has been removed. Users should migrate any backups created pre-v0.11.0 with the v0.11.1 migration command (not yet released)
|
||||
|
||||
#### Azure
|
||||
* During installation, the `cloud-credentials` secret can now be created from a file, whose contents look like the following:
|
||||
```
|
||||
AZURE_TENANT_ID=${AZURE_TENANT_ID}
|
||||
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
|
||||
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
|
||||
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
|
||||
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
|
||||
```
|
||||
When using this method, the `cloud-credentials` secret should be mounted as a volume into the Velero deployment and daemon set, at the path `/credentials`. Additionally, the `$AZURE_CREDENTIALS_FILE` environment variable should be set to `/credentials/cloud` (the location of the file within the Velero pods). Note that `velero install` always uses this method of providing credentials for Azure.
|
||||
|
||||
#### Image
|
||||
* The base container image has been switched to `debian:stretch-slim`
|
||||
|
||||
#### Plugin Development
|
||||
* `BlockStore` plugins are now named `VolumeSnapshotter` plugins
|
||||
* Plugin APIs have moved to reduce the import surface:
|
||||
* Plugin gRPC servers live in `github.com/heptio/velero/pkg/plugin/framework`
|
||||
* Plugin interface types live in `github.com/heptio/velero/pkg/plugin/velero`
|
||||
* RestoreItemAction interface now takes the original item from the backup as a parameter
|
||||
* RestoreItemAction plugins can now return additional items to restore
|
||||
* RestoreItemAction plugins can now skip restoring an item
|
||||
* Plugins may now send stack traces with errors to the Velero server, so that the errors may be put into the server log
|
||||
* Plugins must now be "namespaced," using `example.domain.com/plugin-name` format
|
||||
    * For external ObjectStore and VolumeSnapshotter plugins, this name will also be the provider name in BackupStorageLocation and VolumeSnapshotLocation objects
|
||||
* `--log-level` flag is now passed to all plugins
|
||||
|
||||
#### Validation
|
||||
* Configs for Azure, AWS, and GCP are now checked for invalid or extra keys, and the server is halted if any are found
|
||||
|
||||
### All Changes
|
||||
* change container base images to debian:stretch-slim and upgrade to go 1.12 (#1365, @skriss)
|
||||
* Azure: allow credentials to be provided in a .env file (#1364, @skriss)
|
||||
* remove deprecated code in preparation for v1.0 release:
|
||||
- remove ark.heptio.com API group
|
||||
- remove support for reading ark-backup.json files from object storage
|
||||
- remove Ark field from RestoreResult type
|
||||
- remove support for "hook.backup.ark.heptio.com/..." annotations for specifying hooks
|
||||
- remove support for $HOME/.config/ark/ client config directory
|
||||
- remove support for restoring Azure snapshots using short snapshot ID formats in backup metadata
|
||||
- stop applying "velero-restore" label to restored resources and remove it from the API pkg
|
||||
- remove code that strips the "gc.ark.heptio.com" finalizer from backups
|
||||
- remove support for "backup.ark.heptio.com/..." annotations for requesting restic backups
|
||||
- remove "ark"-prefixed prometheus metrics
|
||||
- remove VolumeBackups field and related code from Backup's status (#1323, @skriss)
|
||||
* Add velero install command for basic use cases. (#1287, @nrb)
|
||||
* Support non-namespaced names for built-in plugins (#1366, @nrb)
|
||||
* instantiate the plugin manager with the per-restore logger so plugin logs are captured in the per-restore log (#1358, @skriss)
|
||||
* Validate that there can't be any duplicate plugin name, and that the name format is `example.io/name`. (#1339, @carlisia)
|
||||
* Added ability to dynamically disable controllers (#1326, @amanw)
|
||||
* set default TTL for backups (#1352, @vorar)
|
||||
* aws/azure/gcp: fail fast if unsupported keys are provided in BackupStorageLocation/VolumeSnapshotLocation config (#1338, @skriss)
|
||||
* velero backup logs & velero restore logs: show helpful error message if backup/restore does not exist or is not finished processing (#1337, @skriss)
|
||||
* Add support for allowing a RestoreItemAction to skip item restore. (#1336, @sseago)
|
||||
* Improve error message around invalid S3 URLs, and gracefully handle trailing backslashes. (#1331, @skriss)
|
||||
* set backup's start timestamp before patching it to InProgress so start times display in `velero backup get` while in progress (#1330, @skriss)
|
||||
* rename BlockStore plugin to VolumeSnapshotter (#1321, @skriss)
|
||||
* Bump plugin ProtocolVersion to version 2 (#1319, @carlisia)
|
||||
* remove Warning field from restore item action output (#1318, @skriss)
|
||||
* Fix for #1312, use describe to determine if AWS EBS snapshot is encrypted and explicitly pass that value in EC2 CreateVolume call. (#1316, @mstump)
|
||||
* Allow restic restore helper image name to be optionally specified via ConfigMap (#1311, @skriss)
|
||||
* compile only once to lower the initialization cost for regexp.MustCompile. (#1306, @pei0804)
|
||||
* enable restore item actions to return additional related items to be restored; have pods return PVCs and PVCs return PVs (#1304, @skriss)
|
||||
* log error locations from plugin logger, and don't overwrite them in the client logger if they exist already (#1301, @skriss)
|
||||
* Send stack traces from plugin errors to Velero via gRPC so error location info can be logged (#1300, @skriss)
|
||||
* check for and exclude hostPath-based persistent volumes from restic backup (#1297, @skriss)
|
||||
* make resticrepositories non-restorable resources (#1296, @skriss)
|
||||
* gracefully handle failed API groups from the discovery API (#1293, @fabito)
|
||||
* Collect 3 new metrics: backup_deletion_{attempt|failure|success}_total (#1280, @fabito)
|
||||
* Pass --log-level flag to internal/external plugins, matching Velero server's log level (#1278, @skriss)
|
||||
* AWS EBS Volume IDs now contain AZ (#1274, @tsturzl)
|
||||
* add panic handlers to all server-side plugin methods (#1270, @skriss)
|
||||
* Move all the interfaces and associated types necessary to implement all of the Velero plugins to under the new package `pkg/plugin/velero`. (#1264, @carlisia)
|
||||
* Update velero restore to not open every single file during extraction of the data (#1261, @asaf)
|
||||
* remove restore code that waits for a PV to become Available (#1254, @skriss)
|
||||
* Improve `describe` output:
|
||||
* Move Phase to right under Metadata(name/namespace/label/annotations)
|
||||
* Move Validation errors: section right after Phase: section and only show it if the item has a phase of FailedValidation
|
||||
* For restores move Warnings and Errors under Validation errors. Leave their display as is. (#1248, @DheerajSShetty)
|
||||
* don't remove storageclass from a persistent volume when restoring it (#1246, @skriss)
|
||||
* Need to defer closing the ReadCloser in ObjectStoreGRPCServer.GetObject (#1236, @DheerajSShetty)
|
||||
* update Kubernetes dependencies to match v1.12, and update Azure SDK to v19.0.0 (GA) (#1231, @skriss)
|
||||
* remove pkg/util/collections/map_utils.go, replace with structured API types and apimachinery's unstructured helpers (#1146, @skriss)
|
||||
* Add original resource (from backup) to restore item action interface (#1123, @mwieczorek)
|
||||
|
||||
### Coming in Future Alpha/Beta Releases:
|
||||
- backup & restore phases will be modified to more clearly indicate successes, failures, and partial failures
|
||||
- additional safety checks to ensure backups are never overwritten in object storage
|
||||
- revised installation documentation that takes advantage of the `velero install` command
|
||||
- as many additional stability and UX issues as we can get to
|
||||
1
changelogs/unreleased/1123-mwieczorek
Normal file
1
changelogs/unreleased/1123-mwieczorek
Normal file
@@ -0,0 +1 @@
|
||||
Add original resource (from backup) to restore item action interface
|
||||
1
changelogs/unreleased/1146-skriss
Normal file
1
changelogs/unreleased/1146-skriss
Normal file
@@ -0,0 +1 @@
|
||||
remove pkg/util/collections/map_utils.go, replace with structured API types and apimachinery's unstructured helpers
|
||||
1
changelogs/unreleased/1231-skriss
Normal file
1
changelogs/unreleased/1231-skriss
Normal file
@@ -0,0 +1 @@
|
||||
update Kubernetes dependencies to match v1.12, and update Azure SDK to v19.0.0 (GA)
|
||||
1
changelogs/unreleased/1236-DheerajSShetty
Normal file
1
changelogs/unreleased/1236-DheerajSShetty
Normal file
@@ -0,0 +1 @@
|
||||
Need to defer closing the ReadCloser in ObjectStoreGRPCServer.GetObject
|
||||
1
changelogs/unreleased/1246-skriss
Normal file
1
changelogs/unreleased/1246-skriss
Normal file
@@ -0,0 +1 @@
|
||||
don't remove storageclass from a persistent volume when restoring it
|
||||
6
changelogs/unreleased/1248-DheerajSShetty
Normal file
6
changelogs/unreleased/1248-DheerajSShetty
Normal file
@@ -0,0 +1,6 @@
|
||||
Improve `describe` output
|
||||
* Move Phase to right under Metadata(name/namespace/label/annotations)
|
||||
* Move Validation errors: section right after Phase: section and only
|
||||
show it if the item has a phase of FailedValidation
|
||||
* For restores move Warnings and Errors under Validation errors. Leave
|
||||
their display as is.
|
||||
1
changelogs/unreleased/1254-skriss
Normal file
1
changelogs/unreleased/1254-skriss
Normal file
@@ -0,0 +1 @@
|
||||
remove restore code that waits for a PV to become Available
|
||||
1
changelogs/unreleased/1261-asaf-erlich
Normal file
1
changelogs/unreleased/1261-asaf-erlich
Normal file
@@ -0,0 +1 @@
|
||||
Update velero restore to not open every single file during extraction of the data
|
||||
1
changelogs/unreleased/1264-carlisia
Normal file
1
changelogs/unreleased/1264-carlisia
Normal file
@@ -0,0 +1 @@
|
||||
Move all the interfaces and associated types necessary to implement all of the Velero plugins to under the new package `velero`.
|
||||
1
changelogs/unreleased/1270-skriss
Normal file
1
changelogs/unreleased/1270-skriss
Normal file
@@ -0,0 +1 @@
|
||||
add panic handlers to all server-side plugin methods
|
||||
1
changelogs/unreleased/1274-tsturzl
Normal file
1
changelogs/unreleased/1274-tsturzl
Normal file
@@ -0,0 +1 @@
|
||||
AWS EBS Volume IDs now contain AZ
|
||||
1
changelogs/unreleased/1278-skriss
Normal file
1
changelogs/unreleased/1278-skriss
Normal file
@@ -0,0 +1 @@
|
||||
Pass --log-level flag to internal/external plugins, matching Velero server's log level
|
||||
1
changelogs/unreleased/1280-fabito
Normal file
1
changelogs/unreleased/1280-fabito
Normal file
@@ -0,0 +1 @@
|
||||
Collect 3 new metrics: backup_deletion_{attempt|failure|success}_total
|
||||
1
changelogs/unreleased/1287-nrb
Normal file
1
changelogs/unreleased/1287-nrb
Normal file
@@ -0,0 +1 @@
|
||||
Add velero install command for basic use cases.
|
||||
1
changelogs/unreleased/1293-fabito
Normal file
1
changelogs/unreleased/1293-fabito
Normal file
@@ -0,0 +1 @@
|
||||
gracefully handle failed API groups from the discovery API
|
||||
1
changelogs/unreleased/1296-skriss
Normal file
1
changelogs/unreleased/1296-skriss
Normal file
@@ -0,0 +1 @@
|
||||
make resticrepositories non-restorable resources
|
||||
1
changelogs/unreleased/1297-skriss
Normal file
1
changelogs/unreleased/1297-skriss
Normal file
@@ -0,0 +1 @@
|
||||
check for and exclude hostPath-based persistent volumes from restic backup
|
||||
1
changelogs/unreleased/1298-sylr
Normal file
1
changelogs/unreleased/1298-sylr
Normal file
@@ -0,0 +1 @@
|
||||
azure: restore volumes in the original region's zone
|
||||
1
changelogs/unreleased/1300-skriss
Normal file
1
changelogs/unreleased/1300-skriss
Normal file
@@ -0,0 +1 @@
|
||||
Send stack traces from plugin errors to Velero via gRPC so error location info can be logged
|
||||
1
changelogs/unreleased/1301-skriss
Normal file
1
changelogs/unreleased/1301-skriss
Normal file
@@ -0,0 +1 @@
|
||||
log error locations from plugin logger, and don't overwrite them in the client logger if they exist already
|
||||
1
changelogs/unreleased/1304-skriss
Normal file
1
changelogs/unreleased/1304-skriss
Normal file
@@ -0,0 +1 @@
|
||||
enable restore item actions to return additional related items to be restored; have pods return PVCs and PVCs return PVs
|
||||
1
changelogs/unreleased/1306-pei0804
Normal file
1
changelogs/unreleased/1306-pei0804
Normal file
@@ -0,0 +1 @@
|
||||
compile only once to lower the initialization cost for regexp.MustCompile.
|
||||
1
changelogs/unreleased/1311-skriss
Normal file
1
changelogs/unreleased/1311-skriss
Normal file
@@ -0,0 +1 @@
|
||||
Allow restic restore helper image name to be optionally specified via ConfigMap
|
||||
1
changelogs/unreleased/1316-mstump
Normal file
1
changelogs/unreleased/1316-mstump
Normal file
@@ -0,0 +1 @@
|
||||
Fix for #1312, use describe to determine if AWS EBS snapshot is encrypted and explicitly pass that value in EC2 CreateVolume call.
|
||||
1
changelogs/unreleased/1318-skriss
Normal file
1
changelogs/unreleased/1318-skriss
Normal file
@@ -0,0 +1 @@
|
||||
remove Warning field from restore item action output
|
||||
1
changelogs/unreleased/1319-carlisia
Normal file
1
changelogs/unreleased/1319-carlisia
Normal file
@@ -0,0 +1 @@
|
||||
Bump plugin ProtocolVersion to version 2
|
||||
1
changelogs/unreleased/1321-skriss
Normal file
1
changelogs/unreleased/1321-skriss
Normal file
@@ -0,0 +1 @@
|
||||
rename BlockStore plugin to VolumeSnapshotter
|
||||
12
changelogs/unreleased/1323-skriss
Normal file
12
changelogs/unreleased/1323-skriss
Normal file
@@ -0,0 +1,12 @@
|
||||
remove deprecated code in preparation for v1.0 release:
|
||||
- remove ark.heptio.com API group
|
||||
- remove support for reading ark-backup.json files from object storage
|
||||
- remove Ark field from RestoreResult type
|
||||
- remove support for "hook.backup.ark.heptio.com/..." annotations for specifying hooks
|
||||
- remove support for $HOME/.config/ark/ client config directory
|
||||
- remove support for restoring Azure snapshots using short snapshot ID formats in backup metadata
|
||||
- stop applying "velero-restore" label to restored resources and remove it from the API pkg
|
||||
- remove code that strips the "gc.ark.heptio.com" finalizer from backups
|
||||
- remove support for "backup.ark.heptio.com/..." annotations for requesting restic backups
|
||||
- remove "ark"-prefixed prometheus metrics
|
||||
- remove VolumeBackups field and related code from Backup's status
|
||||
1
changelogs/unreleased/1326-amanw
Normal file
1
changelogs/unreleased/1326-amanw
Normal file
@@ -0,0 +1 @@
|
||||
Added ability to dynamically disable controllers
|
||||
1
changelogs/unreleased/1330-skriss
Normal file
1
changelogs/unreleased/1330-skriss
Normal file
@@ -0,0 +1 @@
|
||||
set backup's start timestamp before patching it to InProgress so start times display in `velero backup get` while in progress
|
||||
1
changelogs/unreleased/1331-skriss
Normal file
1
changelogs/unreleased/1331-skriss
Normal file
@@ -0,0 +1 @@
|
||||
Improve error message around invalid S3 URLs, and gracefully handle trailing backslashes.
|
||||
1
changelogs/unreleased/1336-sseago
Normal file
1
changelogs/unreleased/1336-sseago
Normal file
@@ -0,0 +1 @@
|
||||
Add support for allowing a RestoreItemAction to skip item restore.
|
||||
1
changelogs/unreleased/1337-skriss
Normal file
1
changelogs/unreleased/1337-skriss
Normal file
@@ -0,0 +1 @@
|
||||
velero backup logs & velero restore logs: show helpful error message if backup/restore does not exist or is not finished processing
|
||||
1
changelogs/unreleased/1338-skriss
Normal file
1
changelogs/unreleased/1338-skriss
Normal file
@@ -0,0 +1 @@
|
||||
aws/azure/gcp: fail fast if unsupported keys are provided in BackupStorageLocation/VolumeSnapshotLocation config
|
||||
1
changelogs/unreleased/1339-carlisia
Normal file
1
changelogs/unreleased/1339-carlisia
Normal file
@@ -0,0 +1 @@
|
||||
Validate that there can't be any duplicate plugin name, and that the name format is `example.io/name`.
|
||||
1
changelogs/unreleased/1352-vorar
Normal file
1
changelogs/unreleased/1352-vorar
Normal file
@@ -0,0 +1 @@
|
||||
set default TTL for backups
|
||||
1
changelogs/unreleased/1353-fabito
Normal file
1
changelogs/unreleased/1353-fabito
Normal file
@@ -0,0 +1 @@
|
||||
Add gauge metrics for number of existing backups and restores
|
||||
1
changelogs/unreleased/1358-skriss
Normal file
1
changelogs/unreleased/1358-skriss
Normal file
@@ -0,0 +1 @@
|
||||
instantiate the plugin manager with the per-restore logger so plugin logs are captured in the per-restore log
|
||||
7
changelogs/unreleased/1364-skriss
Normal file
7
changelogs/unreleased/1364-skriss
Normal file
@@ -0,0 +1,7 @@
|
||||
Azure: allow credentials to be provided in a .env file (path specified by $AZURE_CREDENTIALS_FILE), formatted like:
|
||||
|
||||
AZURE_TENANT_ID=${AZURE_TENANT_ID}
|
||||
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
|
||||
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
|
||||
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
|
||||
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
|
||||
1
changelogs/unreleased/1365-skriss
Normal file
1
changelogs/unreleased/1365-skriss
Normal file
@@ -0,0 +1 @@
|
||||
change container base images to debian:stretch-slim and upgrade to go 1.12
|
||||
1
changelogs/unreleased/1366-nrb
Normal file
1
changelogs/unreleased/1366-nrb
Normal file
@@ -0,0 +1 @@
|
||||
Support non-namespaced names for built-in plugins
|
||||
1
changelogs/unreleased/1367-skriss
Normal file
1
changelogs/unreleased/1367-skriss
Normal file
@@ -0,0 +1 @@
|
||||
restic repo ensurer: return error if new repository does not become ready within a minute, and fix channel closing/deletion
|
||||
1
changelogs/unreleased/1384-skriss
Normal file
1
changelogs/unreleased/1384-skriss
Normal file
@@ -0,0 +1 @@
|
||||
remove deprecated "hooks" for backups (they've been replaced by "pre hooks")
|
||||
1
changelogs/unreleased/1386-skriss
Normal file
1
changelogs/unreleased/1386-skriss
Normal file
@@ -0,0 +1 @@
|
||||
add PartiallyFailed phase for backups, log + continue on errors during backup process
|
||||
1
changelogs/unreleased/1390-ncdc,carlisia
Normal file
1
changelogs/unreleased/1390-ncdc,carlisia
Normal file
@@ -0,0 +1 @@
|
||||
Fail backup if it already exists in object storage.
|
||||
1
changelogs/unreleased/1392-anshulc
Normal file
1
changelogs/unreleased/1392-anshulc
Normal file
@@ -0,0 +1 @@
|
||||
shorten label values when they're longer than 63 characters
|
||||
1
changelogs/unreleased/140-nrb
Normal file
1
changelogs/unreleased/140-nrb
Normal file
@@ -0,0 +1 @@
|
||||
Disallow bucket names starting with '-'
|
||||
1
changelogs/unreleased/1405-skriss
Normal file
1
changelogs/unreleased/1405-skriss
Normal file
@@ -0,0 +1 @@
|
||||
GCP: add optional 'project' config to volume snapshot location for if snapshots are in a different project than the IAM account
|
||||
1
changelogs/unreleased/1416-skriss
Normal file
1
changelogs/unreleased/1416-skriss
Normal file
@@ -0,0 +1 @@
|
||||
switch from `restic stats` to `restic snapshots` for checking restic repository existence
|
||||
1
changelogs/unreleased/1439-nrb
Normal file
1
changelogs/unreleased/1439-nrb
Normal file
@@ -0,0 +1 @@
|
||||
Use `latest` image tag if no version information is provided at build time
|
||||
1
changelogs/unreleased/1447-fabito
Normal file
1
changelogs/unreleased/1447-fabito
Normal file
@@ -0,0 +1 @@
|
||||
check backup existence before download
|
||||
1
changelogs/unreleased/1448-fabito
Normal file
1
changelogs/unreleased/1448-fabito
Normal file
@@ -0,0 +1 @@
|
||||
Expose the timestamp of the last successful backup in a gauge
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
Copyright 2018 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -45,7 +45,7 @@ func main() {
|
||||
}
|
||||
|
||||
// done returns true if for each directory under /restores, a file exists
|
||||
// within the .ark/ subdirectory whose name is equal to os.Args[1], or
|
||||
// within the .velero/ subdirectory whose name is equal to os.Args[1], or
|
||||
// false otherwise
|
||||
func done() bool {
|
||||
children, err := ioutil.ReadDir("/restores")
|
||||
@@ -60,7 +60,7 @@ func done() bool {
|
||||
continue
|
||||
}
|
||||
|
||||
doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1])
|
||||
doneFile := filepath.Join("/restores", child.Name(), ".velero", os.Args[1])
|
||||
|
||||
if _, err := os.Stat(doneFile); os.IsNotExist(err) {
|
||||
fmt.Printf("Not found: %s\n", doneFile)
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 the Heptio Ark contributors.
|
||||
Copyright 2017 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -22,8 +22,8 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"github.com/heptio/ark/pkg/cmd"
|
||||
"github.com/heptio/ark/pkg/cmd/ark"
|
||||
"github.com/heptio/velero/pkg/cmd"
|
||||
"github.com/heptio/velero/pkg/cmd/velero"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -31,6 +31,6 @@ func main() {
|
||||
|
||||
baseName := filepath.Base(os.Args[0])
|
||||
|
||||
err := ark.NewCommand(baseName).Execute()
|
||||
err := velero.NewCommand(baseName).Execute()
|
||||
cmd.CheckError(err)
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
# How Ark Works
|
||||
# How Velero Works
|
||||
|
||||
Each Ark operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Ark also includes controllers that process the custom resources to perform backups, restores, and all related operations.
|
||||
Each Velero operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Velero also includes controllers that process the custom resources to perform backups, restores, and all related operations.
|
||||
|
||||
You can back up or restore all objects in your cluster, or you can filter objects by type, namespace, and/or label.
|
||||
|
||||
Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).
|
||||
Velero is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).
|
||||
|
||||
## On-demand backups
|
||||
|
||||
@@ -27,17 +27,17 @@ Scheduled backups are saved with the name `<SCHEDULE NAME>-<TIMESTAMP>`, where `
|
||||
|
||||
## Restores
|
||||
|
||||
The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".
|
||||
The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Velero supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".
|
||||
|
||||
The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `ark.heptio.com/restore-name` and value `<RESTORE NAME>`.
|
||||
The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `velero.io/restore-name` and value `<RESTORE NAME>`.
|
||||
|
||||
You can also run the Ark server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
|
||||
You can also run the Velero server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
|
||||
|
||||
## Backup workflow
|
||||
|
||||
When you run `ark backup create test-backup`:
|
||||
When you run `velero backup create test-backup`:
|
||||
|
||||
1. The Ark client makes a call to the Kubernetes API server to create a `Backup` object.
|
||||
1. The Velero client makes a call to the Kubernetes API server to create a `Backup` object.
|
||||
|
||||
1. The `BackupController` notices the new `Backup` object and performs validation.
|
||||
|
||||
@@ -45,19 +45,19 @@ When you run `ark backup create test-backup`:
|
||||
|
||||
1. The `BackupController` makes a call to the object storage service -- for example, AWS S3 -- to upload the backup file.
|
||||
|
||||
By default, `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `ark backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`.
|
||||
By default, `velero backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `velero backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`.
|
||||
|
||||
![19]
|
||||
|
||||
## Backed-up API versions
|
||||
|
||||
Ark backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful.
|
||||
Velero backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful.
|
||||
|
||||
For example, if the cluster being backed up has a `gizmos` resource in the `things` API group, with group/versions `things/v1alpha1`, `things/v1beta1`, and `things/v1`, and the server's preferred group/version is `things/v1`, then all `gizmos` will be backed up from the `things/v1` API endpoint. When backups from this cluster are restored, the target cluster **must** have the `things/v1` endpoint in order for `gizmos` to be restored. Note that `things/v1` **does not** need to be the preferred version in the target cluster; it just needs to exist.
|
||||
|
||||
## Set a backup to expire
|
||||
|
||||
When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Ark sees that an existing backup resource is expired, it removes:
|
||||
When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Velero sees that an existing backup resource is expired, it removes:
|
||||
|
||||
* The backup resource
|
||||
* The backup file from cloud object storage
|
||||
@@ -66,7 +66,7 @@ When you create a backup, you can specify a TTL by adding the flag `--ttl <DURAT
|
||||
|
||||
## Object storage sync
|
||||
|
||||
Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
|
||||
Velero treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Velero synchronizes the information from object storage to Kubernetes.
|
||||
|
||||
This allows restore functionality to work in a cluster migration scenario, where the original backup objects do not exist in the new cluster.
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## API types
|
||||
|
||||
Here we list the API types that have some functionality that you can only configure via json/yaml vs the `ark` cli
|
||||
Here we list the API types that have some functionality that you can only configure via json/yaml vs the `velero` cli
|
||||
(hooks)
|
||||
|
||||
* [Backup][1]
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
|
||||
## Use
|
||||
|
||||
The `Backup` API type is used as a request for the Ark Server to perform a backup. Once created, the
|
||||
Ark Server immediately starts the backup process.
|
||||
The `Backup` API type is used as a request for the Velero server to perform a backup. Once created, the
|
||||
Velero Server immediately starts the backup process.
|
||||
|
||||
## API GroupVersion
|
||||
|
||||
Backup belongs to the API group version `ark.heptio.com/v1`.
|
||||
Backup belongs to the API group version `velero.io/v1`.
|
||||
|
||||
## Definition
|
||||
|
||||
@@ -15,15 +15,15 @@ Here is a sample `Backup` object with each of the fields documented:
|
||||
|
||||
```yaml
|
||||
# Standard Kubernetes API Version declaration. Required.
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
# Standard Kubernetes Kind declaration. Required.
|
||||
kind: Backup
|
||||
# Standard Kubernetes metadata. Required.
|
||||
metadata:
|
||||
# Backup name. May be any valid Kubernetes object name. Required.
|
||||
name: a
|
||||
# Backup namespace. Required. In version 0.7.0 and later, can be any string. Must be the namespace of the Ark server.
|
||||
namespace: heptio-ark
|
||||
# Backup namespace. Must be the namespace of the Velero server. Required.
|
||||
namespace: velero
|
||||
# Parameters about the backup. Required.
|
||||
spec:
|
||||
# Array of namespaces to include in the backup. If unspecified, all namespaces are included.
|
||||
@@ -54,11 +54,11 @@ spec:
|
||||
# Individual objects must match this label selector to be included in the backup. Optional.
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app: ark
|
||||
app: velero
|
||||
component: server
|
||||
# Whether or not to snapshot volumes. This only applies to PersistentVolumes for Azure, GCE, and
|
||||
# AWS. Valid values are true, false, and null/unset. If unset, Ark performs snapshots as long as
|
||||
# a persistent volume provider is configured for Ark.
|
||||
# AWS. Valid values are true, false, and null/unset. If unset, Velero performs snapshots as long as
|
||||
# a persistent volume provider is configured for Velero.
|
||||
snapshotVolumes: null
|
||||
# Where to store the tarball and logs.
|
||||
storageLocation: aws-primary
|
||||
@@ -66,7 +66,9 @@ spec:
|
||||
volumeSnapshotLocations:
|
||||
- aws-primary
|
||||
- gcp-primary
|
||||
# The amount of time before this backup is eligible for garbage collection.
|
||||
# The amount of time before this backup is eligible for garbage collection. If not specified,
|
||||
# a default value of 30 days will be used. The default can be configured on the velero server
|
||||
# by passing the flag --default-backup-ttl.
|
||||
ttl: 24h0m0s
|
||||
# Actions to perform at different times during a backup. The only hook currently supported is
|
||||
# executing a command in a container in a pod using the pod exec API. Optional.
|
||||
@@ -92,13 +94,9 @@ spec:
|
||||
# This hook only applies to objects matching this label selector. Optional.
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app: ark
|
||||
app: velero
|
||||
component: server
|
||||
# An array of hooks to run before executing custom actions. Currently only "exec" hooks are supported.
|
||||
# DEPRECATED. Use pre instead.
|
||||
hooks:
|
||||
# Same content as pre below.
|
||||
# An array of hooks to run before executing custom actions. Currently only "exec" hooks are supported.
|
||||
pre:
|
||||
-
|
||||
# The type of hook. This must be "exec".
|
||||
@@ -121,24 +119,25 @@ spec:
|
||||
# Same content as pre above.
|
||||
# Status about the Backup. Users should not set any data here.
|
||||
status:
|
||||
# The version of this Backup. The only version currently supported is 1.
|
||||
version: 1
|
||||
# The date and time when the Backup is eligible for garbage collection.
|
||||
expiration: null
|
||||
# The current phase. Valid values are New, FailedValidation, InProgress, Completed, Failed.
|
||||
# The current phase. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed.
|
||||
phase: ""
|
||||
# An array of any validation errors encountered.
|
||||
validationErrors: null
|
||||
# The version of this Backup. The only version currently supported is 1.
|
||||
version: 1
|
||||
# Information about PersistentVolumes needed during restores.
|
||||
volumeBackups:
|
||||
# Each key is the name of a PersistentVolume.
|
||||
some-pv-name:
|
||||
# The ID used by the cloud provider for the snapshot created for this Backup.
|
||||
snapshotID: snap-1234
|
||||
# The type of the volume in the cloud provider API.
|
||||
type: io1
|
||||
# The availability zone where the volume resides in the cloud provider.
|
||||
availabilityZone: my-zone
|
||||
# The amount of provisioned IOPS for the volume. Optional.
|
||||
iops: 10000
|
||||
# Date/time when the backup started being processed.
|
||||
startTimestamp: 2019-04-29T15:58:43Z
|
||||
# Date/time when the backup finished being processed.
|
||||
completionTimestamp: 2019-04-29T15:58:56Z
|
||||
# Number of volume snapshots that Velero tried to create for this backup.
|
||||
volumeSnapshotsAttempted: 2
|
||||
# Number of volume snapshots that Velero successfully created for this backup.
|
||||
volumeSnapshotsCompleted: 1
|
||||
# Number of warnings that were logged by the backup.
|
||||
warnings: 2
|
||||
# Number of errors that were logged by the backup.
|
||||
errors: 0
|
||||
|
||||
```
|
||||
|
||||
@@ -1,21 +1,19 @@
|
||||
# Ark Backup Storage Locations
|
||||
# Velero Backup Storage Locations
|
||||
|
||||
## Backup Storage Location
|
||||
|
||||
Ark can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.
|
||||
Velero can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.
|
||||
|
||||
Ark must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`, however the name can be changed by specifying `--default-backup-storage-location` on `ark server`. Backups that do not explicitly specify a storage location will be saved to this `BackupStorageLocation`.
|
||||
|
||||
> *NOTE*: `BackupStorageLocation` takes the place of the `Config.backupStorageProvider` key as of v0.10.0
|
||||
Velero must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`, however the name can be changed by specifying `--default-backup-storage-location` on `velero server`. Backups that do not explicitly specify a storage location will be saved to this `BackupStorageLocation`.
|
||||
|
||||
A sample YAML `BackupStorageLocation` looks like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
||||
@@ -32,7 +30,7 @@ The configurable parameters are as follows:
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. |
|
||||
| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. |
|
||||
| `objectStorage` | ObjectStorageLocation | Specification of the object storage for the given provider. |
|
||||
| `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
|
||||
| `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. |
|
||||
@@ -48,10 +46,10 @@ The configurable parameters are as follows:
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
|
||||
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Velero can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
|
||||
| `publicUrl` | string | Empty | *Example*: https://minio.mycluster.com<br><br>If specified, use this instead of `s3Url` when generating download URLs (e.g., for logs). This field is primarily for local storage services like Minio.|
|
||||
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
|
||||
| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by ark cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|
|
||||
| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by velero cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|
|
||||
|
||||
#### Azure
|
||||
|
||||
|
||||
@@ -1,21 +1,21 @@
|
||||
# Ark Volume Snapshot Location
|
||||
# Velero Volume Snapshot Location
|
||||
|
||||
## Volume Snapshot Location
|
||||
|
||||
A volume snapshot location is the location in which to store the volume snapshots created for a backup.
|
||||
|
||||
Ark can be configured to take snapshots of volumes from multiple providers. Ark also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.
|
||||
Velero can be configured to take snapshots of volumes from multiple providers. Velero also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.
|
||||
|
||||
Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Ark must have at least one `VolumeSnapshotLocation` per cloud provider.
|
||||
Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Velero must have at least one `VolumeSnapshotLocation` per cloud provider.
|
||||
|
||||
A sample YAML `VolumeSnapshotLocation` looks like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: aws-default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
config:
|
||||
@@ -30,7 +30,7 @@ The configurable parameters are as follows:
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
|
||||
| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
|
||||
| `config` | See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.
|
||||
|
||||
#### AWS
|
||||
@@ -52,9 +52,16 @@ The configurable parameters are as follows:
|
||||
|
||||
#### GCP
|
||||
|
||||
No parameters required.
|
||||
##### config
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `snapshotLocation` | string | Empty | *Example*: "us-central1"<br><br>See [GCP documentation][4] for the full list.<br><br>If not specified the snapshots are stored in the [default location][5]. |
|
||||
| `project` | string | Empty | The project ID where snapshots should be stored, if different than the project that your IAM account is in. Optional. |
|
||||
|
||||
[0]: #aws
|
||||
[1]: #gcp
|
||||
[2]: #azure
|
||||
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
|
||||
[4]: https://cloud.google.com/storage/docs/locations#available_locations
|
||||
[5]: https://cloud.google.com/compute/docs/disks/create-snapshots#default_location
|
||||
|
||||
@@ -1,29 +1,47 @@
|
||||
# Run Ark on AWS
|
||||
# Run Velero on AWS
|
||||
|
||||
To set up Ark on AWS, you:
|
||||
To set up Velero on AWS, you:
|
||||
|
||||
* Download an official release of Velero
|
||||
* Create your S3 bucket
|
||||
* Create an AWS IAM user for Ark
|
||||
* Configure the server
|
||||
* Create a Secret for your credentials
|
||||
* Create an AWS IAM user for Velero
|
||||
* Install the server
|
||||
|
||||
If you do not have the `aws` CLI locally installed, follow the [user guide][5] to set it up.
|
||||
|
||||
## Download Velero
|
||||
|
||||
1. Download the [latest official release's](https://github.com/heptio/velero/releases) tarball for your client platform.
|
||||
|
||||
_We strongly recommend that you use an [official release](https://github.com/heptio/velero/releases) of
|
||||
Velero. The tarballs for each release contain the `velero` command-line client. The code in the master branch
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
## Create S3 bucket
|
||||
|
||||
Heptio Ark requires an object storage bucket to store backups in, preferrably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately:
|
||||
Velero requires an object storage bucket to store backups in, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
REGION=<YOUR_REGION>
|
||||
aws s3api create-bucket \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--region <YOUR_REGION> \
|
||||
--create-bucket-configuration LocationConstraint=<YOUR_REGION>
|
||||
--bucket $BUCKET \
|
||||
--region $REGION \
|
||||
--create-bucket-configuration LocationConstraint=$REGION
|
||||
```
|
||||
NOTE: us-east-1 does not support a `LocationConstraint`. If your region is `us-east-1`, omit the bucket configuration:
|
||||
|
||||
```bash
|
||||
aws s3api create-bucket \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--bucket $BUCKET \
|
||||
--region us-east-1
|
||||
```
|
||||
|
||||
@@ -34,16 +52,15 @@ For more information, see [the AWS documentation on IAM users][14].
|
||||
1. Create the IAM user:
|
||||
|
||||
```bash
|
||||
aws iam create-user --user-name heptio-ark
|
||||
aws iam create-user --user-name velero
|
||||
```
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
> If you'll be using Velero to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
2. Attach policies to give `heptio-ark` the necessary permissions:
|
||||
2. Attach policies to give `velero` the necessary permissions:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
cat > heptio-ark-policy.json <<EOF
|
||||
cat > velero-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -86,15 +103,15 @@ For more information, see [the AWS documentation on IAM users][14].
|
||||
EOF
|
||||
|
||||
aws iam put-user-policy \
|
||||
--user-name heptio-ark \
|
||||
--policy-name heptio-ark \
|
||||
--policy-document file://heptio-ark-policy.json
|
||||
--user-name velero \
|
||||
--policy-name velero \
|
||||
--policy-document file://velero-policy.json
|
||||
```
|
||||
|
||||
3. Create an access key for the user:
|
||||
|
||||
```bash
|
||||
aws iam create-access-key --user-name heptio-ark
|
||||
aws iam create-access-key --user-name velero
|
||||
```
|
||||
|
||||
The result should look like:
|
||||
@@ -102,7 +119,7 @@ For more information, see [the AWS documentation on IAM users][14].
|
||||
```json
|
||||
{
|
||||
"AccessKey": {
|
||||
"UserName": "heptio-ark",
|
||||
"UserName": "velero",
|
||||
"Status": "Active",
|
||||
"CreateDate": "2017-07-31T22:24:41.576Z",
|
||||
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
|
||||
@@ -111,7 +128,7 @@ For more information, see [the AWS documentation on IAM users][14].
|
||||
}
|
||||
```
|
||||
|
||||
4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
|
||||
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
@@ -121,43 +138,33 @@ For more information, see [the AWS documentation on IAM users][14].
|
||||
|
||||
where the access key id and secret are the values returned from the `create-access-key` request.
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
## Install and start Velero
|
||||
|
||||
Install Velero, including all prerequisites, into the cluster and start the deployment. This will create a namespace called `velero`, and place a deployment named `velero` in it.
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
velero install \
|
||||
--provider aws \
|
||||
--bucket $BUCKET \
|
||||
--secret-file ./credentials-velero \
|
||||
--backup-location-config region=$REGION \
|
||||
--snapshot-location-config region=$REGION
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready.
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
(Optional) Specify [additional configurable parameters][21] for the `--backup-location-config` flag.
|
||||
|
||||
Specify the following values in the example files:
|
||||
(Optional) Specify [additional configurable parameters][6] for the `--snapshot-location-config` flag.
|
||||
|
||||
* In `config/aws/05-ark-backupstoragelocation.yaml`:
|
||||
For more complex installation needs, use either the Helm chart, or add `--dry-run -o yaml` options for generating the YAML representation for the installation.
|
||||
|
||||
* Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3 backup storage, region is optional and will be queried from the AWS S3 API if not provided). See the [BackupStorageLocation definition][21] for details.
|
||||
## Setting AWS_CLUSTER_NAME (Optional)
|
||||
|
||||
* In `config/aws/06-ark-volumesnapshotlocation.yaml`:
|
||||
* If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
|
||||
|
||||
* Replace `<YOUR_REGION>`. See the [VolumeSnapshotLocation definition][6] for details.
|
||||
|
||||
* (Optional, use only to specify multiple volume snapshot locations) In `config/aws/10-deployment.yaml` (or `config/aws/10-deployment-kube2iam.yaml`, as appropriate):
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `gp2`. This is AWS's default `StorageClass` name.
|
||||
|
||||
* (Optional) If you have multiple clusters and you want to support migration of resources between them, in file `config/aws/10-deployment.yaml`:
|
||||
|
||||
* Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `<YOUR_CLUSTER_NAME>` with the current cluster's name. When restoring backup, it will make Ark (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
|
||||
* Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
|
||||
The best way to get the current cluster's name is to either check it with used deployment tool or to read it directly from the EC2 instances tags.
|
||||
|
||||
The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
|
||||
@@ -180,28 +187,18 @@ Specify the following values in the example files:
|
||||
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
|
||||
```
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/aws/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/aws/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/aws/10-deployment.yaml
|
||||
```
|
||||
|
||||
## ALTERNATIVE: Setup permissions using kube2iam
|
||||
|
||||
[Kube2iam](https://github.com/jtblin/kube2iam) is a Kubernetes application that allows managing AWS IAM permissions for pod via annotations rather than operating on API keys.
|
||||
|
||||
> This path assumes you have `kube2iam` already running in your Kubernetes cluster. If that is not the case, please install it first, following the docs here: [https://github.com/jtblin/kube2iam](https://github.com/jtblin/kube2iam)
|
||||
|
||||
It can be set up for Ark by creating a role that will have required permissions, and later by adding the permissions annotation on the ark deployment to define which role it should use internally.
|
||||
It can be set up for Velero by creating a role that will have required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally.
|
||||
|
||||
1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role:
|
||||
|
||||
```bash
|
||||
cat > heptio-ark-trust-policy.json <<EOF
|
||||
cat > velero-trust-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -227,14 +224,14 @@ It can be set up for Ark by creating a role that will have required permissions,
|
||||
2. Create the IAM role:
|
||||
|
||||
```bash
|
||||
aws iam create-role --role-name heptio-ark --assume-role-policy-document file://./heptio-ark-trust-policy.json
|
||||
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
|
||||
```
|
||||
|
||||
3. Attach policies to give `heptio-ark` the necessary permissions:
|
||||
3. Attach policies to give `velero` the necessary permissions:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
cat > heptio-ark-policy.json <<EOF
|
||||
cat > velero-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -277,31 +274,35 @@ It can be set up for Ark by creating a role that will have required permissions,
|
||||
EOF
|
||||
|
||||
aws iam put-role-policy \
|
||||
--role-name heptio-ark \
|
||||
--policy-name heptio-ark-policy \
|
||||
--policy-document file://./heptio-ark-policy.json
|
||||
--role-name velero \
|
||||
--policy-name velero-policy \
|
||||
--policy-document file://./velero-policy.json
|
||||
```
|
||||
4. Update `AWS_ACCOUNT_ID` & `HEPTIO_ARK_ROLE_NAME` in the file `config/aws/10-deployment-kube2iam.yaml`:
|
||||
4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` with `kubectl edit deploy/velero -n velero` and add the following annotation:
|
||||
|
||||
```
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
|
||||
...
|
||||
```
|
||||
|
||||
5. Run Ark deployment using the file `config/aws/10-deployment-kube2iam.yaml`.
|
||||
## Installing the nginx example (optional)
|
||||
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `gp2`. This is AWS's default `StorageClass` name.
|
||||
|
||||
[0]: namespace.md
|
||||
[5]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Run Ark on Azure
|
||||
# Run Velero on Azure
|
||||
|
||||
To configure Ark on Azure, you:
|
||||
To configure Velero on Azure, you:
|
||||
|
||||
* Download an official release of Velero
|
||||
* Create your Azure storage account and blob container
|
||||
* Create Azure service principal for Ark
|
||||
* Configure the server
|
||||
* Create a Secret for your credentials
|
||||
* Create Azure service principal for Velero
|
||||
* Install the server
|
||||
|
||||
If you do not have the `az` Azure CLI 2.0 installed locally, follow the [install guide][18] to set it up.
|
||||
|
||||
@@ -20,13 +20,29 @@ az login
|
||||
Ensure that the VMs for your agent pool allow Managed Disks. If I/O performance is critical,
|
||||
consider using Premium Managed Disks, which are SSD backed.
|
||||
|
||||
## Download Velero
|
||||
|
||||
1. Download the [latest official release's](https://github.com/heptio/velero/releases) tarball for your client platform.
|
||||
|
||||
_We strongly recommend that you use an [official release](https://github.com/heptio/velero/releases) of
|
||||
Velero. The tarballs for each release contain the `velero` command-line client. The code in the master branch
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
## Create Azure storage account and blob container
|
||||
|
||||
Heptio Ark requires a storage account and blob container in which to store backups.
|
||||
Velero requires a storage account and blob container in which to store backups.
|
||||
|
||||
The storage account can be created in the same Resource Group as your Kubernetes cluster or
|
||||
separated into its own Resource Group. The example below shows the storage account created in a
|
||||
separate `Ark_Backups` Resource Group.
|
||||
separate `Velero_Backups` Resource Group.
|
||||
|
||||
The storage account needs to be created with a globally unique id since this is used for dns. In
|
||||
the sample script below, we're generating a random name using `uuidgen`, but you can come up with
|
||||
@@ -36,11 +52,11 @@ configured to only allow access via https.
|
||||
|
||||
```bash
|
||||
# Create a resource group for the backups storage account. Change the location as needed.
|
||||
AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups
|
||||
AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
|
||||
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
|
||||
|
||||
# Create the storage account
|
||||
AZURE_STORAGE_ACCOUNT_ID="ark$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
|
||||
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
|
||||
az storage account create \
|
||||
--name $AZURE_STORAGE_ACCOUNT_ID \
|
||||
--resource-group $AZURE_BACKUP_RESOURCE_GROUP \
|
||||
@@ -51,10 +67,11 @@ az storage account create \
|
||||
--access-tier Hot
|
||||
```
|
||||
|
||||
Create the blob container named `ark`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.
|
||||
Create the blob container named `velero`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.
|
||||
|
||||
```bash
|
||||
az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
|
||||
BLOB_CONTAINER=velero
|
||||
az storage container create -n $BLOB_CONTAINER --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
|
||||
```
|
||||
|
||||
## Get resource group for persistent volume snapshots
|
||||
@@ -78,7 +95,7 @@ az storage container create -n ark --public-access off --account-name $AZURE_STO
|
||||
|
||||
## Create service principal
|
||||
|
||||
To integrate Ark with Azure, you must create an Ark-specific [service principal][17].
|
||||
To integrate Velero with Azure, you must create a Velero-specific [service principal][17].
|
||||
|
||||
1. Obtain your Azure Account Subscription ID and Tenant ID:
|
||||
|
||||
@@ -89,61 +106,58 @@ To integrate Ark with Azure, you must create an Ark-specific [service principal]
|
||||
|
||||
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
> If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
```bash
|
||||
# Create service principal and specify your own password
|
||||
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
|
||||
az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
|
||||
# Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --query 'password' -o tsv`
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`
|
||||
|
||||
# After creating the service principal, obtain the client id
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "heptio-ark" --query '[0].appId' -o tsv`
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
|
||||
```
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0].
|
||||
Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
cat << EOF > ./credentials-velero
|
||||
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
|
||||
AZURE_TENANT_ID=${AZURE_TENANT_ID}
|
||||
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
|
||||
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
|
||||
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
|
||||
EOF
|
||||
```
|
||||
|
||||
Now you need to create a Secret that contains all the environment variables you just set. The command looks like the following:
|
||||
## Install and start Velero
|
||||
|
||||
Install Velero, including all prerequisites, into the cluster and start the deployment. This will create a namespace called `velero`, and place a deployment named `velero` in it.
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
|
||||
--from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
|
||||
--from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
|
||||
--from-literal AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} \
|
||||
--from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
|
||||
velero install \
|
||||
--provider azure \
|
||||
--bucket $BLOB_CONTAINER \
|
||||
--secret-file ./credentials-velero \
|
||||
--backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID \
|
||||
--snapshot-location-config apiTimeout=<YOUR_TIMEOUT>
|
||||
```
|
||||
|
||||
Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:
|
||||
Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready.
|
||||
|
||||
* In file `config/azure/05-ark-backupstoragelocation.yaml`:
|
||||
(Optional) Specify [additional configurable parameters][21] for the `--backup-location-config` flag.
|
||||
|
||||
* Replace `<YOUR_BLOB_CONTAINER>`, `<YOUR_STORAGE_RESOURCE_GROUP>`, and `<YOUR_STORAGE_ACCOUNT>`. See the [BackupStorageLocation definition][21] for details.
|
||||
(Optional) Specify [additional configurable parameters][8] for the `--snapshot-location-config` flag.
|
||||
|
||||
* In file `config/azure/06-ark-volumesnapshotlocation.yaml`:
|
||||
For more complex installation needs, use either the Helm chart, or add `--dry-run -o yaml` options for generating the YAML representation for the installation.
|
||||
|
||||
* Replace `<YOUR_TIMEOUT>`. See the [VolumeSnapshotLocation definition][8] for details.
|
||||
## Installing the nginx example (optional)
|
||||
|
||||
* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-ark-deployment.yaml`:
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/azure/
|
||||
```
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
|
||||
|
||||
[0]: namespace.md
|
||||
[8]: api-types/volumesnapshotlocation.md#azure
|
||||
|
||||
@@ -1,225 +0,0 @@
|
||||
# Build from source
|
||||
|
||||
* [Prerequisites][1]
|
||||
* [Download][2]
|
||||
* [Build][3]
|
||||
* [Test][12]
|
||||
* [Run][7]
|
||||
* [Vendoring dependencies][10]
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
* [Go][5] installed (minimum version 1.8)
|
||||
|
||||
## Getting the source
|
||||
|
||||
```bash
|
||||
mkdir $HOME/go
|
||||
export GOPATH=$HOME/go
|
||||
go get github.com/heptio/ark
|
||||
```
|
||||
|
||||
Where `go` is your [import path][4] for Go.
|
||||
|
||||
For Go development, it is recommended to add the Go import path (`$HOME/go` in this example) to your path.
|
||||
|
||||
|
||||
## Build
|
||||
|
||||
You can build your Ark image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows.
|
||||
|
||||
Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Heptio Ark images to your own registry. This allows any node in your cluster to pull your locally built image.
|
||||
|
||||
In the Ark root directory, to build your container with the tag `$REGISTRY/ark:$VERSION`, run:
|
||||
|
||||
```
|
||||
make container
|
||||
```
|
||||
|
||||
To push your image to a registry, use `make push`.
|
||||
|
||||
### Update generated files
|
||||
|
||||
The following files are automatically generated from the source code:
|
||||
|
||||
* The clientset
|
||||
* Listers
|
||||
* Shared informers
|
||||
* Documentation
|
||||
* Protobuf/gRPC types
|
||||
|
||||
Run `make update` to regenerate files if you make the following changes:
|
||||
|
||||
* Add/edit/remove command line flags and/or their help text
|
||||
* Add/edit/remove commands or subcommands
|
||||
* Add new API types
|
||||
|
||||
Run [generate-proto.sh][13] to regenerate files if you make the following changes:
|
||||
|
||||
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14].
|
||||
|
||||
### Cross compiling
|
||||
|
||||
By default, `make build` builds an `ark` binary for `linux-amd64`.
|
||||
To build for another platform, run `make build-<GOOS>-<GOARCH>`.
|
||||
For example, to build for the Mac, run `make build-darwin-amd64`.
|
||||
All binaries are placed in `_output/bin/<GOOS>/<GOARCH>`-- for example, `_output/bin/darwin/amd64/ark`.
|
||||
|
||||
Ark's `Makefile` has a convenience target, `all-build`, that builds the following platforms:
|
||||
|
||||
* linux-amd64
|
||||
* linux-arm
|
||||
* linux-arm64
|
||||
* darwin-amd64
|
||||
* windows-amd64
|
||||
|
||||
## 3. Test
|
||||
|
||||
To run unit tests, use `make test`. You can also run `make verify` to ensure that all generated
|
||||
files (clientset, listers, shared informers, docs) are up to date.
|
||||
|
||||
## 4. Run
|
||||
|
||||
### Prerequisites
|
||||
|
||||
When running Heptio Ark, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests):
|
||||
|
||||
* Appropriate RBAC permissions in the cluster
|
||||
* Read access for all data from the source cluster and namespaces
|
||||
* Write access to the target cluster and namespaces
|
||||
* Cloud provider credentials
|
||||
* Read/write access to volumes
|
||||
* Read/write access to object storage for backup data
|
||||
* A [BackupStorageLocation][20] object definition for the Ark server
|
||||
* (Optional) A [VolumeSnapshotLocation][21] object definition for the Ark server, to take PV snapshots
|
||||
|
||||
### Create a cluster
|
||||
|
||||
To provision a cluster on AWS using Amazon’s official CloudFormation templates, here are two options:
|
||||
|
||||
* EC2 [Quick Start for Kubernetes][17]
|
||||
|
||||
* eksctl - [a CLI for Amazon EKS][18]
|
||||
|
||||
### Option 1: Run your Ark server locally
|
||||
|
||||
Running the Ark server locally can speed up iterative development. This eliminates the need to rebuild the Ark server
|
||||
image and redeploy it to the cluster with each change.
|
||||
|
||||
#### 1. Set enviroment variables
|
||||
|
||||
Set the appropriate environment variable for your cloud provider:
|
||||
|
||||
AWS: [AWS_SHARED_CREDENTIALS_FILE][15]
|
||||
|
||||
GCP: [GOOGLE_APPLICATION_CREDENTIALS][16]
|
||||
|
||||
Azure:
|
||||
|
||||
1. AZURE_CLIENT_ID
|
||||
|
||||
2. AZURE_CLIENT_SECRET
|
||||
|
||||
3. AZURE_SUBSCRIPTION_ID
|
||||
|
||||
4. AZURE_TENANT_ID
|
||||
|
||||
5. AZURE_STORAGE_ACCOUNT_ID
|
||||
|
||||
6. AZURE_STORAGE_KEY
|
||||
|
||||
7. AZURE_RESOURCE_GROUP
|
||||
|
||||
#### 2. Create resources in a cluster
|
||||
|
||||
You may create resources on a cluster using our [example configurations][19].
|
||||
|
||||
##### Example
|
||||
|
||||
Here is how to setup using an existing cluster in AWS: At the root of the Ark repo:
|
||||
|
||||
- Edit `examples/aws/05-ark-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets.
|
||||
|
||||
- (Optional) Edit `examples/aws/06-ark-volumesnapshotlocation.yaml` to point to your AWS region.
|
||||
|
||||
Then run the commands below.
|
||||
|
||||
`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. it also contains the `heptio-ark` namespace, the `ark` ServiceAccount, and a cluster role binding to grant the `ark` ServiceAccount the cluster-admin role:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
`10-deployment.yaml` is a sample Ark config resource for AWS:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/10-deployment.yaml
|
||||
```
|
||||
|
||||
And `05-ark-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-ark-volumesnapshotlocation.yaml`:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml examples/aws/06-ark-volumesnapshotlocation.yaml
|
||||
```
|
||||
|
||||
### 3. Start the Ark server
|
||||
|
||||
* Make sure `ark` is in your `PATH` or specify the full path.
|
||||
|
||||
* Set variable for Ark as needed. The variables below can be exported as environment variables or passed as CLI cmd flags:
|
||||
* `--kubeconfig`: set the path to the kubeconfig file the Ark server uses to talk to the Kubernetes apiserver
|
||||
* `--namespace`: the set namespace where the Ark server should look for backups, schedules, restores
|
||||
* `--log-level`: set the Ark server's log level
|
||||
* `--plugin-dir`: set the directory where the Ark server looks for plugins
|
||||
* `--metrics-address`: set the bind address and port where Prometheus metrics are exposed
|
||||
|
||||
* Start the server: `ark server`
|
||||
|
||||
### Option 2: Run your Ark server in a deployment
|
||||
|
||||
1. Install Ark using a deployment:
|
||||
|
||||
We have examples of deployments for different cloud providers in `examples/<cloud-provider>/10-deployment.yaml`.
|
||||
|
||||
2. Replace the deployment's default Ark image with the image that you built. Run:
|
||||
|
||||
```
|
||||
kubectl --namespace=heptio-ark set image deployment/ark ark=$REGISTRY/ark:$VERSION
|
||||
```
|
||||
|
||||
where `$REGISTRY` and `$VERSION` are the values that you built Ark with.
|
||||
|
||||
## 5. Vendoring dependencies
|
||||
|
||||
If you need to add or update the vendored dependencies, see [Vendoring dependencies][11].
|
||||
|
||||
[0]: ../README.md
|
||||
[1]: #prerequisites
|
||||
[2]: #download
|
||||
[3]: #build
|
||||
[4]: https://blog.golang.org/organizing-go-code
|
||||
[5]: https://golang.org/doc/install
|
||||
[6]: https://github.com/heptio/ark/tree/master/examples
|
||||
[7]: #run
|
||||
[8]: config-definition.md
|
||||
[10]: #vendoring-dependencies
|
||||
[11]: vendoring-dependencies.md
|
||||
[12]: #test
|
||||
[13]: https://github.com/heptio/ark/blob/master/hack/generate-proto.sh
|
||||
[14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3
|
||||
[15]: https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#the-shared-credentials-file
|
||||
[16]: https://cloud.google.com/docs/authentication/getting-started#setting_the_environment_variable
|
||||
[17]: https://aws.amazon.com/quickstart/architecture/heptio-kubernetes/
|
||||
[18]: https://eksctl.io/
|
||||
[19]: ../examples/README.md
|
||||
[20]: api-types/backupstoragelocation.md
|
||||
[21]: api-types/volumesnapshotlocation.md
|
||||
243
docs/build-from-source.md
Normal file
243
docs/build-from-source.md
Normal file
@@ -0,0 +1,243 @@
|
||||
# Build from source
|
||||
|
||||
* [Prerequisites][1]
|
||||
* [Getting the source][2]
|
||||
* [Build][3]
|
||||
* [Test][12]
|
||||
* [Run][7]
|
||||
* [Vendoring dependencies][10]
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Access to a Kubernetes cluster, version 1.7 or later.
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
* [Go][5] installed (minimum version 1.8)
|
||||
|
||||
## Getting the source
|
||||
|
||||
### Option 1) Get latest (recommended)
|
||||
|
||||
```bash
|
||||
mkdir $HOME/go
|
||||
export GOPATH=$HOME/go
|
||||
go get github.com/heptio/velero
|
||||
```
|
||||
|
||||
Where `go` is your [import path][4] for Go.
|
||||
|
||||
For Go development, it is recommended to add the Go import path (`$HOME/go` in this example) to your path.
|
||||
|
||||
### Option 2) Release archive
|
||||
Download the archive named `Source code` from the [release page][22] and extract it in your Go import path as `src/github.com/heptio/velero`.
|
||||
|
||||
Note that the Makefile targets assume building from a git repository. When building from an archive, you will be limited to the `go build` commands described below.
|
||||
|
||||
## Build
|
||||
There are a number of different ways to build `velero` depending on your needs. This section outlines the main possibilities.
|
||||
|
||||
To build the `velero` binary on your local machine, compiled for your OS and architecture, run:
|
||||
```bash
|
||||
go build ./cmd/velero
|
||||
```
|
||||
or:
|
||||
```bash
|
||||
make local
|
||||
```
|
||||
|
||||
The latter will place the compiled binary under `$PWD/_output/bin/$GOOS/$GOARCH`, and will splice version and git commit information in so that `velero version` displays proper output. `velero install` will also use the version information to determine which tagged image to deploy.
|
||||
|
||||
To build the `velero` binary targeting `linux/amd64` within a build container on your local machine, run:
|
||||
```bash
|
||||
make build
|
||||
```
|
||||
|
||||
See the **Cross compiling** section below for details on building for alternate OS/architecture combinations.
|
||||
|
||||
To build a Velero container image, first set the `$REGISTRY` environment variable. For example, if you want to build the `gcr.io/my-registry/velero:master` image, set `$REGISTRY` to `gcr.io/my-registry`. Optionally, set the `$VERSION` environment variable to change the image tag. Then, run:
|
||||
```bash
|
||||
make container
|
||||
```
|
||||
|
||||
To push your image to a registry, run:
|
||||
```bash
|
||||
make push
|
||||
```
|
||||
|
||||
### Update generated files
|
||||
|
||||
The following files are automatically generated from the source code:
|
||||
|
||||
* The clientset
|
||||
* Listers
|
||||
* Shared informers
|
||||
* Documentation
|
||||
* Protobuf/gRPC types
|
||||
|
||||
Run `make update` to regenerate files if you make the following changes:
|
||||
|
||||
* Add/edit/remove command line flags and/or their help text
|
||||
* Add/edit/remove commands or subcommands
|
||||
* Add new API types
|
||||
|
||||
Run [generate-proto.sh][13] to regenerate files if you make the following changes:
|
||||
|
||||
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14] and compiler plugin `protoc-gen-go` version v1.0.0.
|
||||
|
||||
### Cross compiling
|
||||
|
||||
By default, `make build` builds a `velero` binary for `linux-amd64`.
|
||||
To build for another platform, run `make build-<GOOS>-<GOARCH>`.
|
||||
For example, to build for the Mac, run `make build-darwin-amd64`.
|
||||
All binaries are placed in `_output/bin/<GOOS>/<GOARCH>`-- for example, `_output/bin/darwin/amd64/velero`.
|
||||
|
||||
Velero's `Makefile` has a convenience target, `all-build`, that builds the following platforms:
|
||||
|
||||
* linux-amd64
|
||||
* linux-arm
|
||||
* linux-arm64
|
||||
* darwin-amd64
|
||||
* windows-amd64
|
||||
|
||||
## 3. Test
|
||||
|
||||
To run unit tests, use `make test`. You can also run `make verify` to ensure that all generated
|
||||
files (clientset, listers, shared informers, docs) are up to date.
|
||||
|
||||
## 4. Run
|
||||
|
||||
### Prerequisites
|
||||
|
||||
When running Velero, you will need to ensure that you set up all of the following:
|
||||
|
||||
* Appropriate RBAC permissions in the cluster
|
||||
* Read access for all data from the source cluster and namespaces
|
||||
* Write access to the target cluster and namespaces
|
||||
* Cloud provider credentials
|
||||
* Read/write access to volumes
|
||||
* Read/write access to object storage for backup data
|
||||
* A [BackupStorageLocation][20] object definition for the Velero server
|
||||
* (Optional) A [VolumeSnapshotLocation][21] object definition for the Velero server, to take PV snapshots
|
||||
|
||||
### Create a cluster
|
||||
|
||||
To provision a cluster on AWS using Amazon’s official CloudFormation templates, here are two options:
|
||||
|
||||
* EC2 [Quick Start for Kubernetes][17]
|
||||
|
||||
* eksctl - [a CLI for Amazon EKS][18]
|
||||
|
||||
### Option 1: Run your Velero server locally
|
||||
|
||||
Running the Velero server locally can speed up iterative development. This eliminates the need to rebuild the Velero server
|
||||
image and redeploy it to the cluster with each change.
|
||||
|
||||
#### 1. Set environment variables
|
||||
|
||||
Set the appropriate environment variable for your cloud provider:
|
||||
|
||||
AWS: [AWS_SHARED_CREDENTIALS_FILE][15]
|
||||
|
||||
GCP: [GOOGLE_APPLICATION_CREDENTIALS][16]
|
||||
|
||||
Azure:
|
||||
|
||||
1. AZURE_CLIENT_ID
|
||||
|
||||
2. AZURE_CLIENT_SECRET
|
||||
|
||||
3. AZURE_SUBSCRIPTION_ID
|
||||
|
||||
4. AZURE_TENANT_ID
|
||||
|
||||
5. AZURE_STORAGE_ACCOUNT_ID
|
||||
|
||||
6. AZURE_STORAGE_KEY
|
||||
|
||||
7. AZURE_RESOURCE_GROUP
|
||||
|
||||
#### 2. Create required Velero resources in the cluster
|
||||
|
||||
You can use the `velero install` command to install Velero into your cluster, then remove the deployment from the cluster, leaving you
|
||||
with all of the required in-cluster resources.
|
||||
|
||||
##### Example
|
||||
|
||||
This example assumes you are using an existing cluster in AWS.
|
||||
|
||||
Using the `velero` binary that you've built, run `velero install`:
|
||||
|
||||
```bash
|
||||
# velero install requires a credentials file to exist, but we will
|
||||
# not be using it since we're running the server locally, so just
|
||||
# create an empty file to pass to the install command.
|
||||
touch fake-credentials-file
|
||||
|
||||
velero install \
|
||||
--provider aws \
|
||||
--bucket $BUCKET \
|
||||
--backup-location-config region=$REGION \
|
||||
--snapshot-location-config region=$REGION \
|
||||
--secret-file ./fake-credentials-file
|
||||
|
||||
# 'velero install' creates an in-cluster deployment of the
|
||||
# velero server using an official velero image, but we want
|
||||
# to run the velero server on our local machine using the
|
||||
# binary we built, so delete the in-cluster deployment.
|
||||
kubectl --namespace velero delete deployment velero
|
||||
|
||||
rm fake-credentials-file
|
||||
```
|
||||
|
||||
#### 3. Start the Velero server locally
|
||||
|
||||
* Make sure the `velero` binary you build is in your `PATH`, or specify the full path.
|
||||
|
||||
* Start the server: `velero server [CLI flags]`. The following CLI flags may be useful to customize, but see `velero server --help` for full details:
|
||||
* `--kubeconfig`: set the path to the kubeconfig file the Velero server uses to talk to the Kubernetes apiserver (default `$KUBECONFIG`)
|
||||
* `--namespace`: the set namespace where the Velero server should look for backups, schedules, restores (default `velero`)
|
||||
* `--log-level`: set the Velero server's log level (default `info`)
|
||||
* `--plugin-dir`: set the directory where the Velero server looks for plugins (default `/plugins`)
|
||||
* `--metrics-address`: set the bind address and port where Prometheus metrics are exposed (default `:8085`)
|
||||
|
||||
### Option 2: Run your Velero server in a deployment
|
||||
|
||||
1. Ensure you've built a `velero` container image and either loaded it onto your cluster's nodes, or pushed it to a registry (see [build][3]).
|
||||
|
||||
1. Install Velero into the cluster (the example below assumes you're using AWS):
|
||||
```bash
|
||||
velero install \
|
||||
--provider aws \
|
||||
--image $YOUR_CONTAINER_IMAGE \
|
||||
--bucket $BUCKET \
|
||||
--backup-location-config region=$REGION \
|
||||
--snapshot-location-config region=$REGION \
|
||||
--secret-file $YOUR_AWS_CREDENTIALS_FILE
|
||||
```
|
||||
|
||||
## 5. Vendoring dependencies
|
||||
|
||||
If you need to add or update the vendored dependencies, see [Vendoring dependencies][11].
|
||||
|
||||
[0]: ../README.md
|
||||
[1]: #prerequisites
|
||||
[2]: #getting-the-source
|
||||
[3]: #build
|
||||
[4]: https://blog.golang.org/organizing-go-code
|
||||
[5]: https://golang.org/doc/install
|
||||
[6]: https://github.com/heptio/velero/tree/master/examples
|
||||
[7]: #run
|
||||
[8]: config-definition.md
|
||||
[10]: #vendoring-dependencies
|
||||
[11]: vendoring-dependencies.md
|
||||
[12]: #test
|
||||
[13]: https://github.com/heptio/velero/blob/master/hack/generate-proto.sh
|
||||
[14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3
|
||||
[15]: https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#the-shared-credentials-file
|
||||
[16]: https://cloud.google.com/docs/authentication/getting-started#setting_the_environment_variable
|
||||
[17]: https://aws.amazon.com/quickstart/architecture/heptio-kubernetes/
|
||||
[18]: https://eksctl.io/
|
||||
[19]: ../examples/README.md
|
||||
[20]: api-types/backupstoragelocation.md
|
||||
[21]: api-types/volumesnapshotlocation.md
|
||||
[22]: https://github.com/heptio/velero/releases
|
||||
@@ -3,57 +3,64 @@
|
||||
## General
|
||||
|
||||
### `invalid configuration: no configuration has been provided`
|
||||
This typically means that no `kubeconfig` file can be found for the Ark client to use. Ark looks for a kubeconfig in the
|
||||
This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the
|
||||
following locations:
|
||||
* the path specified by the `--kubeconfig` flag, if any
|
||||
* the path specified by the `$KUBECONFIG` environment variable, if any
|
||||
* `~/.kube/config`
|
||||
|
||||
### Backups or restores stuck in `New` phase
|
||||
This means that the Ark controllers are not processing the backups/restores, which usually happens because the Ark server is not running. Check the pod description and logs for errors:
|
||||
This means that the Velero controllers are not processing the backups/restores, which usually happens because the Velero server is not running. Check the pod description and logs for errors:
|
||||
```
|
||||
kubectl -n heptio-ark describe pods
|
||||
kubectl -n heptio-ark logs deployment/ark
|
||||
kubectl -n velero describe pods
|
||||
kubectl -n velero logs deployment/velero
|
||||
```
|
||||
|
||||
|
||||
## AWS
|
||||
|
||||
### `NoCredentialProviders: no valid providers in chain`
|
||||
This means that the secret containing the AWS IAM user credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `credentials-ark` file is formatted properly and has the correct values:
|
||||
|
||||
#### Using credentials
|
||||
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `credentials-velero` file is formatted properly and has the correct values:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<your AWS access key ID>
|
||||
aws_secret_access_key=<your AWS secret access key>
|
||||
```
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
#### Using kube2iam
|
||||
This means that Velero can't read the content of the S3 bucket. Ensure the following:
|
||||
* There is a Trust Policy document allowing the role used by kube2iam to assume Velero's role, as stated in the AWS config documentation.
|
||||
* The new Velero role has all the permissions listed in the documentation regarding S3.
|
||||
|
||||
|
||||
## Azure
|
||||
|
||||
### `Failed to refresh the Token` or `adal: Refresh request failed`
|
||||
This means that the secrets containing the Azure service principal credentials for Ark has not been created/mounted
|
||||
properly into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
This means that the secrets containing the Azure service principal credentials for Velero have not been created/mounted
|
||||
properly into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions][0])
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
|
||||
## GCE/GKE
|
||||
|
||||
### `open credentials/cloud: no such file or directory`
|
||||
This means that the secret containing the GCE service account credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
[0]: azure-config#credentials-and-configuration
|
||||
[0]: azure-config#credentials-and-configuration
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
## Example
|
||||
|
||||
When Heptio Ark finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors are indicated in the output columns from `ark restore get`:
|
||||
When Velero finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors are indicated in the output columns from `velero restore get`:
|
||||
|
||||
```
|
||||
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
|
||||
@@ -15,14 +15,14 @@ backup-test-2-20170726180514 backup-test-2 Completed 0 0 2
|
||||
backup-test-2-20170726180515 backup-test-2 Completed 0 1 2017-07-26 13:32:59 -0400 EDT <none>
|
||||
```
|
||||
|
||||
To delve into the warnings and errors into more detail, you can use `ark restore describe`:
|
||||
To delve into the warnings and errors in more detail, you can use `velero restore describe`:
|
||||
```
|
||||
ark restore describe backup-test-20170726180512
|
||||
velero restore describe backup-test-20170726180512
|
||||
```
|
||||
The output looks like this:
|
||||
```
|
||||
Name: backup-test-20170726180512
|
||||
Namespace: heptio-ark
|
||||
Namespace: velero
|
||||
Labels: <none>
|
||||
Annotations: <none>
|
||||
|
||||
@@ -48,10 +48,10 @@ Phase: Completed
|
||||
Validation errors: <none>
|
||||
|
||||
Warnings:
|
||||
Ark: <none>
|
||||
Velero: <none>
|
||||
Cluster: <none>
|
||||
Namespaces:
|
||||
heptio-ark: serviceaccounts "ark" already exists
|
||||
velero: serviceaccounts "velero" already exists
|
||||
serviceaccounts "default" already exists
|
||||
kube-public: serviceaccounts "default" already exists
|
||||
kube-system: serviceaccounts "attachdetach-controller" already exists
|
||||
@@ -80,7 +80,7 @@ Warnings:
|
||||
default: serviceaccounts "default" already exists
|
||||
|
||||
Errors:
|
||||
Ark: <none>
|
||||
Velero: <none>
|
||||
Cluster: <none>
|
||||
Namespaces: <none>
|
||||
```
|
||||
@@ -93,7 +93,7 @@ of them may have been pre-existing).
|
||||
|
||||
Both errors and warnings are structured in the same way:
|
||||
|
||||
* `Ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory).
|
||||
* `Velero`: A list of system-related issues encountered by the Velero server (e.g. couldn't read directory).
|
||||
|
||||
* `Cluster`: A list of issues related to the restore of cluster-scoped resources.
|
||||
|
||||
|
||||
@@ -2,22 +2,22 @@
|
||||
|
||||
*Using Schedules and Restore-Only Mode*
|
||||
|
||||
If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Heptio Ark looks like the following:
|
||||
If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Velero looks like the following:
|
||||
|
||||
1. After you first run the Ark server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):
|
||||
1. After you first run the Velero server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):
|
||||
|
||||
```
|
||||
ark schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
```
|
||||
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
|
||||
|
||||
1. A disaster happens and you need to recreate your resources.
|
||||
|
||||
1. Update the Ark server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process.
|
||||
1. Update the Velero server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process.
|
||||
|
||||
1. Create a restore with your most recent Ark Backup:
|
||||
1. Create a restore with your most recent Velero Backup:
|
||||
```
|
||||
ark restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
# Expose Minio outside your cluster
|
||||
|
||||
When you run commands to get logs or describe a backup, the Ark server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Ark client -- you need to make Minio available outside the cluster. You can:
|
||||
|
||||
- Change the Minio Service type from `ClusterIP` to `NodePort`.
|
||||
- Set up Ingress for your cluster, keeping Minio Service type `ClusterIP`.
|
||||
|
||||
In Ark 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config.
|
||||
|
||||
For basic instructions on how to install the Ark server and client, see [the getting started example][1].
|
||||
|
||||
## Expose Minio with Service of type NodePort
|
||||
|
||||
The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Ark client.
|
||||
|
||||
You must also get the Minio URL, which you can then specify as the value of the new `publicUrl` field in your backup storage config.
|
||||
|
||||
1. In `examples/minio/00-minio-deployment.yaml`, change the value of Service `spec.type` from `ClusterIP` to `NodePort`.
|
||||
|
||||
1. Get the Minio URL:
|
||||
|
||||
- if you're running Minikube:
|
||||
|
||||
```shell
|
||||
minikube service minio --namespace=heptio-ark --url
|
||||
```
|
||||
|
||||
- in any other environment:
|
||||
|
||||
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Ark client.
|
||||
|
||||
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
|
||||
|
||||
```shell
|
||||
kubectl -n heptio-ark get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
|
||||
```
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix.
|
||||
|
||||
## Work with Ingress
|
||||
|
||||
Configuring Ingress for your cluster is out of scope for the Ark documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Ark configuration with Minio.
|
||||
|
||||
In this case:
|
||||
|
||||
1. Keep the Service type as `ClusterIP`.
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field.
|
||||
|
||||
[1]: get-started.md
|
||||
@@ -1,9 +1,9 @@
|
||||
# Extend Ark
|
||||
# Extend Velero
|
||||
|
||||
Ark includes mechanisms for extending the core functionality to meet your individual backup/restore needs:
|
||||
Velero includes mechanisms for extending the core functionality to meet your individual backup/restore needs:
|
||||
|
||||
* [Hooks][27] allow you to specify commands to be executed within running pods during a backup. This is useful if you need to run a workload-specific command prior to taking a backup (for example, to flush disk buffers or to freeze a database).
|
||||
* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Ark without needing to be compiled into the core Ark binary.
|
||||
* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Velero without needing to be compiled into the core Velero binary.
|
||||
|
||||
[27]: hooks.md
|
||||
[28]: plugins.md
|
||||
|
||||
36
docs/faq.md
36
docs/faq.md
@@ -1,15 +1,15 @@
|
||||
# FAQ
|
||||
|
||||
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
|
||||
## When is it appropriate to use Velero instead of etcd's built-in backup/restore?
|
||||
|
||||
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
|
||||
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
|
||||
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
|
||||
sophisticated management of your Kubernetes cluster backups and restores, we feel that Velero is
|
||||
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
|
||||
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
|
||||
and restoring etcd.
|
||||
|
||||
Examples of cases where Ark is useful:
|
||||
Examples of cases where Velero is useful:
|
||||
|
||||
* you don't have access to etcd (e.g. you're running on GKE)
|
||||
* backing up both Kubernetes resources and persistent volume state
|
||||
@@ -18,20 +18,26 @@ Examples of cases where Ark is useful:
|
||||
* backing up Kubernetes resources that are stored across multiple etcd clusters (for example if you
|
||||
run a custom apiserver)
|
||||
|
||||
## Will Ark restore my Kubernetes resources exactly the way they were before?
|
||||
## Will Velero restore my Kubernetes resources exactly the way they were before?
|
||||
|
||||
Yes, with some exceptions. For example, when Ark restores pods it deletes the `nodeName` from the
|
||||
Yes, with some exceptions. For example, when Velero restores pods it deletes the `nodeName` from the
|
||||
pod so that it can be scheduled onto a new node. You can see some more examples of the differences
|
||||
in [pod_action.go](https://github.com/heptio/ark/blob/master/pkg/restore/pod_action.go)
|
||||
in [pod_action.go](https://github.com/heptio/velero/blob/master/pkg/restore/pod_action.go)
|
||||
|
||||
## I'm using Ark in multiple clusters. Should I use the same bucket to store all of my backups?
|
||||
## I'm using Velero in multiple clusters. Should I use the same bucket to store all of my backups?
|
||||
|
||||
We **strongly** recommend that you use a separate bucket per cluster to store backups. Sharing a bucket
|
||||
across multiple Ark instances can lead to numerous problems - failed backups, overwritten backups,
|
||||
inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Ark
|
||||
instance.
|
||||
We **strongly** recommend that each Velero instance use a distinct bucket/prefix combination to store backups.
|
||||
Having multiple Velero instances write backups to the same bucket/prefix combination can lead to numerous
|
||||
problems - failed backups, overwritten backups, inadvertently deleted backups, etc., all of which can be
|
||||
avoided by using a separate bucket + prefix per Velero instance.
|
||||
|
||||
Related to this, if you need to restore a backup from cluster A into cluster B, please use restore-only
|
||||
mode in cluster B's Ark instance (via the `--restore-only` flag on the `ark server` command specified
|
||||
in your Ark deployment) while it's configured to use cluster A's bucket. This will ensure no
|
||||
new backups are created, and no existing backups are deleted or overwritten.
|
||||
It's fine to have multiple Velero instances back up to the same bucket if each instance uses its own
|
||||
prefix within the bucket. This can be configured in your `BackupStorageLocation`, by setting the
|
||||
`spec.objectStorage.prefix` field. It's also fine to use a distinct bucket for each Velero instance,
|
||||
and not to use prefixes at all.
|
||||
|
||||
Related to this, if you need to restore a backup that was created in cluster A into cluster B, you may
|
||||
configure cluster B with a backup storage location that points to cluster A's bucket/prefix. If you do
|
||||
this, you should use restore-only mode in cluster B's Velero instance (via the `--restore-only` flag on
|
||||
the `velero server` command specified in your Velero deployment) while it's configured to use cluster A's
|
||||
bucket/prefix. This will ensure no new backups are created, and no existing backups are deleted or overwritten.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Run Ark on GCP
|
||||
# Run Velero on GCP
|
||||
|
||||
You can run Kubernetes on Google Cloud Platform in either:
|
||||
|
||||
@@ -7,9 +7,25 @@ You can run Kubernetes on Google Cloud Platform in either:
|
||||
|
||||
If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the [user guide][16] to set them up.
|
||||
|
||||
## Download Velero
|
||||
|
||||
1. Download the [latest official release's](https://github.com/heptio/velero/releases) tarball for your client platform.
|
||||
|
||||
_We strongly recommend that you use an [official release](https://github.com/heptio/velero/releases) of
|
||||
Velero. The tarballs for each release contain the `velero` command-line client. The code in the master branch
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
## Create GCS bucket
|
||||
|
||||
Heptio Ark requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the <YOUR_BUCKET> placeholder with the name of your bucket:
|
||||
Velero requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the <YOUR_BUCKET> placeholder with the name of your bucket:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
@@ -19,7 +35,7 @@ gsutil mb gs://$BUCKET/
|
||||
|
||||
## Create service account
|
||||
|
||||
To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
|
||||
|
||||
1. View your current config settings:
|
||||
|
||||
@@ -36,13 +52,13 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
2. Create a service account:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts create heptio-ark \
|
||||
--display-name "Heptio Ark service account"
|
||||
gcloud iam service-accounts create velero \
|
||||
--display-name "Velero service account"
|
||||
```
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
> If you'll be using Velero to back up multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
Then list all accounts and find the `heptio-ark` account you just created:
|
||||
Then list all accounts and find the `velero` account you just created:
|
||||
```bash
|
||||
gcloud iam service-accounts list
|
||||
```
|
||||
@@ -51,11 +67,11 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
|
||||
```bash
|
||||
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
|
||||
--filter="displayName:Heptio Ark service account" \
|
||||
--filter="displayName:Velero service account" \
|
||||
--format 'value(email)')
|
||||
```
|
||||
|
||||
3. Attach policies to give `heptio-ark` the necessary permissions to function:
|
||||
3. Attach policies to give `velero` the necessary permissions to function:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -67,24 +83,25 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
compute.snapshots.create
|
||||
compute.snapshots.useReadOnly
|
||||
compute.snapshots.delete
|
||||
compute.zones.get
|
||||
)
|
||||
|
||||
gcloud iam roles create heptio_ark.server \
|
||||
gcloud iam roles create velero.server \
|
||||
--project $PROJECT_ID \
|
||||
--title "Heptio Ark Server" \
|
||||
--title "Velero Server" \
|
||||
--permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")"
|
||||
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role projects/$PROJECT_ID/roles/heptio_ark.server
|
||||
--role projects/$PROJECT_ID/roles/velero.server
|
||||
|
||||
gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET}
|
||||
```
|
||||
|
||||
4. Create a service account key, specifying an output file (`credentials-ark`) in your local directory:
|
||||
4. Create a service account key, specifying an output file (`credentials-velero`) in your local directory:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts keys create credentials-ark \
|
||||
gcloud iam service-accounts keys create credentials-velero \
|
||||
--iam-account $SERVICE_ACCOUNT_EMAIL
|
||||
```
|
||||
|
||||
@@ -93,50 +110,40 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
If you run Google Kubernetes Engine (GKE), make sure that your current IAM user is a cluster-admin. This role is required to create RBAC objects.
|
||||
See [the GKE documentation][22] for more information.
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
## Install and start Velero
|
||||
|
||||
Install Velero, including all prerequisites, into the cluster and start the deployment. This will create a namespace called `velero`, and place a deployment named `velero` in it.
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
velero install \
|
||||
--provider gcp \
|
||||
--bucket $BUCKET \
|
||||
--secret-file ./credentials-velero
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready.
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
(Optional) Specify `--snapshot-location-config snapshotLocation=<YOUR_LOCATION>` to keep snapshots in a specific availability zone. See the [VolumeSnapshotLocation definition][8] for details.
|
||||
|
||||
**Note: If you use a custom namespace, replace `heptio-ark` with the name of the custom namespace**
|
||||
(Optional) Specify [additional configurable parameters][7] for the `--backup-location-config` flag.
|
||||
|
||||
Specify the following values in the example files:
|
||||
(Optional) Specify [additional configurable parameters][8] for the `--snapshot-location-config` flag.
|
||||
|
||||
* In file `config/gcp/05-ark-backupstoragelocation.yaml`:
|
||||
For more complex installation needs, use either the Helm chart, or add `--dry-run -o yaml` options for generating the YAML representation for the installation.
|
||||
|
||||
* Replace `<YOUR_BUCKET>`. See the [BackupStorageLocation definition][7] for details.
|
||||
## Installing the nginx example (optional)
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
|
||||
|
||||
* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/gcp/10-deployment.yaml`:
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/gcp/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/gcp/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/gcp/10-deployment.yaml
|
||||
```
|
||||
|
||||
[0]: namespace.md
|
||||
[7]: api-types/backupstoragelocation.md#gcp
|
||||
[8]: api-types/volumesnapshotlocation.md#gcp
|
||||
[15]: https://cloud.google.com/compute/docs/access/service-accounts
|
||||
[16]: https://cloud.google.com/sdk/docs/
|
||||
[20]: faq.md
|
||||
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#prerequisites_for_using_role-based_access_control
|
||||
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap
|
||||
|
||||
|
||||
@@ -1,62 +1,85 @@
|
||||
## Getting started
|
||||
|
||||
The following example sets up the Ark server and client, then backs up and restores a sample application.
|
||||
The following example sets up the Velero server and client, then backs up and restores a sample application.
|
||||
|
||||
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
|
||||
For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31].
|
||||
|
||||
**NOTE** The example lets you explore basic Ark functionality. Configuring Minio for production is out of scope.
|
||||
**NOTE** The example lets you explore basic Velero functionality. Configuring Minio for production is out of scope.
|
||||
|
||||
See [Set up Ark on your platform][3] for how to configure Ark for a production environment.
|
||||
See [Set up Velero on your platform][3] for how to configure Velero for a production environment.
|
||||
|
||||
If you encounter issues with installing or configuring, see [Debugging Installation Issues](debugging-install.md).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. **Note:** restic support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. Restic support is not required for this example, but may be of interest later. See [Restic Integration][17].
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
|
||||
### Download
|
||||
### Download Velero
|
||||
|
||||
1. Download the [latest release's][26] tarball for your platform.
|
||||
1. Download the [latest official release's](https://github.com/heptio/velero/releases) tarball for your client platform.
|
||||
|
||||
_We strongly recommend that you use an [official release](https://github.com/heptio/velero/releases) of
|
||||
Velero. The tarballs for each release contain the `velero` command-line client. The code in the master branch
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xzf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Ark directory" in subsequent steps.
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `ark` binary from the Ark directory to somewhere in your PATH.
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
#### MacOS Installation
|
||||
|
||||
On Mac, you can use [HomeBrew](https://brew.sh) to install the `ark` client:
|
||||
On Mac, you can use [HomeBrew](https://brew.sh) to install the `velero` client:
|
||||
```bash
|
||||
brew install ark
|
||||
brew install velero
|
||||
```
|
||||
|
||||
### Set up server
|
||||
|
||||
These instructions start the Ark server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `ark describe` commands.
|
||||
These instructions start the Velero server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `velero describe` commands.
|
||||
|
||||
1. Start the server and the local storage service. In the Ark directory, run:
|
||||
1. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = minio
|
||||
aws_secret_access_key = minio123
|
||||
```
|
||||
|
||||
1. Start the server and the local storage service. In the Velero directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
kubectl apply -f config/minio/
|
||||
kubectl apply -f examples/minio/00-minio-deployment.yaml
|
||||
|
||||
velero install \
|
||||
--provider aws \
|
||||
--bucket velero \
|
||||
--secret-file ./credentials-velero \
|
||||
--use-volume-snapshots=false \
|
||||
--backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://minio.velero.svc:9000
|
||||
```
|
||||
|
||||
This example assumes that it is running within a local cluster without a volume provider capable of snapshots, so no `VolumeSnapshotLocation` is created (`--use-volume-snapshots=false`).
|
||||
|
||||
Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready.
|
||||
|
||||
|
||||
1. Deploy the example nginx application:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/base.yaml
|
||||
kubectl apply -f examples/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
1. Check to see that both the Ark and nginx deployments are successfully created:
|
||||
1. Check to see that both the Velero and nginx deployments are successfully created:
|
||||
|
||||
```
|
||||
kubectl get deployments -l component=ark --namespace=heptio-ark
|
||||
kubectl get deployments -l component=velero --namespace=velero
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
```
|
||||
|
||||
@@ -65,25 +88,25 @@ These instructions start the Ark server and a Minio instance that is accessible
|
||||
1. Create a backup for any object that matches the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
velero backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively if you want to backup all objects *except* those matching the label `backup=ignore`:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector 'backup notin (ignore)'
|
||||
velero backup create nginx-backup --selector 'backup notin (ignore)'
|
||||
```
|
||||
|
||||
1. (Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
|
||||
velero schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively, you can use some non-standard shorthand cron expressions:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="@daily" --selector app=nginx
|
||||
velero schedule create nginx-daily --schedule="@daily" --selector app=nginx
|
||||
```
|
||||
|
||||
See the [cron package's documentation][30] for more usage examples.
|
||||
@@ -111,13 +134,13 @@ These instructions start the Ark server and a Minio instance that is accessible
|
||||
1. Run:
|
||||
|
||||
```
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
1. Run:
|
||||
|
||||
```
|
||||
ark restore get
|
||||
velero restore get
|
||||
```
|
||||
|
||||
After the restore finishes, the output looks like the following:
|
||||
@@ -134,7 +157,7 @@ After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` a
|
||||
If there are errors or warnings, you can look at them in detail:
|
||||
|
||||
```
|
||||
ark restore describe <RESTORE_NAME>
|
||||
velero restore describe <RESTORE_NAME>
|
||||
```
|
||||
|
||||
For more information, see [the debugging information][18].
|
||||
@@ -145,31 +168,101 @@ If you want to delete any backups you created, including data in object storage
|
||||
volume snapshots, you can run:
|
||||
|
||||
```
|
||||
ark backup delete BACKUP_NAME
|
||||
velero backup delete BACKUP_NAME
|
||||
```
|
||||
|
||||
This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do
|
||||
this for each backup you want to permanently delete. A future version of Ark will allow you to
|
||||
This asks the Velero server to delete all backup data associated with `BACKUP_NAME`. You need to do
|
||||
this for each backup you want to permanently delete. A future version of Velero will allow you to
|
||||
delete multiple backups by name or label selector.
|
||||
|
||||
Once fully removed, the backup is no longer visible when you run:
|
||||
|
||||
```
|
||||
ark backup get BACKUP_NAME
|
||||
velero backup get BACKUP_NAME
|
||||
```
|
||||
|
||||
If you want to uninstall Ark but preserve the backup data in object storage and persistent volume
|
||||
snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this
|
||||
example:
|
||||
To completely uninstall Velero, Minio, and the nginx example app from your Kubernetes cluster:
|
||||
|
||||
```
|
||||
kubectl delete -f config/common/
|
||||
kubectl delete -f config/minio/
|
||||
kubectl delete -f config/nginx-app/base.yaml
|
||||
kubectl delete namespace/velero clusterrolebinding/velero
|
||||
kubectl delete crds -l component=velero
|
||||
kubectl delete -f examples/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
[31]: expose-minio.md
|
||||
## Expose Minio outside your cluster with a Service
|
||||
|
||||
When you run commands to get logs or describe a backup, the Velero server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Velero client -- you need to make Minio available outside the cluster. You can:
|
||||
|
||||
- Change the Minio Service type from `ClusterIP` to `NodePort`.
|
||||
- Set up Ingress for your cluster, keeping Minio Service type `ClusterIP`.
|
||||
|
||||
You can also specify a `publicUrl` config field for the pre-signed URL in your backup storage location config.
|
||||
|
||||
For basic instructions on how to install the Velero server and client, see [the getting started example][1].
|
||||
|
||||
### Expose Minio with Service of type NodePort
|
||||
|
||||
The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Velero client.
|
||||
|
||||
You must also get the Minio URL, which you can then specify as the value of the `publicUrl` field in your backup storage location config.
|
||||
|
||||
1. In `examples/minio/00-minio-deployment.yaml`, change the value of Service `spec.type` from `ClusterIP` to `NodePort`.
|
||||
|
||||
1. Get the Minio URL:
|
||||
|
||||
- if you're running Minikube:
|
||||
|
||||
```shell
|
||||
minikube service minio --namespace=velero --url
|
||||
```
|
||||
|
||||
- in any other environment:
|
||||
|
||||
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.
|
||||
|
||||
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
|
||||
|
||||
```shell
|
||||
kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
|
||||
```
|
||||
|
||||
1. Edit your `BackupStorageLocation` YAML, adding `publicUrl: <URL_FROM_PREVIOUS_STEP>` as a field under `spec.config`. You must include the `http://` or `https://` prefix.
|
||||
|
||||
## Expose Minio outside your cluster with Kubernetes in Docker (KinD)
|
||||
|
||||
Kubernetes in Docker currently does not have support for NodePort services (see [this issue](https://github.com/kubernetes-sigs/kind/issues/99)). In this case, you can use a port forward to access the Minio bucket.
|
||||
|
||||
In a terminal, run the following:
|
||||
|
||||
```shell
|
||||
MINIO_POD=$(kubectl get pods -n velero -l component=minio -o jsonpath='{.items[0].metadata.name}')
|
||||
|
||||
kubectl port-forward $MINIO_POD -n velero 9000:9000
|
||||
```
|
||||
|
||||
Then, in another terminal:
|
||||
|
||||
```shell
|
||||
kubectl edit backupstoragelocation default -n velero
|
||||
```
|
||||
|
||||
Add `publicUrl: http://localhost:9000` under the `spec.config` section.
|
||||
|
||||
### Work with Ingress
|
||||
|
||||
Configuring Ingress for your cluster is out of scope for the Velero documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Velero configuration with Minio.
|
||||
|
||||
In this case:
|
||||
|
||||
1. Keep the Service type as `ClusterIP`.
|
||||
|
||||
1. Edit your `BackupStorageLocation` YAML, adding `publicUrl: <URL_AND_PORT_OF_INGRESS>` as a field under `spec.config`.
|
||||
|
||||
|
||||
[1]: get-started.md
|
||||
[3]: install-overview.md
|
||||
[17]: restic.md
|
||||
[18]: debugging-restores.md
|
||||
[26]: https://github.com/heptio/ark/releases
|
||||
[26]: https://github.com/heptio/velero/releases
|
||||
[30]: https://godoc.org/github.com/robfig/cron
|
||||
[31]: #expose-minio-outside-your-cluster
|
||||
|
||||
@@ -1,45 +1,38 @@
|
||||
# Hooks
|
||||
|
||||
Heptio Ark currently supports executing commands in containers in pods during a backup.
|
||||
Velero currently supports executing commands in containers in pods during a backup.
|
||||
|
||||
## Backup Hooks
|
||||
|
||||
When performing a backup, you can specify one or more commands to execute in a container in a pod
|
||||
when that pod is being backed up.
|
||||
|
||||
Ark versions prior to v0.7.0 only support hooks that execute prior to any custom action processing
|
||||
("pre" hooks).
|
||||
|
||||
As of version v0.7.0, Ark also supports "post" hooks - these execute after all custom actions have
|
||||
completed, as well as after all the additional items specified by custom actions have been backed
|
||||
up.
|
||||
when that pod is being backed up. The commands can be configured to run *before* any custom action
|
||||
processing ("pre" hooks), or after all custom actions have been completed and any additional items
|
||||
specified by custom actions have been backed up ("post" hooks).
|
||||
|
||||
There are two ways to specify hooks: annotations on the pod itself, and in the Backup spec.
|
||||
|
||||
### Specifying Hooks As Pod Annotations
|
||||
|
||||
You can use the following annotations on a pod to make Ark execute a hook when backing up the pod:
|
||||
You can use the following annotations on a pod to make Velero execute a hook when backing up the pod:
|
||||
|
||||
#### Pre hooks
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `pre.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `pre.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
Ark v0.7.0+ continues to support the original (deprecated) way to specify pre hooks - without the
|
||||
`pre.` prefix in the annotation names (e.g. `hook.backup.ark.heptio.com/container`).
|
||||
|
||||
#### Post hooks (v0.7.0+)
|
||||
#### Post hooks
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `post.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `post.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
### Specifying Hooks in the Backup Spec
|
||||
|
||||
@@ -56,25 +49,25 @@ setup this example.
|
||||
|
||||
### Annotations
|
||||
|
||||
The Ark [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
|
||||
The Velero [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
|
||||
to your declarative deployment. Below is an example of what updating an object in place might look like.
|
||||
|
||||
```shell
|
||||
kubectl annotate pod -n nginx-example -l app=nginx \
|
||||
pre.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
|
||||
pre.hook.backup.ark.heptio.com/container=fsfreeze \
|
||||
post.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \
|
||||
post.hook.backup.ark.heptio.com/container=fsfreeze
|
||||
pre.hook.backup.velero.io/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
|
||||
pre.hook.backup.velero.io/container=fsfreeze \
|
||||
post.hook.backup.velero.io/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \
|
||||
post.hook.backup.velero.io/container=fsfreeze
|
||||
```
|
||||
|
||||
Now test the pre and post hooks by creating a backup. You can use the Ark logs to verify that the pre and post
|
||||
Now test the pre and post hooks by creating a backup. You can use the Velero logs to verify that the pre and post
|
||||
hooks are running and exiting without error.
|
||||
|
||||
```shell
|
||||
ark backup create nginx-hook-test
|
||||
velero backup create nginx-hook-test
|
||||
|
||||
ark backup get nginx-hook-test
|
||||
ark backup logs nginx-hook-test | grep hookCommand
|
||||
velero backup get nginx-hook-test
|
||||
velero backup logs nginx-hook-test | grep hookCommand
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -1,31 +1,47 @@
|
||||
# Use IBM Cloud Object Storage as Ark's storage destination.
|
||||
You can deploy Ark on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, but anyway you can use IBM Cloud Object Store as a destination for Ark's backups.
|
||||
# Use IBM Cloud Object Storage as Velero's storage destination.
|
||||
You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, and in any case you can use IBM Cloud Object Store as a destination for Velero's backups.
|
||||
|
||||
To set up IBM Cloud Object Storage (COS) as Ark's destination, you:
|
||||
To set up IBM Cloud Object Storage (COS) as Velero's destination, you:
|
||||
|
||||
* Download an official release of Velero
|
||||
* Create your COS instance
|
||||
* Create an S3 bucket
|
||||
* Define a service that can store data in the bucket
|
||||
* Configure and start the Ark server
|
||||
* Configure and start the Velero server
|
||||
|
||||
## Download Velero
|
||||
|
||||
1. Download the [latest official release's](https://github.com/heptio/velero/releases) tarball for your client platform.
|
||||
|
||||
_We strongly recommend that you use an [official release](https://github.com/heptio/velero/releases) of
|
||||
Velero. The tarballs for each release contain the `velero` command-line client. The code in the master branch
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
## Create COS instance
|
||||
If you don’t have a COS instance, you can create a new one, according to the detailed instructions in [Creating a new resource instance][1].
|
||||
|
||||
## Create an S3 bucket
|
||||
Heptio Ark requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
|
||||
Velero requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
|
||||
|
||||
## Define a service that can store data in the bucket.
|
||||
The process of creating service credentials is described in [Service credentials][3].
|
||||
Several comments:
|
||||
|
||||
1. The Ark service will write its backup into the bucket, so it requires the “Writer” access role.
|
||||
1. The Velero service will write its backup into the bucket, so it requires the “Writer” access role.
|
||||
|
||||
2. Ark uses an AWS S3 compatible API. Which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{“HMAC”:true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide.
|
||||
2. Velero uses an AWS S3-compatible API, which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{"HMAC":true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide.
|
||||
|
||||
3. After successfully creating a Service credential, you can view the JSON definition of the credential. Under the `cos_hmac_keys` entry there are `access_key_id` and `secret_access_key`. We will use them in the next step.
|
||||
|
||||
4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
|
||||
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
@@ -35,40 +51,37 @@ Several comments:
|
||||
|
||||
where the access key id and secret are the values that we got above.
|
||||
|
||||
## Credentials and configuration
|
||||
## Install and start Velero
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
Install Velero, including all prerequisites, into the cluster and start the deployment. This will create a namespace called `velero`, and place a deployment named `velero` in it.
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
velero install \
|
||||
--provider aws \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--secret-file ./credentials-velero \
|
||||
--use-volume-snapshots=false \
|
||||
--backup-location-config region=<YOUR_REGION>,s3ForcePathStyle="true",s3Url=<YOUR_URL_ACCESS_POINT>
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
Velero does not currently have a volume snapshot plugin for IBM Cloud, so creating volume snapshots is disabled.
|
||||
|
||||
Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready.
|
||||
|
||||
Once the installation is complete, remove the default `VolumeSnapshotLocation` that was created by `velero install`, since it's specific to AWS and won't work for IBM Cloud:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-file cloud=credentials-ark
|
||||
kubectl -n velero delete volumesnapshotlocation.velero.io default
|
||||
```
|
||||
|
||||
Specify the following values in the example files:
|
||||
For more complex installation needs, use either the Helm chart, or add `--dry-run -o yaml` options for generating the YAML representation for the installation.
|
||||
|
||||
* In `config/ibm/05-ark-backupstoragelocation.yaml`:
|
||||
## Installing the nginx example (optional)
|
||||
|
||||
* Replace `<YOUR_BUCKET>`, `<YOUR_REGION>` and `<YOUR_URL_ACCESS_POINT>`. See the [BackupStorageLocation definition][6] for details.
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
|
||||
|
||||
## Start the Ark server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/ibm/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/ibm/10-deployment.yaml
|
||||
```
|
||||
|
||||
[0]: namespace.md
|
||||
[1]: https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#creating-a-new-resource-instance
|
||||
|
||||
@@ -1,21 +1,21 @@
|
||||
# Image tagging policy
|
||||
|
||||
This document describes Ark's image tagging policy.
|
||||
This document describes Velero's image tagging policy.
|
||||
|
||||
## Released versions
|
||||
|
||||
`gcr.io/heptio-images/ark:<SemVer>`
|
||||
`gcr.io/heptio-images/velero:<SemVer>`
|
||||
|
||||
Ark follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/ark` repository has a matching image, e.g. `gcr.io/heptio-images/ark:v0.8.0`.
|
||||
Velero follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/velero` repository has a matching image, e.g. `gcr.io/heptio-images/velero:v1.0.0`.
|
||||
|
||||
### Latest
|
||||
|
||||
`gcr.io/heptio-images/ark:latest`
|
||||
`gcr.io/heptio-images/velero:latest`
|
||||
|
||||
The `latest` tag follows the most recently released version of Ark.
|
||||
The `latest` tag follows the most recently released version of Velero.
|
||||
|
||||
## Development
|
||||
|
||||
`gcr.io/heptio-images/ark:master`
|
||||
`gcr.io/heptio-images/velero:master`
|
||||
|
||||
The `master` tag follows the latest commit to land on the `master` branch.
|
||||
The `master` tag follows the latest commit to land on the `master` branch.
|
||||
|
||||
BIN
docs/img/velero.png
Normal file
BIN
docs/img/velero.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 44 KiB |
@@ -1,55 +1,84 @@
|
||||
# Set up Ark on your platform
|
||||
# Set up Velero on your platform
|
||||
|
||||
You can run Ark with a cloud provider or on-premises. For detailed information about the platforms that Ark supports, see [Compatible Storage Providers][99].
|
||||
You can run Velero with a cloud provider or on-premises. For detailed information about the platforms that Velero supports, see [Compatible Storage Providers][99].
|
||||
|
||||
In version 0.7.0 and later, you can run Ark in any namespace, which requires additional customization. See [Run in custom namespace][3].
|
||||
You can run Velero in any namespace, which requires additional customization. See [Run in custom namespace][3].
|
||||
|
||||
In version 0.9.0 and later, you can use Ark's integration with restic, which requires additional setup. See [restic instructions][20].
|
||||
You can also use Velero's integration with restic, which requires additional setup. See [restic instructions][20].
|
||||
|
||||
## Customize configuration
|
||||
|
||||
Whether you run Ark on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Ark deployment YAML.
|
||||
Whether you run Velero on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Velero deployment YAML.
|
||||
|
||||
For details, see the documentation topics for individual cloud providers.
|
||||
|
||||
## Cloud provider
|
||||
|
||||
The Ark repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see:
|
||||
The Velero client includes an `install` command to specify the settings for each supported cloud provider. You can install Velero for the included cloud providers using the following command:
|
||||
|
||||
* [Run Ark on AWS][0]
|
||||
* [Run Ark on GCP][1]
|
||||
* [Run Ark on Azure][2]
|
||||
* [Use IBM Cloud Object Store as Ark's storage destination][4]
|
||||
```bash
|
||||
velero install \
|
||||
--provider <YOUR_PROVIDER> \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--secret-file <PATH_TO_FILE> \
|
||||
[--backup-location-config]
|
||||
[--snapshot-location-config]
|
||||
[--namespace]
|
||||
[--use-volume-snapshots]
|
||||
[--use-restic]
|
||||
```
|
||||
|
||||
For provider-specific instructions, see:
|
||||
|
||||
* [Run Velero on AWS][0]
|
||||
* [Run Velero on GCP][1]
|
||||
* [Run Velero on Azure][2]
|
||||
* [Use IBM Cloud Object Store as Velero's storage destination][4]
|
||||
|
||||
When using restic on a storage provider that doesn't currently have Velero support for snapshots, the `--use-volume-snapshots=false` flag prevents an unused `VolumeSnapshotLocation` from being created on installation.
|
||||
|
||||
To see the YAML applied by the `velero install` command, use the `--dry-run -o yaml` arguments.
|
||||
|
||||
For more complex installation needs, use either the generated YAML, or the Helm chart.
|
||||
|
||||
## On-premises
|
||||
|
||||
You can run Ark in an on-premises cluster in different ways depending on your requirements.
|
||||
You can run Velero in an on-premises cluster in different ways depending on your requirements.
|
||||
|
||||
First, you must select an object storage backend that Ark can use to store backup data. [Compatible Storage Providers][99] contains information on various
|
||||
First, you must select an object storage backend that Velero can use to store backup data. [Compatible Storage Providers][99] contains information on various
|
||||
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
|
||||
not using another storage platform that offers an S3-compatible object storage API.
|
||||
|
||||
Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on
|
||||
the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Ark plugin to get native Portworx snapshots as part
|
||||
of your Ark backups. If there is no native snapshot plugin available for your storage platform, you can use Ark's [restic integration][20], which provides a
|
||||
the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Velero plugin to get native Portworx snapshots as part
|
||||
of your Velero backups. If there is no native snapshot plugin available for your storage platform, you can use Velero's [restic integration][20], which provides a
|
||||
platform-agnostic backup solution for volume data.
|
||||
|
||||
## Removing Velero
|
||||
|
||||
If you would like to completely uninstall Velero from your cluster, the following commands will remove all resources created by `velero install`:
|
||||
|
||||
```bash
|
||||
kubectl delete namespace/velero clusterrolebinding/velero
|
||||
kubectl delete crds -l component=velero
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
After you set up the Ark server, try these examples:
|
||||
After you set up the Velero server, try these examples:
|
||||
|
||||
### Basic example (without PersistentVolumes)
|
||||
|
||||
1. Start the sample nginx app:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/base.yaml
|
||||
kubectl apply -f examples/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
1. Create a backup:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
velero backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
@@ -63,7 +92,7 @@ After you set up the Ark server, try these examples:
|
||||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
### Snapshot example (with PersistentVolumes)
|
||||
@@ -73,13 +102,13 @@ After you set up the Ark server, try these examples:
|
||||
1. Start the sample nginx app:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/with-pv.yaml
|
||||
kubectl apply -f examples/nginx-app/with-pv.yaml
|
||||
```
|
||||
|
||||
1. Create a backup with PV snapshotting:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
velero backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
@@ -93,7 +122,7 @@ After you set up the Ark server, try these examples:
|
||||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
[0]: aws-config.md
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
Copyright 2018 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"os"
|
||||
"text/template"
|
||||
|
||||
"github.com/heptio/ark/pkg/cmd/cli/bug"
|
||||
"github.com/heptio/velero/pkg/cmd/cli/bug"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -38,7 +38,7 @@ func main() {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = tmpl.Execute(outFile, bug.ArkBugInfo{})
|
||||
err = tmpl.Execute(outFile, bug.VeleroBugInfo{})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -1,55 +1,47 @@
|
||||
# Backup Storage Locations and Volume Snapshot Locations
|
||||
|
||||
Ark v0.10 introduces a new way of configuring where Ark backups and their associated persistent volume snapshots are stored.
|
||||
|
||||
## Motivations
|
||||
|
||||
In Ark versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Ark backups should be stored. This is defined by a **provider** (e.g. `aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Ark backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`).
|
||||
|
||||
There are a number of use cases that this basic design does not support, such as:
|
||||
|
||||
- Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
- Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
- For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud
|
||||
|
||||
Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Ark to be able to support multiple possible storage locations.
|
||||
|
||||
## Overview
|
||||
|
||||
In Ark v0.10 we got rid of the `Config` custom resource, and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. The new resources directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively.
|
||||
Velero has two custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`, that are used to configure where Velero backups and their associated persistent volume snapshots are stored.
|
||||
|
||||
Now, the user can pre-define more than one possible `BackupStorageLocation` and more than one `VolumeSnapshotLocation`, and can select *at backup creation time* the location in which the backup and associated snapshots should be stored.
|
||||
|
||||
A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Ark data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider.
|
||||
A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Velero data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider.
|
||||
|
||||
A `VolumeSnapshotLocation` is defined entirely by provider-specific fields (e.g. AWS region, Azure resource group, Portworx snapshot type, etc.) The [API documentation][2] captures the configurable parameters for each in-tree provider.
|
||||
|
||||
Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Ark backup.
|
||||
The user can pre-configure one or more possible `BackupStorageLocations` and one or more `VolumeSnapshotLocations`, and can select *at backup creation time* the location in which the backup and associated snapshots should be stored.
|
||||
|
||||
This configuration design enables a number of different use cases, including:
|
||||
|
||||
- Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
- Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
- For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud
|
||||
|
||||
## Limitations / Caveats
|
||||
|
||||
- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take an Ark backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail.
|
||||
- Velero only supports a single set of credentials *per provider*. It's not yet possible to use different credentials for different locations, if they're for the same provider.
|
||||
|
||||
- Each Ark backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Ark backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important.
|
||||
- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take a Velero backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail.
|
||||
|
||||
- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Ark will **only** snapshot the EBS volumes.
|
||||
- Each Velero backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Velero backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important.
|
||||
|
||||
- Restic data is now stored under a prefix/subdirectory of the main Ark bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.
|
||||
- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Velero will **only** snapshot the EBS volumes.
|
||||
|
||||
- Restic data is stored under a prefix/subdirectory of the main Velero bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.
|
||||
|
||||
## Examples
|
||||
|
||||
Let's look at some examples of how we can use this new mechanism to address each of our previously unsupported use cases:
|
||||
Let's look at some examples of how we can use this configuration mechanism to address some common use cases:
|
||||
|
||||
#### Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
#### Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark snapshot-location create ebs-us-east-1 \
|
||||
velero snapshot-location create ebs-us-east-1 \
|
||||
--provider aws \
|
||||
--config region=us-east-1
|
||||
|
||||
ark snapshot-location create portworx-cloud \
|
||||
velero snapshot-location create portworx-cloud \
|
||||
--provider portworx \
|
||||
--config type=cloud
|
||||
```
|
||||
@@ -57,43 +49,43 @@ ark snapshot-location create portworx-cloud \
|
||||
During backup creation:
|
||||
|
||||
```shell
|
||||
ark backup create full-cluster-backup \
|
||||
velero backup create full-cluster-backup \
|
||||
--volume-snapshot-locations ebs-us-east-1,portworx-cloud
|
||||
```
|
||||
|
||||
Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Ark doesn't require them to be explicitly specified when creating the backup:
|
||||
Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Velero doesn't require them to be explicitly specified when creating the backup:
|
||||
|
||||
```shell
|
||||
ark backup create full-cluster-backup
|
||||
velero backup create full-cluster-backup
|
||||
```
|
||||
|
||||
#### Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
#### Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark backup-location create default \
|
||||
velero backup-location create default \
|
||||
--provider aws \
|
||||
--bucket ark-backups \
|
||||
--bucket velero-backups \
|
||||
--config region=us-east-1
|
||||
|
||||
ark backup-location create s3-alt-region \
|
||||
velero backup-location create s3-alt-region \
|
||||
--provider aws \
|
||||
--bucket ark-backups-alt \
|
||||
--bucket velero-backups-alt \
|
||||
--config region=us-west-1
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
```shell
|
||||
# The Ark server will automatically store backups in the backup storage location named "default" if
|
||||
# The Velero server will automatically store backups in the backup storage location named "default" if
|
||||
# one is not specified when creating the backup. You can alter which backup storage location is used
|
||||
# by default by setting the --default-backup-storage-location flag on the `ark server` command (run
|
||||
# by the Ark deployment) to the name of a different backup storage location.
|
||||
ark backup create full-cluster-backup
|
||||
# by default by setting the --default-backup-storage-location flag on the `velero server` command (run
|
||||
# by the Velero deployment) to the name of a different backup storage location.
|
||||
velero backup create full-cluster-backup
|
||||
```
|
||||
Or:
|
||||
```shell
|
||||
ark backup create full-cluster-alternate-location-backup \
|
||||
velero backup create full-cluster-alternate-location-backup \
|
||||
--storage-location s3-alt-region
|
||||
```
|
||||
|
||||
@@ -102,11 +94,11 @@ ark backup create full-cluster-alternate-location-backup \
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark snapshot-location create portworx-local \
|
||||
velero snapshot-location create portworx-local \
|
||||
--provider portworx \
|
||||
--config type=local
|
||||
|
||||
ark snapshot-location create portworx-cloud \
|
||||
velero snapshot-location create portworx-cloud \
|
||||
--provider portworx \
|
||||
--config type=cloud
|
||||
```
|
||||
@@ -116,49 +108,49 @@ During backup creation:
|
||||
```shell
|
||||
# Note that since in this example we have two possible volume snapshot locations for the Portworx
|
||||
# provider, we need to explicitly specify which one to use when creating a backup. Alternately,
|
||||
# you can set the --default-volume-snapshot-locations flag on the `ark server` command (run by
|
||||
# the Ark deployment) to specify which location should be used for each provider by default, in
|
||||
# you can set the --default-volume-snapshot-locations flag on the `velero server` command (run by
|
||||
# the Velero deployment) to specify which location should be used for each provider by default, in
|
||||
# which case you don't need to specify it when creating a backup.
|
||||
ark backup create local-snapshot-backup \
|
||||
velero backup create local-snapshot-backup \
|
||||
--volume-snapshot-locations portworx-local
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```shell
|
||||
ark backup create cloud-snapshot-backup \
|
||||
velero backup create cloud-snapshot-backup \
|
||||
--volume-snapshot-locations portworx-cloud
|
||||
```
|
||||
|
||||
#### One location is still easy
|
||||
#### Use a single location
|
||||
|
||||
If you don't have a use case for more than one location, it's still just as easy to use Ark. Let's assume you're running on AWS, in the `us-west-1` region:
|
||||
If you don't have a use case for more than one location, it's still easy to use Velero. Let's assume you're running on AWS, in the `us-west-1` region:
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark backup-location create default \
|
||||
velero backup-location create default \
|
||||
--provider aws \
|
||||
--bucket ark-backups \
|
||||
--bucket velero-backups \
|
||||
--config region=us-west-1
|
||||
|
||||
ark snapshot-location create ebs-us-west-1 \
|
||||
velero snapshot-location create ebs-us-west-1 \
|
||||
--provider aws \
|
||||
--config region=us-west-1
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
```shell
|
||||
# Ark's will automatically use your configured backup storage location and volume snapshot location.
|
||||
# Nothing new needs to be specified when creating a backup.
|
||||
ark backup create full-cluster-backup
|
||||
# Velero will automatically use your configured backup storage location and volume snapshot location.
|
||||
# Nothing needs to be specified when creating a backup.
|
||||
velero backup create full-cluster-backup
|
||||
```
|
||||
|
||||
## Additional Use Cases
|
||||
|
||||
1. If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is now possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details.
|
||||
1. If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details.
|
||||
|
||||
1. If you're using Azure, you may want to store your Ark backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details.
|
||||
1. If you're using Azure, you may want to store your Velero backups across multiple storage accounts and/or resource groups. This is possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details.
|
||||
|
||||
|
||||
|
||||
|
||||
82
docs/migrating-to-velero.md
Normal file
82
docs/migrating-to-velero.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Migrating from Heptio Ark to Velero
|
||||
|
||||
As of v0.11.0, Heptio Ark has become Velero. This means the following changes have been made:
|
||||
|
||||
* The `ark` CLI client is now `velero`.
|
||||
* The default Kubernetes namespace and ServiceAccount are now named `velero` (formerly `heptio-ark`).
|
||||
* The container image name is now `gcr.io/heptio-images/velero` (formerly `gcr.io/heptio-images/ark`).
|
||||
* CRDs are now under the new `velero.io` API group name (formerly `ark.heptio.com`).
|
||||
|
||||
|
||||
The following instructions will help you migrate your existing Ark installation to Velero.
|
||||
|
||||
# Prerequisites
|
||||
|
||||
* Ark v0.10.x installed. See the v0.10.x [upgrade instructions][1] to upgrade from older versions.
|
||||
* `kubectl` installed.
|
||||
* `cluster-admin` permissions.
|
||||
|
||||
# Migration process
|
||||
|
||||
At a high level, the migration process involves the following steps:
|
||||
|
||||
* Scale down the `ark` deployment, so it will not process schedules, backups, or restores during the migration period.
|
||||
* Create a new namespace (named `velero` by default).
|
||||
* Apply the new CRDs.
|
||||
* Migrate existing Ark CRD objects, labels, and annotations to the new Velero equivalents.
|
||||
* Recreate the existing cloud credentials secret(s) in the velero namespace.
|
||||
* Apply the updated Kubernetes deployment and daemonset (for restic support) to use the new container images and namespace.
|
||||
* Remove the existing Ark namespace (which includes the deployment), CRDs, and ClusterRoleBinding.
|
||||
|
||||
These steps are provided in a script here:
|
||||
|
||||
```bash
|
||||
kubectl scale --namespace heptio-ark deployment/ark --replicas 0
|
||||
OS=$(uname | tr '[:upper:]' '[:lower:]') # Determine if the OS is Linux or macOS
|
||||
ARCH="amd64"
|
||||
|
||||
# Download the velero client/example tarball to and unpack
|
||||
curl -L https://github.com/heptio/velero/releases/download/v0.11.0/velero-v0.11.0-${OS}-${ARCH}.tar.gz --output velero-v0.11.0-${OS}-${ARCH}.tar.gz
|
||||
tar xvf velero-v0.11.0-${OS}-${ARCH}.tar.gz
|
||||
|
||||
# Create the prerequisite CRDs and namespace
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
|
||||
# Download and unpack the crd-migrator tool
|
||||
curl -L https://github.com/vmware/crd-migration-tool/releases/download/v1.0.0/crd-migration-tool-v1.0.0-${OS}-${ARCH}.tar.gz --output crd-migration-tool-v1.0.0-${OS}-${ARCH}.tar.gz
|
||||
tar xvf crd-migration-tool-v1.0.0-${OS}-${ARCH}.tar.gz
|
||||
|
||||
# Run the tool against your cluster.
|
||||
./crd-migrator \
|
||||
--from ark.heptio.com/v1 \
|
||||
--to velero.io/v1 \
|
||||
--label-mappings ark.heptio.com:velero.io,ark-schedule:velero.io/schedule-name \
|
||||
--annotation-mappings ark.heptio.com:velero.io \
|
||||
--namespace-mappings heptio-ark:velero
|
||||
|
||||
|
||||
# Copy the necessary secret from the ark namespace
|
||||
kubectl get secret --namespace heptio-ark cloud-credentials --export -o yaml | kubectl apply --namespace velero -f -
|
||||
|
||||
# Apply the Velero deployment and restic DaemonSet for your platform
|
||||
## GCP
|
||||
#kubectl apply -f config/gcp/10-deployment.yaml
|
||||
#kubectl apply -f config/gcp/20-restic-daemonset.yaml
|
||||
## AWS
|
||||
#kubectl apply -f config/aws/10-deployment.yaml
|
||||
#kubectl apply -f config/aws/20-restic-daemonset.yaml
|
||||
## Azure
|
||||
#kubectl apply -f config/azure/00-deployment.yaml
|
||||
#kubectl apply -f config/azure/20-restic-daemonset.yaml
|
||||
|
||||
# Verify your data is still present
|
||||
./velero get backup
|
||||
./velero get restore
|
||||
|
||||
# Remove old Ark data
|
||||
kubectl delete namespace heptio-ark
|
||||
kubectl delete crds -l component=ark
|
||||
kubectl delete clusterrolebindings -l component=ark
|
||||
```
|
||||
|
||||
[1]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user