Mirror of https://github.com/vmware-tanzu/velero.git

Commit bbb11a8d23 (parent 82e464672b)
Docs updates - extra finishing touches (#1516)

* Docs formatting updates

Signed-off-by: Jonas Rosland <jrosland@vmware.com>
Committed by: Nolan Brubaker
.gitignore (vendored): 2 lines changed
@@ -43,5 +43,7 @@ site/.sass-cache
site/.jekyll
site/.jekyll-metadata
site/.bundle
site/vendor
.ruby-version

.vs
Gemfile

@@ -1,2 +1,3 @@
source 'https://rubygems.org'
gem 'github-pages'
gem 'redcarpet'
Gemfile.lock

@@ -1,12 +1,12 @@
GEM
  remote: https://rubygems.org/
  specs:
    activesupport (4.2.10)
    activesupport (4.2.11.1)
      i18n (~> 0.7)
      minitest (~> 5.1)
      thread_safe (~> 0.3, >= 0.3.4)
      tzinfo (~> 1.1)
    addressable (2.5.2)
    addressable (2.6.0)
      public_suffix (>= 2.0.2, < 4.0)
    coffee-script (2.4.1)
      coffee-script-source
@@ -15,38 +15,38 @@ GEM
    colorator (1.1.0)
    commonmarker (0.17.13)
      ruby-enum (~> 0.5)
    concurrent-ruby (1.1.3)
    concurrent-ruby (1.1.5)
    dnsruby (1.61.2)
      addressable (~> 2.5)
    em-websocket (0.5.1)
      eventmachine (>= 0.12.9)
      http_parser.rb (~> 0.6.0)
    ethon (0.11.0)
    ethon (0.12.0)
      ffi (>= 1.3.0)
    eventmachine (1.2.7)
    execjs (2.7.0)
    faraday (0.15.3)
    faraday (0.15.4)
      multipart-post (>= 1.2, < 3)
    ffi (1.9.25)
    ffi (1.11.1)
    forwardable-extended (2.6.0)
    gemoji (3.0.0)
    github-pages (192)
      activesupport (= 4.2.10)
      github-pages-health-check (= 1.8.1)
      jekyll (= 3.7.4)
    gemoji (3.0.1)
    github-pages (198)
      activesupport (= 4.2.11.1)
      github-pages-health-check (= 1.16.1)
      jekyll (= 3.8.5)
      jekyll-avatar (= 0.6.0)
      jekyll-coffeescript (= 1.1.1)
      jekyll-commonmark-ghpages (= 0.1.5)
      jekyll-default-layout (= 0.1.4)
      jekyll-feed (= 0.10.0)
      jekyll-feed (= 0.11.0)
      jekyll-gist (= 1.5.0)
      jekyll-github-metadata (= 2.9.4)
      jekyll-github-metadata (= 2.12.1)
      jekyll-mentions (= 1.4.1)
      jekyll-optional-front-matter (= 0.3.0)
      jekyll-paginate (= 1.1.0)
      jekyll-readme-index (= 0.2.0)
      jekyll-redirect-from (= 0.14.0)
      jekyll-relative-links (= 0.5.3)
      jekyll-relative-links (= 0.6.0)
      jekyll-remote-theme (= 0.3.1)
      jekyll-sass-converter (= 1.5.2)
      jekyll-seo-tag (= 2.5.0)
@@ -66,28 +66,28 @@ GEM
      jekyll-theme-tactile (= 0.1.1)
      jekyll-theme-time-machine (= 0.1.1)
      jekyll-titles-from-headings (= 0.5.1)
      jemoji (= 0.10.1)
      jemoji (= 0.10.2)
      kramdown (= 1.17.0)
      liquid (= 4.0.0)
      listen (= 3.1.5)
      mercenary (~> 0.3)
      minima (= 2.5.0)
      nokogiri (>= 1.8.2, < 2.0)
      nokogiri (>= 1.8.5, < 2.0)
      rouge (= 2.2.1)
      terminal-table (~> 1.4)
    github-pages-health-check (1.8.1)
    github-pages-health-check (1.16.1)
      addressable (~> 2.3)
      dnsruby (~> 1.60)
      octokit (~> 4.0)
      public_suffix (~> 2.0)
      public_suffix (~> 3.0)
      typhoeus (~> 1.3)
    html-pipeline (2.9.0)
    html-pipeline (2.11.0)
      activesupport (>= 2)
      nokogiri (>= 1.4)
    http_parser.rb (0.6.0)
    i18n (0.9.5)
      concurrent-ruby (~> 1.0)
    jekyll (3.7.4)
    jekyll (3.8.5)
      addressable (~> 2.4)
      colorator (~> 1.0)
      em-websocket (~> 0.5)
@@ -105,21 +105,21 @@ GEM
    jekyll-coffeescript (1.1.1)
      coffee-script (~> 2.2)
      coffee-script-source (~> 1.11.1)
    jekyll-commonmark (1.2.0)
    jekyll-commonmark (1.3.1)
      commonmarker (~> 0.14)
      jekyll (>= 3.0, < 4.0)
      jekyll (>= 3.7, < 5.0)
    jekyll-commonmark-ghpages (0.1.5)
      commonmarker (~> 0.17.6)
      jekyll-commonmark (~> 1)
      rouge (~> 2)
    jekyll-default-layout (0.1.4)
      jekyll (~> 3.0)
    jekyll-feed (0.10.0)
    jekyll-feed (0.11.0)
      jekyll (~> 3.3)
    jekyll-gist (1.5.0)
      octokit (~> 4.2)
    jekyll-github-metadata (2.9.4)
      jekyll (~> 3.1)
    jekyll-github-metadata (2.12.1)
      jekyll (~> 3.4)
      octokit (~> 4.0, != 4.4.0)
    jekyll-mentions (1.4.1)
      html-pipeline (~> 2.3)
@@ -131,7 +131,7 @@ GEM
      jekyll (~> 3.0)
    jekyll-redirect-from (0.14.0)
      jekyll (~> 3.3)
    jekyll-relative-links (0.5.3)
    jekyll-relative-links (0.6.0)
      jekyll (~> 3.3)
    jekyll-remote-theme (0.3.1)
      jekyll (~> 3.5)
@@ -185,9 +185,9 @@ GEM
      jekyll-seo-tag (~> 2.0)
    jekyll-titles-from-headings (0.5.1)
      jekyll (~> 3.3)
    jekyll-watch (2.1.2)
    jekyll-watch (2.2.1)
      listen (~> 3.0)
    jemoji (0.10.1)
    jemoji (0.10.2)
      gemoji (~> 3.0)
      html-pipeline (~> 2.2)
      jekyll (~> 3.0)
@@ -198,37 +198,38 @@ GEM
      rb-inotify (~> 0.9, >= 0.9.7)
      ruby_dep (~> 1.2)
    mercenary (0.3.6)
    mini_portile2 (2.3.0)
    mini_portile2 (2.4.0)
    minima (2.5.0)
      jekyll (~> 3.5)
      jekyll-feed (~> 0.9)
      jekyll-seo-tag (~> 2.1)
    minitest (5.8.5)
    multipart-post (2.0.0)
    nokogiri (1.8.5)
      mini_portile2 (~> 2.3.0)
    octokit (4.13.0)
    minitest (5.11.3)
    multipart-post (2.1.1)
    nokogiri (1.10.3)
      mini_portile2 (~> 2.4.0)
    octokit (4.14.0)
      sawyer (~> 0.8.0, >= 0.5.3)
    pathutil (0.16.2)
      forwardable-extended (~> 2.6)
    public_suffix (2.0.5)
    public_suffix (3.0.3)
    rb-fsevent (0.10.3)
    rb-inotify (0.9.10)
      ffi (>= 0.5.0, < 2)
    rb-inotify (0.10.0)
      ffi (~> 1.0)
    redcarpet (3.4.0)
    rouge (2.2.1)
    ruby-enum (0.7.2)
      i18n
    ruby_dep (1.5.0)
    rubyzip (1.2.2)
    safe_yaml (1.0.4)
    sass (3.7.2)
    safe_yaml (1.0.5)
    sass (3.7.4)
      sass-listen (~> 4.0.0)
    sass-listen (4.0.0)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)
    sawyer (0.8.1)
      addressable (>= 2.3.5, < 2.6)
      faraday (~> 0.8, < 1.0)
    sawyer (0.8.2)
      addressable (>= 2.3.5)
      faraday (> 0.8, < 2.0)
    terminal-table (1.8.0)
      unicode-display_width (~> 1.1, >= 1.1.1)
    thread_safe (0.3.6)
@@ -236,13 +237,14 @@ GEM
      ethon (>= 0.9.0)
    tzinfo (1.2.5)
      thread_safe (~> 0.1)
    unicode-display_width (1.4.0)
    unicode-display_width (1.6.0)

PLATFORMS
  ruby

DEPENDENCIES
  github-pages
  redcarpet

BUNDLED WITH
   2.0.1
@@ -1,5 +1,13 @@
# Dependencies
If you are running a build on Ubuntu you will need the following packages
# Dependencies for MacOS

Install the following for an easy to use dev environment:

* `brew install rbenv`
* `rbenv install 2.6.3`
* `gem install bundler`

# Dependencies for Linux
If you are running a build on Ubuntu you will need the following packages:
* ruby
* ruby-dev
* ruby-bundler
@@ -7,13 +15,15 @@ If you are running a build on Ubuntu you will need the following packages
* zlib1g-dev
* nginx (or apache2)

For other operating systems such as MacOS you will need equivalent packages or install xcode

# Local Development
1. Install Jekyll and plug-ins in one fell swoop. `gem install github-pages`
   This mirrors the plug-ins used by GitHub Pages on your local machine including Jekyll, Sass, etc.
2. Clone down your fork `git@github.com:smalltalk-ai/vmware-jekyll-velero.git`
3. cd into the `site` directory
4. Serve the site and watch for markup/sass changes `jekyll serve --livereload`. you may need to run `bundle exec jekyll serve --livereload`.
5. View your website at http://127.0.0.1:4000/
6. Commit any changes and push everything to the master branch of your GitHub user repository. GitHub Pages will then rebuild and serve your website.
2. Clone down your own fork, or clone the main repo `git clone https://github.com/heptio/velero` and add your own remote.
3. `cd velero/site`
4. `rbenv local 2.6.3`
5. `bundle install`
6. Serve the site and watch for markup/sass changes `jekyll serve --livereload`. You may need to run `bundle exec jekyll serve --livereload`.
7. View your website at http://127.0.0.1:4000/
8. Commit any changes and push everything to your fork.
9. Once you're ready, submit a PR of your changes. Netlify will automatically generate a preview of your changes.
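Taken together, the updated steps boil down to the following shell session (a sketch that assumes rbenv and Bundler are already installed, per the MacOS dependency list above):

```bash
# Clone the main repo (or your own fork) and enter the site directory
git clone https://github.com/heptio/velero
cd velero/site

# Pin the Ruby version and install the gems declared in the Gemfile
rbenv local 2.6.3
bundle install

# Serve with live reload, then browse to http://127.0.0.1:4000/
bundle exec jekyll serve --livereload
```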
site/_config.yml

@@ -7,6 +7,7 @@ url:
logo: Velero.svg
vm_logo: vm-logo.png
gh_repo: https://github.com/heptio/velero
markdown: redcarpet
hero:
  background-color: med-blue
footer:
@@ -173,3 +174,6 @@ exclude:
- CNAME
- Runbook.docx
- '*.sh'

redcarpet:
  extensions: ["no_intra_emphasis", "tables", "autolink", "strikethrough", "with_toc_data"]
site/_includes/head-docs.html (new file): 9 lines
@@ -0,0 +1,9 @@
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
  <link rel="icon" href="/favicon.ico" type="image/x-icon">
  <link rel="stylesheet" href="/css/styles.css?{{site.time | date: '%s%N'}}">

</head>
@@ -1,10 +1,17 @@
<!DOCTYPE html>
<html lang="en">
{% include head.html %}
{% include head-docs.html %}
{% if page.version != "master" %}
<!-- Block google from indexing versioned docs -->
<meta name="robots" content="noindex">
{% endif %}
{% if page.name != "README.md" %}
<title>{{ site.title }} Docs - {{page.title}}</title>
{% endif %}
{% if page.name == "README.md" %}
<title>{{ site.title }} Docs - Overview</title>
{% endif %}

<body id="docs">
  <div class="container-fluid site-outer-container">
    <div class="site-container">
@@ -36,3 +36,21 @@ h5 {
strong {
  font-weight: $font-weight-semibold;
}
pre {
  display: block;
  font-size: $code-font-size;
  color: $pre-color;
  background-color: #f2f2f2;
  padding-top: 5px;
  padding-bottom: 5px;
  padding-left: 5px;
  padding-right: 5px;

  // Account for some code outputs that place code tags in pre tags
  code {
    font-size: inherit;
    color: inherit;
    word-break: normal;
  }
}
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

2. Extract the tarball:
```bash

```
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

3. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -50,6 +52,7 @@ aws s3api create-bucket \
For more information, see [the AWS documentation on IAM users][14].

1. Create the IAM user:

```bash
aws iam create-user --user-name velero
```
@@ -57,7 +60,8 @@ aws iam create-user --user-name velero
If you'll be using Velero to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.

2. Attach policies to give `velero` the necessary permissions:
```bash

```
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
@@ -108,11 +112,13 @@ aws iam put-user-policy \
```

3. Create an access key for the user:

```bash
aws iam create-access-key --user-name velero
```

The result should look like:

```json
{
"AccessKey": {
@@ -126,6 +132,7 @@ aws iam create-access-key --user-name velero
```

4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:

```bash
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
@@ -158,9 +165,9 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru

## Setting AWS_CLUSTER_NAME (Optional)

* If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:

* Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
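If you'd rather not open an editor, the same environment variable could be added non-interactively. This is a hypothetical sketch, not part of the diff; it assumes the first container already has an `env` list, and `my-cluster` is a placeholder:

```bash
# Hypothetical non-interactive alternative to `kubectl edit`:
# append AWS_CLUSTER_NAME to the velero container's env list.
kubectl -n velero patch deploy/velero --type=json -p='[
  {"op": "add",
   "path": "/spec/template/spec/containers/0/env/-",
   "value": {"name": "AWS_CLUSTER_NAME", "value": "my-cluster"}}
]'
```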
The best way to get the current cluster's name is to either check it with used deployment tool or to read it directly from the EC2 instances tags.

The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
@@ -192,7 +199,8 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
It can be set up for Velero by creating a role that will have required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally.

1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role:
```bash

```
cat > velero-trust-policy.json <<EOF
{
"Version": "2012-10-17",
@@ -217,12 +225,14 @@ EOF
```

2. Create the IAM role:

```bash
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
```

3. Attach policies to give `velero` the necessary permissions:
```bash

```
BUCKET=<YOUR_BUCKET>
cat > velero-policy.json <<EOF
{
@@ -272,7 +282,9 @@ aws iam put-role-policy \
--policy-name velero-policy \
--policy-document file://./velero-policy.json
```

4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` with `kubectl edit deploy/velero -n velero` and add the following annotation:

```
---
apiVersion: apps/v1beta1
@@ -288,6 +300,7 @@ spec:
component: velero
annotations:
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
...
```

## Installing the nginx example (optional)
@@ -29,9 +29,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -50,12 +52,16 @@ this name however you'd like, following the [Azure naming rules for storage acco
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
configured to only allow access via https.

Create a resource group for the backups storage account. Change the location as needed.

```bash
# Create a resource group for the backups storage account. Change the location as needed.
AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
```

# Create the storage account
Create the storage account.

```bash
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
az storage account create \
--name $AZURE_STORAGE_ACCOUNT_ID \
@@ -78,7 +84,7 @@ az storage container create -n $BLOB_CONTAINER --public-access off --account-nam

1. Set the name of the Resource Group that contains your Kubernetes cluster's virtual machines/disks.

> **WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
**WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
when you provision your cluster in Azure, since this is the resource group that contains your cluster's virtual machines/disks.

```bash
|
||||
@@ -106,23 +112,30 @@ To integrate Velero with Azure, you must create a Velero-specific [service princ
|
||||
|
||||
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
|
||||
|
||||
> If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
Create service principal and specify your own password:
|
||||
|
||||
```bash
|
||||
# Create service principal and specify your own password
|
||||
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
|
||||
az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
```
|
||||
|
||||
# Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
|
||||
```bash
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`
|
||||
```
|
||||
|
||||
# After creating the service principal, obtain the client id
|
||||
After creating the service principal, obtain the client id.
|
||||
|
||||
```bash
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
|
||||
```
|
||||
|
||||
Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
|
||||
1. Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
|
||||
|
||||
```bash
|
||||
```
|
||||
cat << EOF > ./credentials-velero
|
||||
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
|
||||
AZURE_TENANT_ID=${AZURE_TENANT_ID}
|
||||
@@ -157,7 +170,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
|
||||
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
|
||||
Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
|
||||
|
||||
[0]: namespace.md
|
||||
[8]: api-types/volumesnapshotlocation.md#azure
|
||||
|
||||
@@ -37,10 +37,13 @@ Note that the Makefile targets assume building from a git repository. When build
There are a number of different ways to build `velero` depending on your needs. This section outlines the main possibilities.

To build the `velero` binary on your local machine, compiled for your OS and architecture, run:

```bash
go build ./cmd/velero
```

or:

```bash
make local
```
@@ -48,6 +51,7 @@ make local
The latter will place the compiled binary under `$PWD/_output/bin/$GOOS/$GOARCH`, and will splice version and git commit information in so that `velero version` displays proper output. `velero install` will also use the version information to determine which tagged image to deploy.

To build the `velero` binary targeting `linux/amd64` within a build container on your local machine, run:

```bash
make build
```
@@ -55,11 +59,13 @@ make build
See the **Cross compiling** section below for details on building for alternate OS/architecture combinations.

To build a Velero container image, first set the `$REGISTRY` environment variable. For example, if you want to build the `gcr.io/my-registry/velero:master` image, set `$REGISTRY` to `gcr.io/my-registry`. Optionally, set the `$VERSION` environment variable to change the image tag. Then, run:

```bash
make container
```
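Putting the two variables together, a typical invocation might look like this (the registry value comes from the example above; the tag is a placeholder):

```bash
# Build gcr.io/my-registry/velero:v1.0.0 locally
REGISTRY=gcr.io/my-registry VERSION=v1.0.0 make container
```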
To push your image to a registry, run:

```bash
make push
```
@@ -205,6 +211,7 @@ rm fake-credentials-file
1. Ensure you've built a `velero` container image and either loaded it onto your cluster's nodes, or pushed it to a registry (see [build][3]).

1. Install Velero into the cluster (the example below assumes you're using AWS):

```bash
velero install \
--provider aws \
@@ -24,6 +24,7 @@ kubectl -n velero logs deployment/velero
#### Using credentials
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following:

* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `credentials-velero` file is formatted properly and has the correct values:
@@ -33,11 +34,13 @@ into the Velero server pod. Ensure the following:
aws_access_key_id=<your AWS access key ID>
aws_secret_access_key=<your AWS secret access key>
```

* The `cloud-credentials` secret is defined as a volume for the Velero deployment
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
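A quick way to check the first two items is to dump the secret's `cloud` key; this is the same command the upgrade instructions use later to fetch the credentials:

```bash
# Should print the contents of your credentials-velero file.
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode
```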
#### Using kube2iam
This means that Velero can't read the content of the S3 bucket. Ensure the following:

* There is a Trust Policy document allowing the role used by kube2iam to assume Velero's role, as stated in the AWS config documentation.
* The new Velero role has all the permissions listed in the documentation regarding S3.

@@ -47,6 +50,7 @@ This means that Velero can't read the content of the S3 bucket. Ensure the follo
### `Failed to refresh the Token` or `adal: Refresh request failed`
This means that the secrets containing the Azure service principal credentials for Velero has not been created/mounted
properly into the Velero server pod. Ensure the following:

* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0))
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
@@ -58,6 +62,7 @@ properly into the Velero server pod. Ensure the following:
### `open credentials/cloud: no such file or directory`
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following:

* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
@@ -16,10 +16,13 @@ backup-test-2-20170726180515 backup-test-2 Completed 0 1 2
```

To delve into the warnings and errors into more detail, you can use `velero restore describe`:
```

```bash
velero restore describe backup-test-20170726180512
```

The output looks like this:

```
Name: backup-test-20170726180512
Namespace: velero
@@ -9,6 +9,7 @@ If you periodically back up your cluster's resources, you are able to return to
```
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
```

This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.

1. A disaster happens and you need to recreate your resources.
@@ -16,9 +17,7 @@ If you periodically back up your cluster's resources, you are able to return to
1. Update the Velero server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process.
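One possible way to make that change without an interactive edit (my own sketch, not from the doc; it assumes the server flags live in the first container's args):

```bash
# Append --restore-only to the Velero server's arguments.
kubectl -n velero patch deploy/velero --type=json -p='[
  {"op": "add",
   "path": "/spec/template/spec/containers/0/args/-",
   "value": "--restore-only"}
]'
```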
1. Create a restore with your most recent Velero Backup:

```
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
```
@@ -16,9 +16,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -74,7 +76,6 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
3. Attach policies to give `velero` the necessary permissions to function:

```bash

ROLE_PERMISSIONS=(
compute.disks.get
compute.disks.create
@@ -136,7 +137,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru

If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:

* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.

[0]: namespace.md
@@ -146,4 +147,3 @@ If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
[16]: https://cloud.google.com/sdk/docs/
[20]: faq.md
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap
@@ -26,9 +26,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -36,6 +38,7 @@ of the Velero repository is under active development and is not guaranteed to be
#### MacOS Installation

On Mac, you can use [HomeBrew](https://brew.sh) to install the `velero` client:

```bash
brew install velero
```
@@ -54,9 +57,10 @@ These instructions start the Velero server and a Minio instance that is accessib

1. Start the server and the local storage service. In the Velero directory, run:

```bash
```
kubectl apply -f examples/minio/00-minio-deployment.yaml

```
```
velero install \
--provider aws \
--bucket velero \
@@ -217,9 +221,7 @@ You must also get the Minio URL, which you can then specify as the value of the
```

- in any other environment:

1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.

1. Append the value of the NodePort to get a complete URL. You can get this value by running:

```shell
@@ -18,22 +18,26 @@ You can use the following annotations on a pod to make Velero execute a hook whe

#### Pre hooks

| Annotation Name | Description |
| --- | --- |
| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
* `pre.hook.backup.velero.io/container`
  * The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `pre.hook.backup.velero.io/command`
  * The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `pre.hook.backup.velero.io/on-error`
  * What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `pre.hook.backup.velero.io/timeout`
  * How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.
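For example (my own illustration, reusing the annotation keys and the `uname` sample command from the list above; `mypod` is a placeholder):

```bash
# Run `uname -a` in mypod's first container before backup, and keep going
# even if the command fails.
kubectl annotate pod mypod \
  pre.hook.backup.velero.io/command='["/usr/bin/uname", "-a"]' \
  pre.hook.backup.velero.io/on-error=Continue
```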
#### Post hooks

| Annotation Name | Description |
| --- | --- |
| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
* `post.hook.backup.velero.io/container`
  * The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `post.hook.backup.velero.io/command`
  * The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `post.hook.backup.velero.io/on-error`
  * What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `post.hook.backup.velero.io/timeout`
  * How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.

### Specifying Hooks in the Backup Spec
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -80,7 +82,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru

If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:

* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.

[0]: namespace.md
@@ -76,6 +76,7 @@ velero backup-location create s3-alt-region \
```

During backup creation:

```shell
# The Velero server will automatically store backups in the backup storage location named "default" if
# one is not specified when creating the backup. You can alter which backup storage location is used
@@ -83,7 +84,9 @@ During backup creation:
# by the Velero deployment) to the name of a different backup storage location.
velero backup create full-cluster-backup
```

Or:

```shell
velero backup create full-cluster-alternate-location-backup \
--storage-location s3-alt-region
@@ -140,6 +143,7 @@ velero snapshot-location create ebs-us-west-1 \
```

During backup creation:

```shell
# Velero will automatically use your configured backup storage location and volume snapshot location.
# Nothing needs to be specified when creating a backup.
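By analogy with `--storage-location` above, a backup can also be pinned to a specific volume snapshot location; this flag is not shown in the diff itself, so treat it as a hedged sketch:

```bash
# Use the ebs-us-west-1 snapshot location created above for this backup's volumes.
velero backup create full-cluster-backup \
  --volume-snapshot-locations ebs-us-west-1
```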
@@ -9,6 +9,7 @@ Velero can help you port your resources from one cluster to another, as long as
```
velero backup create <BACKUP-NAME>
```

The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary.
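For example, applying the flag described above (the arithmetic: 90 days is 2160 hours):

```bash
# Keep this backup for 90 days instead of the 30-day default.
velero backup create <BACKUP-NAME> --ttl 2160h
```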
1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML.
@@ -15,6 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru
- a plugin with the same name cannot already exist

### Some examples:

```
- example.io/azure
- 1.2.3.4/5678
@@ -100,7 +100,8 @@ You're now ready to use Velero with restic.

```bash
velero backup describe YOUR_BACKUP_NAME

```
```bash
kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml
```
@@ -116,7 +117,8 @@ You're now ready to use Velero with restic.

```bash
velero restore describe YOUR_RESTORE_NAME

```
```bash
kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml
```
@@ -26,6 +26,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v0.11.1 release tarball][1] for your client platform.

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
@@ -33,35 +34,46 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

1. Scale down your existing Velero deployment:

```bash
kubectl -n velero scale deployment/velero --replicas 0
```

1. Fetch velero's credentials for accessing your object storage bucket and store them locally for use by `velero migrate-backups`:

For AWS:

```bash
# for AWS
export AWS_SHARED_CREDENTIALS_FILE=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $AWS_SHARED_CREDENTIALS_FILE
```

# for Azure
For Azure:

```bash
export AZURE_SUBSCRIPTION_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_SUBSCRIPTION_ID}" | base64 --decode)
export AZURE_TENANT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_TENANT_ID}" | base64 --decode)
export AZURE_CLIENT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_ID}" | base64 --decode)
export AZURE_CLIENT_SECRET=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_SECRET}" | base64 --decode)
export AZURE_RESOURCE_GROUP=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_RESOURCE_GROUP}" | base64 --decode)
```

# for GCP
For GCP:

```bash
export GOOGLE_APPLICATION_CREDENTIALS=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $GOOGLE_APPLICATION_CREDENTIALS
```

1. List all of your backup storage locations:

```bash
velero backup-location get
```

1. For each backup storage location that you want to use with Velero 1.0, replace any legacy pre-v0.11 backup metadata with the equivalent current formats:
```bash

```
# - BACKUP_LOCATION_NAME is the name of a backup location from the previous step, whose
# backup metadata will be updated in object storage
# - SNAPSHOT_LOCATION_NAME is the name of the volume snapshot location that Velero should
@@ -73,24 +85,33 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
```

1. Scale up your deployment:

```bash
kubectl -n velero scale deployment/velero --replicas 1
```

1. Remove the local velero credentials:
```bash
# for AWS
1. Remove the local `velero` credentials:

For AWS:

```
rm $AWS_SHARED_CREDENTIALS_FILE
unset AWS_SHARED_CREDENTIALS_FILE
```

# for Azure
For Azure:

```
unset AZURE_SUBSCRIPTION_ID
unset AZURE_TENANT_ID
unset AZURE_CLIENT_ID
unset AZURE_CLIENT_SECRET
unset AZURE_RESOURCE_GROUP
```

# for GCP
For GCP:

```
rm $GOOGLE_APPLICATION_CREDENTIALS
unset GOOGLE_APPLICATION_CREDENTIALS
```
@@ -104,6 +125,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v1.0 release tarball][2] for your client platform.

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
@@ -111,6 +133,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Move the `velero` binary from the Velero directory to somewhere in your PATH, replacing any existing pre-1.0 `velero` binaries.

1. Update the image for the Velero deployment and daemon set (if applicable):

```bash
kubectl -n velero set image deployment/velero velero=gcr.io/heptio-images/velero:v1.0.0
kubectl -n velero set image daemonset/restic restic=gcr.io/heptio-images/velero:v1.0.0
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
2. Extract the tarball:
|
||||
```bash
|
||||
|
||||
```
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
3. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
@@ -50,6 +52,7 @@ aws s3api create-bucket \
|
||||
For more information, see [the AWS documentation on IAM users][14].
|
||||
|
||||
1. Create the IAM user:
|
||||
|
||||
```bash
|
||||
aws iam create-user --user-name velero
|
||||
```
|
||||
@@ -57,7 +60,8 @@ aws iam create-user --user-name velero
|
||||
If you'll be using Velero to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
2. Attach policies to give `velero` the necessary permissions:
|
||||
```bash
|
||||
|
||||
```
|
||||
cat > velero-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
@@ -108,11 +112,13 @@ aws iam put-user-policy \
|
||||
```
|
||||
|
||||
3. Create an access key for the user:
|
||||
|
||||
```bash
|
||||
aws iam create-access-key --user-name velero
|
||||
```
|
||||
|
||||
The result should look like:
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessKey": {
|
||||
@@ -126,6 +132,7 @@ aws iam create-access-key --user-name velero
|
||||
```
|
||||
|
||||
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
|
||||
|
||||
```bash
|
||||
[default]
|
||||
aws_access_key_id=<AWS_ACCESS_KEY_ID>
|
||||
@@ -158,9 +165,9 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
|
||||
|
||||
## Setting AWS_CLUSTER_NAME (Optional)
|
||||
|
||||
* If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
|
||||
If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
|
||||
|
||||
* Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
|
||||
Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
|
||||
The best way to get the current cluster's name is to either check it with used deployment tool or to read it directly from the EC2 instances tags.
|
||||
|
||||
The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
|
||||
@@ -192,7 +199,8 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
|
||||
It can be set up for Velero by creating a role that will have required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally.
|
||||
|
||||
1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role:
|
||||
```bash
|
||||
|
||||
```
|
||||
cat > velero-trust-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
@@ -217,12 +225,14 @@ EOF
|
||||
```
|
||||
|
||||
2. Create the IAM role:
|
||||
|
||||
```bash
|
||||
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
|
||||
```
|
||||
|
||||
3. Attach policies to give `velero` the necessary permissions:
|
||||
```bash
|
||||
|
||||
```
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
cat > velero-policy.json <<EOF
|
||||
{
|
||||
@@ -272,7 +282,9 @@ aws iam put-role-policy \
|
||||
--policy-name velero-policy \
|
||||
--policy-document file://./velero-policy.json
|
||||
```
|
||||
|
||||
4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` with `kubectl edit deploy/velero -n velero` and add the following annotation:
|
||||
|
||||
```
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
@@ -288,6 +300,7 @@ spec:
|
||||
component: velero
|
||||
annotations:
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
|
||||
...
|
||||
```
|
||||
|
||||
## Installing the nginx example (optional)
|
||||
|
||||
@@ -29,9 +29,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
|
||||
of the Velero repository is under active development and is not guaranteed to be stable!_
|
||||
|
||||
1. Extract the tarball:
|
||||
|
||||
```bash
|
||||
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
@@ -50,12 +52,16 @@ this name however you'd like, following the [Azure naming rules for storage acco
|
||||
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
|
||||
configured to only allow access via https.
|
||||
|
||||
Create a resource group for the backups storage account. Change the location as needed.
|
||||
|
||||
```bash
|
||||
# Create a resource group for the backups storage account. Change the location as needed.
|
||||
AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
|
||||
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
|
||||
```
|
||||
|
||||
# Create the storage account
|
||||
Create the storage account.
|
||||
|
||||
```bash
|
||||
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
|
||||
az storage account create \
|
||||
--name $AZURE_STORAGE_ACCOUNT_ID \
|
||||
@@ -78,7 +84,7 @@ az storage container create -n $BLOB_CONTAINER --public-access off --account-nam
|
||||
|
||||
1. Set the name of the Resource Group that contains your Kubernetes cluster's virtual machines/disks.
|
||||
|
||||
> **WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
|
||||
**WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
|
||||
when you provision your cluster in Azure, since this is the resource group that contains your cluster's virtual machines/disks.
|
||||
|
||||
```bash
|
||||
@@ -106,23 +112,30 @@ To integrate Velero with Azure, you must create a Velero-specific [service princ
|
||||
|
||||
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
|
||||
|
||||
> If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
Create service principal and specify your own password:
|
||||
|
||||
```bash
|
||||
# Create service principal and specify your own password
|
||||
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
|
||||
az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
```
|
||||
|
||||
# Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
|
||||
```bash
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`
|
||||
```
|
||||
|
||||
# After creating the service principal, obtain the client id
|
||||
After creating the service principal, obtain the client id.
|
||||
|
||||
```bash
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
|
||||
```
|
||||
|
||||
Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
|
||||
1. Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
|
||||
|
||||
```bash
|
||||
```
|
||||
cat << EOF > ./credentials-velero
|
||||
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
|
||||
AZURE_TENANT_ID=${AZURE_TENANT_ID}
|
||||
@@ -157,7 +170,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
|
||||
|
||||
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
|
||||
Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
|
||||
|
||||
[0]: namespace.md
|
||||
[8]: api-types/volumesnapshotlocation.md#azure
|
||||
|
||||
@@ -37,10 +37,13 @@ Note that the Makefile targets assume building from a git repository. When build
|
||||
There are a number of different ways to build `velero` depending on your needs. This section outlines the main possibilities.
|
||||
|
||||
To build the `velero` binary on your local machine, compiled for your OS and architecture, run:
|
||||
|
||||
```bash
|
||||
go build ./cmd/velero
|
||||
```
|
||||
|
||||
or:
|
||||
|
||||
```bash
|
||||
make local
|
||||
```
|
||||
@@ -48,6 +51,7 @@ make local
|
||||
The latter will place the compiled binary under `$PWD/_output/bin/$GOOS/$GOARCH`, and will splice version and git commit information in so that `velero version` displays proper output. `velero install` will also use the version information to determine which tagged image to deploy.
|
||||
|
||||
To build the `velero` binary targeting `linux/amd64` within a build container on your local machine, run:
|
||||
|
||||
```bash
|
||||
make build
|
||||
```
|
||||
@@ -55,11 +59,13 @@ make build
|
||||
See the **Cross compiling** section below for details on building for alternate OS/architecture combinations.
|
||||
|
||||
To build a Velero container image, first set the `$REGISTRY` environment variable. For example, if you want to build the `gcr.io/my-registry/velero:master` image, set `$REGISTRY` to `gcr.io/my-registry`. Optionally, set the `$VERSION` environment variable to change the image tag. Then, run:
|
||||
|
||||
```bash
|
||||
make container
|
||||
```
|
||||
|
||||
To push your image to a registry, run:
|
||||
|
||||
```bash
|
||||
make push
|
||||
```
|
||||
@@ -205,6 +211,7 @@ rm fake-credentials-file
|
||||
1. Ensure you've built a `velero` container image and either loaded it onto your cluster's nodes, or pushed it to a registry (see [build][3]).
|
||||
|
||||
1. Install Velero into the cluster (the example below assumes you're using AWS):
|
||||
|
||||
```bash
|
||||
velero install \
|
||||
--provider aws \
|
||||
|
||||
@@ -24,6 +24,7 @@ kubectl -n velero logs deployment/velero
|
||||
#### Using credentials
|
||||
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `credentials-velero` file is formatted properly and has the correct values:
|
||||
@@ -33,11 +34,13 @@ into the Velero server pod. Ensure the following:
|
||||
aws_access_key_id=<your AWS access key ID>
|
||||
aws_secret_access_key=<your AWS secret access key>
|
||||
```
|
||||
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
#### Using kube2iam
|
||||
This means that Velero can't read the content of the S3 bucket. Ensure the following:
|
||||
|
||||
* There is a Trust Policy document allowing the role used by kube2iam to assume Velero's role, as stated in the AWS config documentation.
|
||||
* The new Velero role has all the permissions listed in the documentation regarding S3.
|
||||
|
||||
@@ -47,6 +50,7 @@ This means that Velero can't read the content of the S3 bucket. Ensure the follo
|
||||
### `Failed to refresh the Token` or `adal: Refresh request failed`
|
||||
This means that the secrets containing the Azure service principal credentials for Velero has not been created/mounted
|
||||
properly into the Velero server pod. Ensure the following:
|
||||
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0))
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
@@ -58,6 +62,7 @@ properly into the Velero server pod. Ensure the following:
|
||||
### `open credentials/cloud: no such file or directory`
|
||||
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
|
||||
@@ -16,10 +16,13 @@ backup-test-2-20170726180515 backup-test-2 Completed 0 1 2
|
||||
```
|
||||
|
||||
To delve into the warnings and errors into more detail, you can use `velero restore describe`:
|
||||
```
|
||||
|
||||
```bash
|
||||
velero restore describe backup-test-20170726180512
|
||||
```
|
||||
|
||||
The output looks like this:
|
||||
|
||||
```
|
||||
Name: backup-test-20170726180512
|
||||
Namespace: velero
|
||||
|
||||
@@ -9,6 +9,7 @@ If you periodically back up your cluster's resources, you are able to return to
|
||||
```
|
||||
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
```
|
||||
|
||||
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
|
||||
|
||||
1. A disaster happens and you need to recreate your resources.
|
||||
@@ -16,9 +17,7 @@ If you periodically back up your cluster's resources, you are able to return to
|
||||
1. Update the Velero server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process.
|
||||
|
||||
1. Create a restore with your most recent Velero Backup:
|
||||
|
||||
```
|
||||
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -16,9 +16,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

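A hedged example (assumes a Unix-like system where `/usr/local/bin` is in your PATH):

```bash
sudo mv velero /usr/local/bin/velero
```
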
@@ -74,7 +76,6 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
3. Attach policies to give `velero` the necessary permissions to function:

```bash
ROLE_PERMISSIONS=(
    compute.disks.get
    compute.disks.create
@@ -136,7 +137,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru

If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:

* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.

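A hedged one-liner to confirm the storage class names available in your cluster:

```bash
kubectl get storageclass
```
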
[0]: namespace.md
@@ -146,4 +147,3 @@ If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
[16]: https://cloud.google.com/sdk/docs/
[20]: faq.md
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap

@@ -26,9 +26,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

@@ -36,6 +38,7 @@ of the Velero repository is under active development and is not guaranteed to be
#### MacOS Installation

On Mac, you can use [Homebrew](https://brew.sh) to install the `velero` client:

```bash
brew install velero
```
@@ -54,9 +57,10 @@ These instructions start the Velero server and a Minio instance that is accessib

1. Start the server and the local storage service. In the Velero directory, run:

```bash
kubectl apply -f examples/minio/00-minio-deployment.yaml
```

```bash
velero install \
    --provider aws \
    --bucket velero \
@@ -217,9 +221,7 @@ You must also get the Minio URL, which you can then specify as the value of the
```

- in any other environment:

1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.

1. Append the value of the NodePort to get a complete URL. You can get this value by running:

```shell
@@ -18,22 +18,26 @@ You can use the following annotations on a pod to make Velero execute a hook whe

#### Pre hooks

| Annotation Name | Description |
| --- | --- |
| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |

#### Post hooks

| Annotation Name | Description |
| --- | --- |
| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |

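As an illustration, a hedged example of setting hook annotations with `kubectl` (the pod name, namespace, and filesystem paths are hypothetical):

```bash
# Freeze the filesystem before the backup and unfreeze it afterwards.
kubectl -n nginx-example annotate pod/nginx \
  pre.hook.backup.velero.io/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
  post.hook.backup.velero.io/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]'
```
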
### Specifying Hooks in the Backup Spec
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

@@ -80,7 +82,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru

If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:

* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.

[0]: namespace.md
@@ -76,6 +76,7 @@ velero backup-location create s3-alt-region \
```

During backup creation:

```shell
# The Velero server will automatically store backups in the backup storage location named "default" if
# one is not specified when creating the backup. You can alter which backup storage location is used
@@ -83,7 +84,9 @@ During backup creation:
# by the Velero deployment) to the name of a different backup storage location.
velero backup create full-cluster-backup
```

Or:

```shell
velero backup create full-cluster-alternate-location-backup \
    --storage-location s3-alt-region
@@ -140,6 +143,7 @@ velero snapshot-location create ebs-us-west-1 \
```

During backup creation:

```shell
# Velero will automatically use your configured backup storage location and volume snapshot location.
# Nothing needs to be specified when creating a backup.

@@ -9,6 +9,7 @@ Velero can help you port your resources from one cluster to another, as long as
```
velero backup create <BACKUP-NAME>
```

The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary.

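For example, a hedged sketch that keeps the backup for one week (the `--ttl` value is a duration such as `168h0m0s`):

```
velero backup create <BACKUP-NAME> --ttl 168h0m0s
```
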
1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML.
@@ -15,6 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru
- a plugin with the same name must not already exist

### Some examples:

```
- example.io/azure
- 1.2.3.4/5678

@@ -100,7 +100,8 @@ You're now ready to use Velero with restic.

```bash
velero backup describe YOUR_BACKUP_NAME
```

```bash
kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml
```

@@ -116,7 +117,8 @@ You're now ready to use Velero with restic.

```bash
velero restore describe YOUR_RESTORE_NAME
```

```bash
kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml
```

@@ -26,6 +26,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v0.11.1 release tarball][1] for your client platform.

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

@@ -33,35 +34,46 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

1. Scale down your existing Velero deployment:

```bash
kubectl -n velero scale deployment/velero --replicas 0
```

1. Fetch velero's credentials for accessing your object storage bucket and store them locally for use by `velero migrate-backups`:

For AWS:

```bash
export AWS_SHARED_CREDENTIALS_FILE=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $AWS_SHARED_CREDENTIALS_FILE
```

For Azure:

```bash
export AZURE_SUBSCRIPTION_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_SUBSCRIPTION_ID}" | base64 --decode)
export AZURE_TENANT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_TENANT_ID}" | base64 --decode)
export AZURE_CLIENT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_ID}" | base64 --decode)
export AZURE_CLIENT_SECRET=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_SECRET}" | base64 --decode)
export AZURE_RESOURCE_GROUP=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_RESOURCE_GROUP}" | base64 --decode)
```

For GCP:

```bash
export GOOGLE_APPLICATION_CREDENTIALS=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $GOOGLE_APPLICATION_CREDENTIALS
```

1. List all of your backup storage locations:

```bash
velero backup-location get
```

1. For each backup storage location that you want to use with Velero 1.0, replace any legacy pre-v0.11 backup metadata with the equivalent current formats:
```bash
# - BACKUP_LOCATION_NAME is the name of a backup location from the previous step, whose
# backup metadata will be updated in object storage
# - SNAPSHOT_LOCATION_NAME is the name of the volume snapshot location that Velero should
@@ -73,24 +85,33 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
```

1. Scale up your deployment:

```bash
kubectl -n velero scale deployment/velero --replicas 1
```

1. Remove the local `velero` credentials:

For AWS:

```bash
rm $AWS_SHARED_CREDENTIALS_FILE
unset AWS_SHARED_CREDENTIALS_FILE
```

For Azure:

```bash
unset AZURE_SUBSCRIPTION_ID
unset AZURE_TENANT_ID
unset AZURE_CLIENT_ID
unset AZURE_CLIENT_SECRET
unset AZURE_RESOURCE_GROUP
```

For GCP:

```bash
rm $GOOGLE_APPLICATION_CREDENTIALS
unset GOOGLE_APPLICATION_CREDENTIALS
```

@@ -104,6 +125,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v1.0 release tarball][2] for your client platform.

1. Extract the tarball:

```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```

@@ -111,6 +133,7 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Move the `velero` binary from the Velero directory to somewhere in your PATH, replacing any existing pre-1.0 `velero` binaries.

1. Update the image for the Velero deployment and daemon set (if applicable):

```bash
kubectl -n velero set image deployment/velero velero=gcr.io/heptio-images/velero:v1.0.0
kubectl -n velero set image daemonset/restic restic=gcr.io/heptio-images/velero:v1.0.0
```