Docs updates - extra finishing touches (#1516)

* Docs formatting updates

Signed-off-by: Jonas Rosland <jrosland@vmware.com>
Jonas Rosland
2019-05-28 18:42:27 +02:00
committed by Nolan Brubaker
parent 82e464672b
commit bbb11a8d23
42 changed files with 954 additions and 743 deletions

.gitignore vendored

@@ -43,5 +43,7 @@ site/.sass-cache
site/.jekyll
site/.jekyll-metadata
site/.bundle
site/vendor
.ruby-version
.vs


@@ -1,2 +1,3 @@
source 'https://rubygems.org'
gem 'github-pages'
gem 'github-pages'
gem 'redcarpet'


@@ -1,12 +1,12 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11.1)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
coffee-script (2.4.1)
coffee-script-source
@@ -15,38 +15,38 @@ GEM
colorator (1.1.0)
commonmarker (0.17.13)
ruby-enum (~> 0.5)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.5)
dnsruby (1.61.2)
addressable (~> 2.5)
em-websocket (0.5.1)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
ethon (0.11.0)
ethon (0.12.0)
ffi (>= 1.3.0)
eventmachine (1.2.7)
execjs (2.7.0)
faraday (0.15.3)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
ffi (1.9.25)
ffi (1.11.1)
forwardable-extended (2.6.0)
gemoji (3.0.0)
github-pages (192)
activesupport (= 4.2.10)
github-pages-health-check (= 1.8.1)
jekyll (= 3.7.4)
gemoji (3.0.1)
github-pages (198)
activesupport (= 4.2.11.1)
github-pages-health-check (= 1.16.1)
jekyll (= 3.8.5)
jekyll-avatar (= 0.6.0)
jekyll-coffeescript (= 1.1.1)
jekyll-commonmark-ghpages (= 0.1.5)
jekyll-default-layout (= 0.1.4)
jekyll-feed (= 0.10.0)
jekyll-feed (= 0.11.0)
jekyll-gist (= 1.5.0)
jekyll-github-metadata (= 2.9.4)
jekyll-github-metadata (= 2.12.1)
jekyll-mentions (= 1.4.1)
jekyll-optional-front-matter (= 0.3.0)
jekyll-paginate (= 1.1.0)
jekyll-readme-index (= 0.2.0)
jekyll-redirect-from (= 0.14.0)
jekyll-relative-links (= 0.5.3)
jekyll-relative-links (= 0.6.0)
jekyll-remote-theme (= 0.3.1)
jekyll-sass-converter (= 1.5.2)
jekyll-seo-tag (= 2.5.0)
@@ -66,28 +66,28 @@ GEM
jekyll-theme-tactile (= 0.1.1)
jekyll-theme-time-machine (= 0.1.1)
jekyll-titles-from-headings (= 0.5.1)
jemoji (= 0.10.1)
jemoji (= 0.10.2)
kramdown (= 1.17.0)
liquid (= 4.0.0)
listen (= 3.1.5)
mercenary (~> 0.3)
minima (= 2.5.0)
nokogiri (>= 1.8.2, < 2.0)
nokogiri (>= 1.8.5, < 2.0)
rouge (= 2.2.1)
terminal-table (~> 1.4)
github-pages-health-check (1.8.1)
github-pages-health-check (1.16.1)
addressable (~> 2.3)
dnsruby (~> 1.60)
octokit (~> 4.0)
public_suffix (~> 2.0)
public_suffix (~> 3.0)
typhoeus (~> 1.3)
html-pipeline (2.9.0)
html-pipeline (2.11.0)
activesupport (>= 2)
nokogiri (>= 1.4)
http_parser.rb (0.6.0)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
jekyll (3.7.4)
jekyll (3.8.5)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
@@ -105,21 +105,21 @@ GEM
jekyll-coffeescript (1.1.1)
coffee-script (~> 2.2)
coffee-script-source (~> 1.11.1)
jekyll-commonmark (1.2.0)
jekyll-commonmark (1.3.1)
commonmarker (~> 0.14)
jekyll (>= 3.0, < 4.0)
jekyll (>= 3.7, < 5.0)
jekyll-commonmark-ghpages (0.1.5)
commonmarker (~> 0.17.6)
jekyll-commonmark (~> 1)
rouge (~> 2)
jekyll-default-layout (0.1.4)
jekyll (~> 3.0)
jekyll-feed (0.10.0)
jekyll-feed (0.11.0)
jekyll (~> 3.3)
jekyll-gist (1.5.0)
octokit (~> 4.2)
jekyll-github-metadata (2.9.4)
jekyll (~> 3.1)
jekyll-github-metadata (2.12.1)
jekyll (~> 3.4)
octokit (~> 4.0, != 4.4.0)
jekyll-mentions (1.4.1)
html-pipeline (~> 2.3)
@@ -131,7 +131,7 @@ GEM
jekyll (~> 3.0)
jekyll-redirect-from (0.14.0)
jekyll (~> 3.3)
jekyll-relative-links (0.5.3)
jekyll-relative-links (0.6.0)
jekyll (~> 3.3)
jekyll-remote-theme (0.3.1)
jekyll (~> 3.5)
@@ -185,9 +185,9 @@ GEM
jekyll-seo-tag (~> 2.0)
jekyll-titles-from-headings (0.5.1)
jekyll (~> 3.3)
jekyll-watch (2.1.2)
jekyll-watch (2.2.1)
listen (~> 3.0)
jemoji (0.10.1)
jemoji (0.10.2)
gemoji (~> 3.0)
html-pipeline (~> 2.2)
jekyll (~> 3.0)
@@ -198,37 +198,38 @@ GEM
rb-inotify (~> 0.9, >= 0.9.7)
ruby_dep (~> 1.2)
mercenary (0.3.6)
mini_portile2 (2.3.0)
mini_portile2 (2.4.0)
minima (2.5.0)
jekyll (~> 3.5)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
minitest (5.8.5)
multipart-post (2.0.0)
nokogiri (1.8.5)
mini_portile2 (~> 2.3.0)
octokit (4.13.0)
minitest (5.11.3)
multipart-post (2.1.1)
nokogiri (1.10.3)
mini_portile2 (~> 2.4.0)
octokit (4.14.0)
sawyer (~> 0.8.0, >= 0.5.3)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (2.0.5)
public_suffix (3.0.3)
rb-fsevent (0.10.3)
rb-inotify (0.9.10)
ffi (>= 0.5.0, < 2)
rb-inotify (0.10.0)
ffi (~> 1.0)
redcarpet (3.4.0)
rouge (2.2.1)
ruby-enum (0.7.2)
i18n
ruby_dep (1.5.0)
rubyzip (1.2.2)
safe_yaml (1.0.4)
sass (3.7.2)
safe_yaml (1.0.5)
sass (3.7.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
sawyer (0.8.1)
addressable (>= 2.3.5, < 2.6)
faraday (~> 0.8, < 1.0)
sawyer (0.8.2)
addressable (>= 2.3.5)
faraday (> 0.8, < 2.0)
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
thread_safe (0.3.6)
@@ -236,13 +237,14 @@ GEM
ethon (>= 0.9.0)
tzinfo (1.2.5)
thread_safe (~> 0.1)
unicode-display_width (1.4.0)
unicode-display_width (1.6.0)
PLATFORMS
ruby
DEPENDENCIES
github-pages
redcarpet
BUNDLED WITH
2.0.1


@@ -1,5 +1,13 @@
# Dependencies
If you are running a build on Ubuntu you will need the following packages
# Dependencies for MacOS
Install the following for an easy to use dev environment:
* `brew install rbenv`
* `rbenv install 2.6.3`
* `gem install bundler`
# Dependencies for Linux
If you are running a build on Ubuntu you will need the following packages:
* ruby
* ruby-dev
* ruby-bundler
@@ -7,13 +15,15 @@ If you are running a build on Ubuntu you will need the following packages
* zlib1g-dev
* nginx (or apache2)
For other operating systems such as MacOS you will need equivalent packages or install xcode
# Local Development
1. Install Jekyll and plug-ins in one fell swoop. `gem install github-pages`
1. Install Jekyll and plug-ins in one fell swoop. `gem install github-pages`
This mirrors the plug-ins used by GitHub Pages on your local machine including Jekyll, Sass, etc.
2. Clone down your fork `git@github.com:smalltalk-ai/vmware-jekyll-velero.git`
3. cd into the `site` directory
4. Serve the site and watch for markup/sass changes `jekyll serve --livereload`. you may need to run `bundle exec jekyll serve --livereload`.
5. View your website at http://127.0.0.1:4000/
6. Commit any changes and push everything to the master branch of your GitHub user repository. GitHub Pages will then rebuild and serve your website.
2. Clone down your own fork, or clone the main repo `git clone https://github.com/heptio/velero` and add your own remote.
3. `cd velero/site`
4. `rbenv local 2.6.3`
5. `bundle install`
6. Serve the site and watch for markup/sass changes `jekyll serve --livereload`. You may need to run `bundle exec jekyll serve --livereload`.
7. View your website at http://127.0.0.1:4000/
8. Commit any changes and push everything to your fork.
9. Once you're ready, submit a PR of your changes. Netlify will automatically generate a preview of your changes.
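For reference, the steps above as one sequence (a sketch that assumes rbenv, Ruby 2.6.3, and Bundler are already set up as described under Dependencies):

```bash
# Clone the repo, install the gems, and serve the docs site locally
git clone https://github.com/heptio/velero
cd velero/site
rbenv local 2.6.3
bundle install
bundle exec jekyll serve --livereload   # then browse to http://127.0.0.1:4000/
```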


@@ -7,6 +7,7 @@ url:
logo: Velero.svg
vm_logo: vm-logo.png
gh_repo: https://github.com/heptio/velero
markdown: redcarpet
hero:
background-color: med-blue
footer:
@@ -173,3 +174,6 @@ exclude:
- CNAME
- Runbook.docx
- '*.sh'
redcarpet:
extensions: ["no_intra_emphasis", "tables", "autolink", "strikethrough", "with_toc_data"]


@@ -0,0 +1,9 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
<link rel="icon" href="/favicon.ico" type="image/x-icon">
<link rel="stylesheet" href="/css/styles.css?{{site.time | date: '%s%N'}}">
</head>


@@ -1,10 +1,17 @@
<!DOCTYPE html>
<html lang="en">
{% include head.html %}
{% include head-docs.html %}
{% if page.version != "master" %}
<!-- Block google from indexing versioned docs -->
<meta name="robots" content="noindex">
{% endif %}
{% if page.name != "README.md" %}
<title>{{ site.title }} Docs - {{page.title}}</title>
{% endif %}
{% if page.name == "README.md" %}
<title>{{ site.title }} Docs - Overview</title>
{% endif %}
<body id="docs">
<div class="container-fluid site-outer-container">
<div class="site-container">
@@ -21,7 +28,7 @@
<div class="section-content pt-4 pb-0">
<div class="row">
<div class="col-md-3">
{% include versions.html %}
{% include versions.html %}
{% include nav.html %}
</div>
<div class="col-md-8 offset-md-1">
@@ -37,4 +44,4 @@
</div>
</body>
</html>
</html>


@@ -35,4 +35,22 @@ h5 {
strong {
font-weight: $font-weight-semibold;
}
}
pre {
display: block;
font-size: $code-font-size;
color: $pre-color;
background-color: #f2f2f2;
padding-top: 5px;
padding-bottom: 5px;
padding-left: 5px;
padding-right: 5px;
// Account for some code outputs that place code tags in pre tags
code {
font-size: inherit;
color: inherit;
word-break: normal;
}
}


@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
2. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
```
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
3. Move the `velero` binary from the Velero directory to somewhere in your PATH.
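On Linux or macOS, a minimal sketch of that step (assuming `/usr/local/bin` is on your PATH):

```bash
# Move the extracted binary onto the PATH and confirm the shell can find it
sudo mv /dir/to/extract/to/velero /usr/local/bin/velero
which velero
```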
@@ -50,87 +52,92 @@ aws s3api create-bucket \
For more information, see [the AWS documentation on IAM users][14].
1. Create the IAM user:
```bash
aws iam create-user --user-name velero
```
```bash
aws iam create-user --user-name velero
```
If you'll be using Velero to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
2. Attach policies to give `velero` the necessary permissions:
```bash
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-user-policy \
--user-name velero \
--policy-name velero \
--policy-document file://velero-policy.json
```
```
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-user-policy \
--user-name velero \
--policy-name velero \
--policy-document file://velero-policy.json
```
3. Create an access key for the user:
```bash
aws iam create-access-key --user-name velero
```
```bash
aws iam create-access-key --user-name velero
```
The result should look like:
```json
{
"AccessKey": {
"UserName": "velero",
"Status": "Active",
"CreateDate": "2017-07-31T22:24:41.576Z",
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
"AccessKeyId": <AWS_ACCESS_KEY_ID>
}
}
```
```json
{
"AccessKey": {
"UserName": "velero",
"Status": "Active",
"CreateDate": "2017-07-31T22:24:41.576Z",
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
"AccessKeyId": <AWS_ACCESS_KEY_ID>
}
}
```
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
```bash
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
```
```bash
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
```
where the access key id and secret are the values returned from the `create-access-key` request.
@@ -158,30 +165,30 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
## Setting AWS_CLUSTER_NAME (Optional)
* If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
* Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
The best way to get the current cluster's name is to either check it with used deployment tool or to read it directly from the EC2 instances tags.
Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring a backup, this makes Velero (and the cluster it's running on) claim ownership of AWS volumes created from snapshots taken on a different cluster.
The best way to get the current cluster's name is to either check it with the deployment tool you used or to read it directly from the EC2 instance tags.
The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
```bash
kubectl get nodes -o jsonpath='{.items[*].spec.externalID}'
```
```bash
kubectl get nodes -o jsonpath='{.items[*].spec.externalID}'
```
Copy one of the returned IDs `<ID>` and use it with the `aws` CLI tool to search for one of the following:
Copy one of the returned IDs `<ID>` and use it with the `aws` CLI tool to search for one of the following:
* The `kubernetes.io/cluster/<AWS_CLUSTER_NAME>` tag of the value `owned`. The `<AWS_CLUSTER_NAME>` is then your cluster's name:
* The `kubernetes.io/cluster/<AWS_CLUSTER_NAME>` tag of the value `owned`. The `<AWS_CLUSTER_NAME>` is then your cluster's name:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=value,Values=owned"
```
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=value,Values=owned"
```
* If the first output returns nothing, then check for the legacy Tag `KubernetesCluster` of the value `<AWS_CLUSTER_NAME>`:
* If the first output returns nothing, then check for the legacy Tag `KubernetesCluster` of the value `<AWS_CLUSTER_NAME>`:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
```
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
```
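Once you have the cluster name, a minimal sketch for setting the variable (assuming the default `velero` namespace; `kubectl set env` is an alternative to editing the deployment by hand):

```bash
# Add AWS_CLUSTER_NAME to the Velero server deployment's environment
kubectl -n velero set env deployment/velero AWS_CLUSTER_NAME=<AWS_CLUSTER_NAME>
```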
## ALTERNATIVE: Setup permissions using kube2iam
@@ -192,103 +199,109 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
It can be set up for Velero by creating a role that will have required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally.
1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role:
```bash
cat > velero-trust-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
```
cat > velero-trust-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
"Action": "sts:AssumeRole"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_CREATED_WHEN_INITIALIZING_KUBE2IAM>"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
```
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_CREATED_WHEN_INITIALIZING_KUBE2IAM>"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
```
2. Create the IAM role:
```bash
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
```
```bash
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
```
3. Attach policies to give `velero` the necessary permissions:
```bash
BUCKET=<YOUR_BUCKET>
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-role-policy \
--role-name velero \
--policy-name velero-policy \
--policy-document file://./velero-policy.json
```
```
BUCKET=<YOUR_BUCKET>
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-role-policy \
--role-name velero \
--policy-name velero-policy \
--policy-document file://./velero-policy.json
```
4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` with `kubectl edit deploy/velero -n velero` and add the following annotation:
```
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
namespace: velero
name: velero
spec:
replicas: 1
template:
metadata:
labels:
component: velero
annotations:
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
```
```
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
namespace: velero
name: velero
spec:
replicas: 1
template:
metadata:
labels:
component: velero
annotations:
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
...
```
## Installing the nginx example (optional)


@@ -7,7 +7,7 @@ To configure Velero on Azure, you:
* Create Azure service principal for Velero
* Install the server
If you do not have the `az` Azure CLI 2.0 installed locally, follow the [install guide][18] to set it up.
If you do not have the `az` Azure CLI 2.0 installed locally, follow the [install guide][18] to set it up.
Run:
@@ -29,9 +29,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -45,17 +47,21 @@ separated into its own Resource Group. The example below shows the storage accou
separate `Velero_Backups` Resource Group.
The storage account needs to be created with a globally unique id since this is used for dns. In
the sample script below, we're generating a random name using `uuidgen`, but you can come up with
this name however you'd like, following the [Azure naming rules for storage accounts][19]. The
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
the sample script below, we're generating a random name using `uuidgen`, but you can come up with
this name however you'd like, following the [Azure naming rules for storage accounts][19]. The
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
configured to only allow access via https.
Create a resource group for the backups storage account. Change the location as needed.
```bash
# Create a resource group for the backups storage account. Change the location as needed.
AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
```
# Create the storage account
Create the storage account.
```bash
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
az storage account create \
--name $AZURE_STORAGE_ACCOUNT_ID \
@@ -78,7 +84,7 @@ az storage container create -n $BLOB_CONTAINER --public-access off --account-nam
1. Set the name of the Resource Group that contains your Kubernetes cluster's virtual machines/disks.
> **WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
**WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
when you provision your cluster in Azure, since this is the resource group that contains your cluster's virtual machines/disks.
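If you are on AKS, a sketch (assuming the Azure CLI) for looking up that auto-generated resource group:

```bash
# Print the node resource group AKS created for the cluster's virtual machines/disks
az aks show --resource-group <YOUR_AKS_RESOURCE_GROUP> --name <YOUR_CLUSTER_NAME> \
  --query nodeResourceGroup -o tsv
```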
```bash
@@ -106,31 +112,38 @@ To integrate Velero with Azure, you must create a Velero-specific [service princ
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
> If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
Create service principal and specify your own password:
```bash
# Create service principal and specify your own password
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET
```
# Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
```bash
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`
```
# After creating the service principal, obtain the client id
After creating the service principal, obtain the client id.
```bash
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
```
Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
1. Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
```bash
cat << EOF > ./credentials-velero
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
AZURE_TENANT_ID=${AZURE_TENANT_ID}
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
EOF
```
```
cat << EOF > ./credentials-velero
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
AZURE_TENANT_ID=${AZURE_TENANT_ID}
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
EOF
```
## Install and start Velero
@@ -157,7 +170,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
[0]: namespace.md
[8]: api-types/volumesnapshotlocation.md#azure


@@ -37,10 +37,13 @@ Note that the Makefile targets assume building from a git repository. When build
There are a number of different ways to build `velero` depending on your needs. This section outlines the main possibilities.
To build the `velero` binary on your local machine, compiled for your OS and architecture, run:
```bash
go build ./cmd/velero
```
or:
```bash
make local
```
@@ -48,6 +51,7 @@ make local
The latter will place the compiled binary under `$PWD/_output/bin/$GOOS/$GOARCH`, and will splice version and git commit information in so that `velero version` displays proper output. `velero install` will also use the version information to determine which tagged image to deploy.
To build the `velero` binary targeting `linux/amd64` within a build container on your local machine, run:
```bash
make build
```
@@ -55,11 +59,13 @@ make build
See the **Cross compiling** section below for details on building for alternate OS/architecture combinations.
To build a Velero container image, first set the `$REGISTRY` environment variable. For example, if you want to build the `gcr.io/my-registry/velero:master` image, set `$REGISTRY` to `gcr.io/my-registry`. Optionally, set the `$VERSION` environment variable to change the image tag. Then, run:
```bash
make container
```
To push your image to a registry, run:
```bash
make push
```
@@ -82,7 +88,7 @@ Run `make update` to regenerate files if you make the following changes:
Run [generate-proto.sh][13] to regenerate files if you make the following changes:
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14] and compiler plugin `protoc-gen-go` version v1.0.0.
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14] and compiler plugin `protoc-gen-go` version v1.0.0.
### Cross compiling
@@ -106,7 +112,7 @@ files (clientset, listers, shared informers, docs) are up to date.
## 4. Run
### Prerequisites
### Prerequisites
When running Velero, you will need to ensure that you set up all of the following:
@@ -169,7 +175,7 @@ Using the `velero` binary that you've built, run `velero install`:
```bash
# velero install requires a credentials file to exist, but we will
# not be using it since we're running the server locally, so just
# not be using it since we're running the server locally, so just
# create an empty file to pass to the install command.
touch fake-credentials-file
@@ -181,8 +187,8 @@ velero install \
--secret-file ./fake-credentials-file
# 'velero install' creates an in-cluster deployment of the
# velero server using an official velero image, but we want
# to run the velero server on our local machine using the
# velero server using an official velero image, but we want
# to run the velero server on our local machine using the
# binary we built, so delete the in-cluster deployment.
kubectl --namespace velero delete deployment velero
@@ -205,15 +211,16 @@ rm fake-credentials-file
1. Ensure you've built a `velero` container image and either loaded it onto your cluster's nodes, or pushed it to a registry (see [build][3]).
1. Install Velero into the cluster (the example below assumes you're using AWS):
```bash
velero install \
--provider aws \
--image $YOUR_CONTAINER_IMAGE \
--bucket $BUCKET \
--backup-location-config region=$REGION \
--snapshot-location-config region=$REGION \
--secret-file $YOUR_AWS_CREDENTIALS_FILE
```
```bash
velero install \
--provider aws \
--image $YOUR_CONTAINER_IMAGE \
--bucket $BUCKET \
--backup-location-config region=$REGION \
--snapshot-location-config region=$REGION \
--secret-file $YOUR_AWS_CREDENTIALS_FILE
```
## 5. Vendoring dependencies


@@ -3,7 +3,7 @@
## General
### `invalid configuration: no configuration has been provided`
This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the
This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the
following locations:
* the path specified by the `--kubeconfig` flag, if any
* the path specified by the `$KUBECONFIG` environment variable, if any
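For example, either of the following (a sketch, with a placeholder path) makes a kubeconfig visible to the Velero client:

```bash
# Point the client at an explicit kubeconfig, per command or for the whole shell session
velero backup get --kubeconfig /path/to/kubeconfig
export KUBECONFIG=/path/to/kubeconfig
```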
@@ -22,22 +22,25 @@ kubectl -n velero logs deployment/velero
### `NoCredentialProviders: no valid providers in chain`
#### Using credentials
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `credentials-velero` file is formatted properly and has the correct values:
```
[default]
aws_access_key_id=<your AWS access key ID>
aws_secret_access_key=<your AWS secret access key>
```
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
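A quick sketch (assuming the default `velero` namespace) for checking the first two items:

```bash
# Confirm the secret exists and inspect the contents of its "cloud" key
kubectl -n velero get secret cloud-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath='{.data.cloud}' | base64 --decode
```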
#### Using kube2iam
This means that Velero can't read the content of the S3 bucket. Ensure the following:
* There is a Trust Policy document allowing the role used by kube2iam to assume Velero's role, as stated in the AWS config documentation.
* The new Velero role has all the permissions listed in the documentation regarding S3.
@@ -45,8 +48,9 @@ This means that Velero can't read the content of the S3 bucket. Ensure the follo
## Azure
### `Failed to refresh the Token` or `adal: Refresh request failed`
This means that the secrets containing the Azure service principal credentials for Velero has not been created/mounted
This means that the secrets containing the Azure service principal credentials for Velero has not been created/mounted
properly into the Velero server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0))
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
@@ -56,8 +60,9 @@ properly into the Velero server pod. Ensure the following:
## GCE/GKE
### `open credentials/cloud: no such file or directory`
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `cloud-credentials` secret is defined as a volume for the Velero deployment


@@ -16,10 +16,13 @@ backup-test-2-20170726180515 backup-test-2 Completed 0 1 2
```
To delve into the warnings and errors into more detail, you can use `velero restore describe`:
```
```bash
velero restore describe backup-test-20170726180512
```
The output looks like this:
```
Name: backup-test-20170726180512
Namespace: velero


@@ -9,6 +9,7 @@ If you periodically back up your cluster's resources, you are able to return to
```
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
```
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
1. A disaster happens and you need to recreate your resources.
@@ -16,9 +17,7 @@ If you periodically back up your cluster's resources, you are able to return to
1. Update the Velero server deployment, adding the `--restore-only` flag to the `server` command (see the sketch after this list). This prevents Backup objects from being created or deleted during your Restore process.
1. Create a restore with your most recent Velero Backup:
```
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
```
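A sketch of the `restore-only` step above (assuming the default `velero` namespace; the exact args list depends on your installation):

```bash
# Add --restore-only to the velero server container's args, then confirm it is present
kubectl -n velero edit deployment/velero   # add "- --restore-only" under the container args
kubectl -n velero get deployment/velero -o jsonpath='{.spec.template.spec.containers[0].args}'
```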


@@ -1,9 +1,9 @@
# Run Velero on GCP
You can run Kubernetes on Google Cloud Platform in either:
You can run Kubernetes on Google Cloud Platform in either:
* Kubernetes on Google Compute Engine virtual machines
* Google Kubernetes Engine
* Google Kubernetes Engine
If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the [user guide][16] to set them up.
@@ -16,9 +16,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -44,7 +46,7 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
```
Store the `project` value from the results in the environment variable `$PROJECT_ID`.
```bash
PROJECT_ID=$(gcloud config get-value project)
```
@@ -64,7 +66,7 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
```
Set the `$SERVICE_ACCOUNT_EMAIL` variable to match its `email` value.
```bash
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
--filter="displayName:Velero service account" \
@@ -74,7 +76,6 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
3. Attach policies to give `velero` the necessary permissions to function:
```bash
ROLE_PERMISSIONS=(
compute.disks.get
compute.disks.create
@@ -136,14 +137,13 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
[0]: namespace.md
[7]: api-types/backupstoragelocation.md#gcp
[8]: api-types/volumesnapshotlocation.md#gcp
[15]: https://cloud.google.com/compute/docs/access/service-accounts
[16]: https://cloud.google.com/sdk/docs/
[20]: faq.md
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap
[0]: namespace.md
[7]: api-types/backupstoragelocation.md#gcp
[8]: api-types/volumesnapshotlocation.md#gcp
[15]: https://cloud.google.com/compute/docs/access/service-accounts
[16]: https://cloud.google.com/sdk/docs/
[20]: faq.md
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap


@@ -1,8 +1,8 @@
## Getting started
The following example sets up the Velero server and client, then backs up and restores a sample application.
The following example sets up the Velero server and client, then backs up and restores a sample application.
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31].
**NOTE** The example lets you explore basic Velero functionality. Configuring Minio for production is out of scope.
@@ -26,9 +26,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -36,6 +38,7 @@ of the Velero repository is under active development and is not guaranteed to be
#### MacOS Installation
On Mac, you can use [HomeBrew](https://brew.sh) to install the `velero` client:
```bash
brew install velero
```
@@ -54,9 +57,10 @@ These instructions start the Velero server and a Minio instance that is accessib
1. Start the server and the local storage service. In the Velero directory, run:
```bash
```
kubectl apply -f examples/minio/00-minio-deployment.yaml
```
```
velero install \
--provider aws \
--bucket velero \
@@ -91,11 +95,11 @@ These instructions start the Velero server and a Minio instance that is accessib
velero backup create nginx-backup --selector app=nginx
```
Alternatively if you want to backup all objects *except* those matching the label `backup=ignore`:
Alternatively if you want to backup all objects *except* those matching the label `backup=ignore`:
```
velero backup create nginx-backup --selector 'backup notin (ignore)'
```
```
velero backup create nginx-backup --selector 'backup notin (ignore)'
```
1. (Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector:
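For example (a sketch; adjust the schedule name and cron expression as needed):

```bash
# Back up everything labeled app=nginx every day at 01:00
velero schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
```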
@@ -126,7 +130,7 @@ These instructions start the Velero server and a Minio instance that is accessib
```
You should get no results.
NOTE: You might need to wait for a few minutes for the namespace to be fully cleaned up.
### Restore
@@ -210,21 +214,19 @@ You must also get the Minio URL, which you can then specify as the value of the
1. Get the Minio URL:
- if you're running Minikube:
- if you're running Minikube:
```shell
minikube service minio --namespace=velero --url
```
- in any other environment:
- in any other environment:
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
```shell
kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
```
```shell
kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
```
1. Edit your `BackupStorageLocation` YAML, adding `publicUrl: <URL_FROM_PREVIOUS_STEP>` as a field under `spec.config`. You must include the `http://` or `https://` prefix.
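If you prefer patching over editing by hand, a sketch (assuming the default namespace and a location named `default`):

```bash
# Add publicUrl under spec.config; keep the http:// or https:// prefix in the URL
kubectl -n velero patch backupstoragelocation default --type merge \
  -p '{"spec":{"config":{"publicUrl":"<URL_FROM_PREVIOUS_STEP>"}}}'
```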
@@ -252,7 +254,7 @@ Add `publicUrl: http://localhost:9000` under the `spec.config` section.
Configuring Ingress for your cluster is out of scope for the Velero documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Velero configuration with Minio.
In this case:
In this case:
1. Keep the Service type as `ClusterIP`.


@@ -18,22 +18,26 @@ You can use the following annotations on a pod to make Velero execute a hook whe
#### Pre hooks
| Annotation Name | Description |
| --- | --- |
| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
* `pre.hook.backup.velero.io/container`
* The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `pre.hook.backup.velero.io/command`
* The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `pre.hook.backup.velero.io/on-error`
* What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `pre.hook.backup.velero.io/timeout`
* How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.
#### Post hooks
| Annotation Name | Description |
| --- | --- |
| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
* `post.hook.backup.velero.io/container`
* The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `post.hook.backup.velero.io/command`
* The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `post.hook.backup.velero.io/on-error`
* What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `post.hook.backup.velero.io/timeout`
* How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.
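For example, a sketch that sets a pre hook on a running pod with `kubectl annotate` (the namespace, pod, and container names are placeholders):

```bash
# Run `uname -a` in the named container before the pod's volumes are backed up,
# continue on a non-zero exit code, and allow up to 60s for the command to finish
kubectl -n <NAMESPACE> annotate pod/<POD_NAME> \
  pre.hook.backup.velero.io/container=<CONTAINER_NAME> \
  pre.hook.backup.velero.io/command='["/usr/bin/uname", "-a"]' \
  pre.hook.backup.velero.io/on-error=Continue \
  pre.hook.backup.velero.io/timeout=60s
```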
### Specifying Hooks in the Backup Spec


@@ -1,5 +1,5 @@
# Use IBM Cloud Object Storage as Velero's storage destination.
You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, but anyway you can use IBM Cloud Object Store as a destination for Velero's backups.
You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or on any other Kubernetes cluster, and use IBM Cloud Object Storage as the destination for Velero's backups.
To set up IBM Cloud Object Storage (COS) as Velero's destination, you:
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -80,14 +82,14 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
[0]: namespace.md
[1]: https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#creating-a-new-resource-instance
[2]: https://console.bluemix.net/docs/services/cloud-object-storage/getting-started.html#create-buckets
[3]: https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials
[4]: https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/kc_welcome_containers.html
[5]: https://console.bluemix.net/docs/containers/container_index.html#container_index
[6]: api-types/backupstoragelocation.md#aws
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
[0]: namespace.md
[1]: https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#creating-a-new-resource-instance
[2]: https://console.bluemix.net/docs/services/cloud-object-storage/getting-started.html#create-buckets
[3]: https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials
[4]: https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/kc_welcome_containers.html
[5]: https://console.bluemix.net/docs/containers/container_index.html#container_index
[6]: api-types/backupstoragelocation.md#aws
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html


@@ -17,17 +17,17 @@ For details, see the documentation topics for individual cloud providers.
The Velero client includes an `install` command to specify the settings for each supported cloud provider. You can install Velero for the included cloud providers using the following command:
```bash
velero install \
--provider <YOUR_PROVIDER> \
--bucket <YOUR_BUCKET> \
--secret-file <PATH_TO_FILE> \
[--backup-location-config]
[--snapshot-location-config]
[--namespace]
[--use-volume-snapshots]
[--use-restic]
velero install \
--provider <YOUR_PROVIDER> \
--bucket <YOUR_BUCKET> \
--secret-file <PATH_TO_FILE> \
[--backup-location-config]
[--snapshot-location-config]
[--namespace]
[--use-volume-snapshots]
[--use-restic]
```
For provider-specific instructions, see:
* [Run Velero on AWS][0]
@@ -43,10 +43,10 @@ For more complex installation needs, use either the generated YAML, or the Helm
## On-premises
You can run Velero in an on-premises cluster in different ways depending on your requirements.
You can run Velero in an on-premises cluster in different ways depending on your requirements.
First, you must select an object storage backend that Velero can use to store backup data. [Compatible Storage Providers][99] contains information on various
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
not using another storage platform that offers an S3-compatible object storage API.
Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on


@@ -76,6 +76,7 @@ velero backup-location create s3-alt-region \
```
During backup creation:
```shell
# The Velero server will automatically store backups in the backup storage location named "default" if
# one is not specified when creating the backup. You can alter which backup storage location is used
@@ -83,7 +84,9 @@ During backup creation:
# by the Velero deployment) to the name of a different backup storage location.
velero backup create full-cluster-backup
```
Or:
```shell
velero backup create full-cluster-alternate-location-backup \
--storage-location s3-alt-region
@@ -106,10 +109,10 @@ velero snapshot-location create portworx-cloud \
During backup creation:
```shell
# Note that since in this example we have two possible volume snapshot locations for the Portworx
# Note that since in this example we have two possible volume snapshot locations for the Portworx
# provider, we need to explicitly specify which one to use when creating a backup. Alternately,
# you can set the --default-volume-snapshot-locations flag on the `velero server` command (run by
# the Velero deployment) to specify which location should be used for each provider by default, in
# the Velero deployment) to specify which location should be used for each provider by default, in
# which case you don't need to specify it when creating a backup.
velero backup create local-snapshot-backup \
--volume-snapshot-locations portworx-local
@@ -140,8 +143,9 @@ velero snapshot-location create ebs-us-west-1 \
```
During backup creation:
```shell
# Velero will automatically use your configured backup storage location and volume snapshot location.
# Velero will automatically use your configured backup storage location and volume snapshot location.
# Nothing needs to be specified when creating a backup.
velero backup create full-cluster-backup
```


@@ -9,6 +9,7 @@ Velero can help you port your resources from one cluster to another, as long as
```
velero backup create <BACKUP-NAME>
```
The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary.
1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML.


@@ -7,7 +7,7 @@ First, ensure you've [downloaded & extracted the latest release][0].
Then, install Velero using the `--namespace` flag:
```bash
velero install --bucket <YOUR_BUCKET> --provider <YOUR_PROVIDER> --namespace <YOUR_NAMESPACE>
velero install --bucket <YOUR_BUCKET> --provider <YOUR_PROVIDER> --namespace <YOUR_NAMESPACE>
```
@@ -17,7 +17,7 @@ Then, install Velero using the `--namespace` flag:
To specify the namespace for all Velero client commands, run:
```bash
velero client config set namespace=<NAMESPACE_VALUE>
velero client config set namespace=<NAMESPACE_VALUE>
```


@@ -15,6 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru
- a plugin with the same name cannot already exist
### Some examples:
```
- example.io/azure
- 1.2.3.4/5678
@@ -34,9 +35,9 @@ Velero currently supports the following kinds of plugins:
## Plugin Logging
Velero provides a [logger][2] that can be used by plugins to log structured information to the main Velero server log or
per-backup/restore logs. It also passes a `--log-level` flag to each plugin binary, whose value is the value of the same
flag from the main Velero process. This means that if you turn on debug logging for the Velero server via `--log-level=debug`,
Velero provides a [logger][2] that can be used by plugins to log structured information to the main Velero server log or
per-backup/restore logs. It also passes a `--log-level` flag to each plugin binary, whose value is the value of the same
flag from the main Velero process. This means that if you turn on debug logging for the Velero server via `--log-level=debug`,
plugins will also emit debug-level logs. See the [sample repository][1] for an example of how to use the logger within your plugin.


@@ -2,15 +2,15 @@
Velero has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic][1]. This support is considered beta quality. Please see the list of [limitations](#limitations) to understand if it currently fits your use case.
Velero has always allowed you to take snapshots of persistent volumes as part of your backups if youre using one of
the supported cloud providers block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
We also provide a plugin model that enables anyone to implement additional object and block storage backends, outside the
Velero has always allowed you to take snapshots of persistent volumes as part of your backups if youre using one of
the supported cloud providers block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
We also provide a plugin model that enables anyone to implement additional object and block storage backends, outside the
main Velero repository.
We integrated restic with Velero so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
volume*. This is a new capability for Velero, not a replacement for existing functionality. If you're running on AWS, and
taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you've
been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable
@@ -30,20 +30,20 @@ Ensure you've [downloaded latest release][3].
To install restic, use the `--use-restic` flag on the `velero install` command. See the [install overview][2] for more details.
Please note: In RancherOS , the path is not `/var/lib/kubelet/pods` , rather it is `/opt/rke/var/lib/kubelet/pods`
thereby requires modifying the restic daemonset after installing.
Please note: in RancherOS, the path is not `/var/lib/kubelet/pods`; rather, it is `/opt/rke/var/lib/kubelet/pods`, which requires modifying the restic daemonset after installation.
```yaml
hostPath:
path: /var/lib/kubelet/pods
```
```yaml
hostPath:
path: /var/lib/kubelet/pods
```
to
to
```yaml
hostPath:
path: /opt/rke/var/lib/kubelet/pods
```
```yaml
hostPath:
path: /opt/rke/var/lib/kubelet/pods
```
You're now ready to use Velero with restic.
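Pods opt their volumes into restic backups via an annotation; a sketch for the example pod shown below (the annotation value is a comma-separated list of volume names):

```bash
# Tell Velero's restic integration to back up these two volumes of the pod
kubectl -n <NAMESPACE> annotate pod/<POD_NAME> \
  backup.velero.io/backup-volumes=pvc-volume,emptydir-volume
```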
@@ -76,7 +76,7 @@ You're now ready to use Velero with restic.
mountPath: /volume-2
volumes:
- name: pvc-volume
persistentVolumeClaim:
persistentVolumeClaim:
claimName: test-volume-claim
- name: emptydir-volume
emptyDir: {}
@@ -100,7 +100,8 @@ You're now ready to use Velero with restic.
```bash
velero backup describe YOUR_BACKUP_NAME
```
```bash
kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml
```
@@ -116,21 +117,22 @@ You're now ready to use Velero with restic.
```bash
velero restore describe YOUR_RESTORE_NAME
```
```bash
kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml
```
## Limitations
- `hostPath` volumes are not supported. [Local persistent volumes][4] are supported.
- Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
- Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
common encryption key for all restic repositories created by Velero. **This means that anyone who has access to your
bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket
appropriately. We plan to implement full Velero backup encryption, including securing the restic encryption keys, in
appropriately. We plan to implement full Velero backup encryption, including securing the restic encryption keys, in
a future release.
- The current Velero/restic integration relies on using pod names to associate restic backups with their parents. If a pod is restarted, such as with a Deployment,
- The current Velero/restic integration relies on using pod names to associate restic backups with their parents. If a pod is restarted, such as with a Deployment,
the next restic backup taken will be treated as a completely new backup, not an incremental one.
- Restic scans each file in a single thread. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual
- Restic scans each file in a single thread. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual
difference is small.
## Customize Restore Helper Image
@@ -225,11 +227,11 @@ and `restic prune`.
- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Velero backup process creates
one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this
resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes
`restic backup` commands to backup pod volume data.
`restic backup` commands to backup pod volume data.
- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Velero restore process creates one
or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
on that node. The controller executes `restic restore` commands to restore pod volume data.
### Backup


@@ -16,8 +16,8 @@ _If you're sure that you do not have any backups that were originally created pr
We've added a CLI command to [Velero v0.11.1][1], `velero migrate-backups`, to help you with this. This command will:
- Replace `ark-backup.json` files in object storage with equivalent `velero-backup.json` files.
- Create `<backup-name>-volumesnapshots.json.gz` files in object storage if they don't already exist, containing snapshot metadata populated from the backups' `status.volumeBackups` field*.
_*backups created prior to v0.10 stored snapshot metadata in the `status.volumeBackups` field, but it has subsequently been replaced with the `<backup-name>-volumesnapshots.json.gz` file._
@@ -26,42 +26,54 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v0.11.1 release tarball][1] for your client platform.
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
1. Scale down your existing Velero deployment:
```bash
kubectl -n velero scale deployment/velero --replicas 0
```
1. Fetch velero's credentials for accessing your object storage bucket and store them locally for use by `velero migrate-backups`:
For AWS:
```bash
export AWS_SHARED_CREDENTIALS_FILE=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $AWS_SHARED_CREDENTIALS_FILE
```
For Azure:
```bash
export AZURE_SUBSCRIPTION_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_SUBSCRIPTION_ID}" | base64 --decode)
export AZURE_TENANT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_TENANT_ID}" | base64 --decode)
export AZURE_CLIENT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_ID}" | base64 --decode)
export AZURE_CLIENT_SECRET=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_SECRET}" | base64 --decode)
export AZURE_RESOURCE_GROUP=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_RESOURCE_GROUP}" | base64 --decode)
```
For GCP:
```bash
export GOOGLE_APPLICATION_CREDENTIALS=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $GOOGLE_APPLICATION_CREDENTIALS
```
1. List all of your backup storage locations:
```bash
velero backup-location get
```
1. For each backup storage location that you want to use with Velero 1.0, replace any legacy pre-v0.11 backup metadata with the equivalent current formats:
```bash
# - BACKUP_LOCATION_NAME is the name of a backup location from the previous step, whose
# backup metadata will be updated in object storage
# - SNAPSHOT_LOCATION_NAME is the name of the volume snapshot location that Velero should
@@ -73,24 +85,33 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
```
1. Scale up your deployment:
```bash
kubectl -n velero scale deployment/velero --replicas 1
```
1. Remove the local `velero` credentials:
For AWS:
```bash
rm $AWS_SHARED_CREDENTIALS_FILE
unset AWS_SHARED_CREDENTIALS_FILE
```
For Azure:
```
unset AZURE_SUBSCRIPTION_ID
unset AZURE_TENANT_ID
unset AZURE_CLIENT_ID
unset AZURE_CLIENT_SECRET
unset AZURE_RESOURCE_GROUP
```
For GCP:
```
rm $GOOGLE_APPLICATION_CREDENTIALS
unset GOOGLE_APPLICATION_CREDENTIALS
```
@@ -104,13 +125,15 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v1.0 release tarball][2] for your client platform.
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
1. Move the `velero` binary from the Velero directory to somewhere in your PATH, replacing any existing pre-1.0 `velero` binaries.
1. Update the image for the Velero deployment and daemon set (if applicable):
```bash
kubectl -n velero set image deployment/velero velero=gcr.io/heptio-images/velero:v1.0.0
kubectl -n velero set image daemonset/restic restic=gcr.io/heptio-images/velero:v1.0.0


@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
2. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
3. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -50,87 +52,92 @@ aws s3api create-bucket \
For more information, see [the AWS documentation on IAM users][14].
1. Create the IAM user:
```bash
aws iam create-user --user-name velero
```
If you'll be using Velero to back up multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
2. Attach policies to give `velero` the necessary permissions:
```bash
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-user-policy \
--user-name velero \
--policy-name velero \
--policy-document file://velero-policy.json
```
3. Create an access key for the user:
```bash
aws iam create-access-key --user-name velero
```
The result should look like:
```json
{
"AccessKey": {
"UserName": "velero",
"Status": "Active",
"CreateDate": "2017-07-31T22:24:41.576Z",
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
"AccessKeyId": <AWS_ACCESS_KEY_ID>
}
}
```
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
```bash
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
```
where the access key id and secret are the values returned from the `create-access-key` request.
@@ -158,30 +165,30 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
## Setting AWS_CLUSTER_NAME (Optional)
If you have multiple clusters and you want to support migration of resources between them, you can use `kubectl edit deploy/velero -n velero` to edit your deployment:
Add the environment variable `AWS_CLUSTER_NAME` under `spec.template.spec.env`, with the current cluster's name. When restoring a backup, this will make Velero (and the cluster it's running on) claim ownership of AWS volumes created from snapshots taken on a different cluster.
The best way to get the current cluster's name is to either check it with the deployment tool you used or to read it directly from the EC2 instance tags.
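As a sketch of an alternative to `kubectl edit`, you could append the variable to the Velero container's `env` list with a JSON patch (this assumes the velero container is the first container in the deployment and already defines an `env` list; the cluster name is a placeholder):
```bash
kubectl -n velero patch deployment/velero --type=json -p='[
  {"op": "add", "path": "/spec/template/spec/containers/0/env/-",
   "value": {"name": "AWS_CLUSTER_NAME", "value": "<YOUR_CLUSTER_NAME>"}}
]'
```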
The following listing shows how to get the EC2 tags of the cluster's nodes. First, get the nodes' external IDs (EC2 IDs):
```bash
kubectl get nodes -o jsonpath='{.items[*].spec.externalID}'
```
Copy one of the returned IDs `<ID>` and use it with the `aws` CLI tool to search for one of the following:
* The `kubernetes.io/cluster/<AWS_CLUSTER_NAME>` tag of the value `owned`. The `<AWS_CLUSTER_NAME>` is then your cluster's name:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=value,Values=owned"
```
* If the first output returns nothing, then check for the legacy Tag `KubernetesCluster` of the value `<AWS_CLUSTER_NAME>`:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
```
## ALTERNATIVE: Setup permissions using kube2iam
@@ -192,103 +199,109 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
It can be set up for Velero by creating a role that has the required permissions, and then adding the permissions annotation to the Velero deployment to define which role it should use internally.
1. Create a Trust Policy document to allow the role to be used for EC2 management and to assume the kube2iam role:
```bash
cat > velero-trust-policy.json <<EOF
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "ec2.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        },
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_CREATED_WHEN_INITIALIZING_KUBE2IAM>"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}
EOF
```
2. Create the IAM role:
```bash
aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
```
3. Attach policies to give `velero` the necessary permissions:
```bash
BUCKET=<YOUR_BUCKET>
cat > velero-policy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateSnapshot",
"ec2:DeleteSnapshot"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::${BUCKET}/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${BUCKET}"
]
}
]
}
EOF
```
```bash
aws iam put-role-policy \
--role-name velero \
--policy-name velero-policy \
--policy-document file://./velero-policy.json
```
4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` with `kubectl edit deploy/velero -n velero` and add the following annotation:
```yaml
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  namespace: velero
  name: velero
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: velero
      annotations:
        iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
...
```
## Installing the nginx example (optional)


@@ -7,7 +7,7 @@ To configure Velero on Azure, you:
* Create Azure service principal for Velero
* Install the server
If you do not have the `az` Azure CLI 2.0 installed locally, follow the [install guide][18] to set it up.
Run:
@@ -29,9 +29,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -45,17 +47,21 @@ separated into its own Resource Group. The example below shows the storage accou
separate `Velero_Backups` Resource Group.
The storage account needs to be created with a globally unique id since this is used for dns. In
the sample script below, we're generating a random name using `uuidgen`, but you can come up with
this name however you'd like, following the [Azure naming rules for storage accounts][19]. The
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
configured to only allow access via https.
Create a resource group for the backups storage account. Change the location as needed.
```bash
AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
```
Create the storage account.
```bash
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
az storage account create \
--name $AZURE_STORAGE_ACCOUNT_ID \
@@ -78,7 +84,7 @@ az storage container create -n $BLOB_CONTAINER --public-access off --account-nam
1. Set the name of the Resource Group that contains your Kubernetes cluster's virtual machines/disks.
**WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
when you provision your cluster in Azure, since this is the resource group that contains your cluster's virtual machines/disks.
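If you're on AKS, one way to look up that auto-generated resource group is with the `az` CLI (a sketch; the resource group and cluster names are placeholders):
```bash
az aks show --resource-group <YOUR_AKS_RESOURCE_GROUP> --name <YOUR_AKS_CLUSTER_NAME> \
  --query nodeResourceGroup -o tsv
```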
```bash
@@ -106,31 +112,38 @@ To integrate Velero with Azure, you must create a Velero-specific [service princ
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
If you'll be using Velero to back up multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.
Create service principal and specify your own password:
```bash
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET
```
Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
```bash
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`
```
After creating the service principal, obtain the client id.
```bash
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
```
1. Now you need to create a file that contains all the environment variables you just set. The command looks like the following:
```bash
cat << EOF > ./credentials-velero
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
AZURE_TENANT_ID=${AZURE_TENANT_ID}
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
EOF
```
## Install and start Velero
@@ -157,7 +170,7 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
Replace `<YOUR_STORAGE_CLASS_NAME>` with `default`. This is Azure's default `StorageClass` name.
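For example, one way to make that substitution in place (a sketch using GNU `sed`; on macOS use `sed -i ''`):
```bash
sed -i 's/<YOUR_STORAGE_CLASS_NAME>/default/' examples/nginx-app/with-pv.yaml
```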
[0]: namespace.md
[8]: api-types/volumesnapshotlocation.md#azure


@@ -37,10 +37,13 @@ Note that the Makefile targets assume building from a git repository. When build
There are a number of different ways to build `velero` depending on your needs. This section outlines the main possibilities.
To build the `velero` binary on your local machine, compiled for your OS and architecture, run:
```bash
go build ./cmd/velero
```
or:
```bash
make local
```
@@ -48,6 +51,7 @@ make local
The latter will place the compiled binary under `$PWD/_output/bin/$GOOS/$GOARCH`, and will splice version and git commit information in so that `velero version` displays proper output. `velero install` will also use the version information to determine which tagged image to deploy.
To build the `velero` binary targeting `linux/amd64` within a build container on your local machine, run:
```bash
make build
```
@@ -55,11 +59,13 @@ make build
See the **Cross compiling** section below for details on building for alternate OS/architecture combinations.
To build a Velero container image, first set the `$REGISTRY` environment variable. For example, if you want to build the `gcr.io/my-registry/velero:master` image, set `$REGISTRY` to `gcr.io/my-registry`. Optionally, set the `$VERSION` environment variable to change the image tag. Then, run:
```bash
make container
```
To push your image to a registry, run:
```bash
make push
```
@@ -82,7 +88,7 @@ Run `make update` to regenerate files if you make the following changes:
Run [generate-proto.sh][13] to regenerate files if you make the following changes:
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14] and compiler plugin `protoc-gen-go` version v1.0.0.
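As a sketch of installing the pinned plugin version (assuming a module-aware Go toolchain; adjust to your environment):
```bash
GO111MODULE=on go get github.com/golang/protobuf/protoc-gen-go@v1.0.0
```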
### Cross compiling
@@ -106,7 +112,7 @@ files (clientset, listers, shared informers, docs) are up to date.
## 4. Run
### Prerequisites
When running Velero, you will need to ensure that you set up all of the following:
@@ -169,7 +175,7 @@ Using the `velero` binary that you've built, run `velero install`:
```bash
# velero install requires a credentials file to exist, but we will
# not be using it since we're running the server locally, so just
# create an empty file to pass to the install command.
touch fake-credentials-file
@@ -181,8 +187,8 @@ velero install \
--secret-file ./fake-credentials-file
# 'velero install' creates an in-cluster deployment of the
# velero server using an official velero image, but we want
# to run the velero server on our local machine using the
# binary we built, so delete the in-cluster deployment.
kubectl --namespace velero delete deployment velero
@@ -205,15 +211,16 @@ rm fake-credentials-file
1. Ensure you've built a `velero` container image and either loaded it onto your cluster's nodes, or pushed it to a registry (see [build][3]).
1. Install Velero into the cluster (the example below assumes you're using AWS):
```bash
velero install \
--provider aws \
--image $YOUR_CONTAINER_IMAGE \
--bucket $BUCKET \
--backup-location-config region=$REGION \
--snapshot-location-config region=$REGION \
--secret-file $YOUR_AWS_CREDENTIALS_FILE
```
## 5. Vendoring dependencies


@@ -3,7 +3,7 @@
## General
### `invalid configuration: no configuration has been provided`
This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the
following locations:
* the path specified by the `--kubeconfig` flag, if any
* the path specified by the `$KUBECONFIG` environment variable, if any
@@ -22,22 +22,25 @@ kubectl -n velero logs deployment/velero
### `NoCredentialProviders: no valid providers in chain`
#### Using credentials
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following (a quick check is shown after this list):
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `credentials-velero` file is formatted properly and has the correct values:
```
[default]
aws_access_key_id=<your AWS access key ID>
aws_secret_access_key=<your AWS secret access key>
```
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
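A quick way to confirm what's actually mounted (assuming the default `velero` namespace and secret name used throughout these docs):
```bash
# Inspect the credentials stored in the cloud-credentials secret
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode

# Check that the deployment defines the secret as a volume
kubectl -n velero get deployment/velero -o jsonpath='{.spec.template.spec.volumes}'
```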
#### Using kube2iam
This means that Velero can't read the content of the S3 bucket. Ensure the following:
* There is a Trust Policy document allowing the role used by kube2iam to assume Velero's role, as stated in the AWS config documentation.
* The new Velero role has all the permissions listed in the documentation regarding S3.
@@ -45,8 +48,9 @@ This means that Velero can't read the content of the S3 bucket. Ensure the follo
## Azure
### `Failed to refresh the Token` or `adal: Refresh request failed`
This means that the secrets containing the Azure service principal credentials for Velero have not been created/mounted
properly into the Velero server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0))
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
@@ -56,8 +60,9 @@ properly into the Velero server pod. Ensure the following:
## GCE/GKE
### `open credentials/cloud: no such file or directory`
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
into the Velero server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Velero server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
* The `cloud-credentials` secret is defined as a volume for the Velero deployment


@@ -16,10 +16,13 @@ backup-test-2-20170726180515 backup-test-2 Completed 0 1 2
```
To delve into the warnings and errors in more detail, you can use `velero restore describe`:
```bash
velero restore describe backup-test-20170726180512
```
The output looks like this:
```
Name: backup-test-20170726180512
Namespace: velero


@@ -9,6 +9,7 @@ If you periodically back up your cluster's resources, you are able to return to
```
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
```
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
1. A disaster happens and you need to recreate your resources.
@@ -16,9 +17,7 @@ If you periodically back up your cluster's resources, you are able to return to
1. Update the Velero server deployment, adding the `--restore-only` flag to the `server` command (see the sketch after these steps). This prevents Backup objects from being created or deleted during your Restore process.
1. Create a restore with your most recent Velero Backup:
```
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
```
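As a sketch of adding that flag without editing the deployment by hand (this assumes the default install, where `server` is in the first container's args list):
```bash
kubectl -n velero patch deployment/velero --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--restore-only"}]'
```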


@@ -1,9 +1,9 @@
# Run Velero on GCP
You can run Kubernetes on Google Cloud Platform in either:
* Kubernetes on Google Compute Engine virtual machines
* Google Kubernetes Engine
If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the [user guide][16] to set them up.
@@ -16,9 +16,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -44,7 +46,7 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
```
Store the `project` value from the results in the environment variable `$PROJECT_ID`.
```bash
PROJECT_ID=$(gcloud config get-value project)
```
@@ -64,7 +66,7 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
```
Set the `$SERVICE_ACCOUNT_EMAIL` variable to match its `email` value.
```bash
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
--filter="displayName:Velero service account" \
@@ -74,7 +76,6 @@ To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
3. Attach policies to give `velero` the necessary permissions to function:
```bash
ROLE_PERMISSIONS=(
compute.disks.get
compute.disks.create
@@ -136,14 +137,13 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
[0]: namespace.md
[7]: api-types/backupstoragelocation.md#gcp
[8]: api-types/volumesnapshotlocation.md#gcp
[15]: https://cloud.google.com/compute/docs/access/service-accounts
[16]: https://cloud.google.com/sdk/docs/
[20]: faq.md
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap


@@ -1,8 +1,8 @@
## Getting started
The following example sets up the Velero server and client, then backs up and restores a sample application.
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31].
**NOTE** The example lets you explore basic Velero functionality. Configuring Minio for production is out of scope.
@@ -26,9 +26,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -36,6 +38,7 @@ of the Velero repository is under active development and is not guaranteed to be
#### MacOS Installation
On Mac, you can use [HomeBrew](https://brew.sh) to install the `velero` client:
```bash
brew install velero
```
@@ -54,9 +57,10 @@ These instructions start the Velero server and a Minio instance that is accessib
1. Start the server and the local storage service. In the Velero directory, run:
```bash
kubectl apply -f examples/minio/00-minio-deployment.yaml
```
```bash
velero install \
--provider aws \
--bucket velero \
@@ -91,11 +95,11 @@ These instructions start the Velero server and a Minio instance that is accessib
velero backup create nginx-backup --selector app=nginx
```
Alternatively, if you want to back up all objects *except* those matching the label `backup=ignore`:
```
velero backup create nginx-backup --selector 'backup notin (ignore)'
```
1. (Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector:
@@ -126,7 +130,7 @@ These instructions start the Velero server and a Minio instance that is accessib
```
You should get no results.
NOTE: You might need to wait for a few minutes for the namespace to be fully cleaned up.
### Restore
@@ -210,21 +214,19 @@ You must also get the Minio URL, which you can then specify as the value of the
1. Get the Minio URL:
- if you're running Minikube:
```shell
minikube service minio --namespace=velero --url
```
- in any other environment:
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
```shell
kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
```
1. Edit your `BackupStorageLocation` YAML, adding `publicUrl: <URL_FROM_PREVIOUS_STEP>` as a field under `spec.config`. You must include the `http://` or `https://` prefix.
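If you prefer a one-liner over editing the YAML by hand, a merge patch can set the same field (a sketch; this assumes the backup storage location created in this example is named `default`):
```bash
# The URL must include the http:// or https:// prefix
kubectl -n velero patch backupstoragelocation default --type=merge \
  -p '{"spec":{"config":{"publicUrl":"<URL_FROM_PREVIOUS_STEP>"}}}'
```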
@@ -252,7 +254,7 @@ Add `publicUrl: http://localhost:9000` under the `spec.config` section.
Configuring Ingress for your cluster is out of scope for the Velero documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Velero configuration with Minio.
In this case:
In this case:


@@ -18,22 +18,26 @@ You can use the following annotations on a pod to make Velero execute a hook whe
#### Pre hooks
* `pre.hook.backup.velero.io/container`
* The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `pre.hook.backup.velero.io/command`
* The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `pre.hook.backup.velero.io/on-error`
* What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `pre.hook.backup.velero.io/timeout`
* How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.
#### Post hooks
* `post.hook.backup.velero.io/container`
* The container where the command should be executed. Defaults to the first container in the pod. Optional.
* `post.hook.backup.velero.io/command`
* The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]`
* `post.hook.backup.velero.io/on-error`
* What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional.
* `post.hook.backup.velero.io/timeout`
* How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional.
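For example, to set a pre hook on a running pod with `kubectl` (a sketch; the namespace and pod name are placeholders, and the command is the sample command from the description above):
```bash
kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME \
  pre.hook.backup.velero.io/command='["/usr/bin/uname", "-a"]' \
  pre.hook.backup.velero.io/timeout=60s
```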
### Specifying Hooks in the Backup Spec


@@ -1,5 +1,5 @@
# Use IBM Cloud Object Storage as Velero's storage destination.
You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, and use IBM Cloud Object Storage as the destination for Velero's backups.
To set up IBM Cloud Object Storage (COS) as Velero's destination, you:
@@ -18,9 +18,11 @@ Velero. The tarballs for each release contain the `velero` command-line client.
of the Velero repository is under active development and is not guaranteed to be stable!_
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
@@ -80,14 +82,14 @@ For more complex installation needs, use either the Helm chart, or add `--dry-ru
If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
[0]: namespace.md
[1]: https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#creating-a-new-resource-instance
[2]: https://console.bluemix.net/docs/services/cloud-object-storage/getting-started.html#create-buckets
[3]: https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials
[4]: https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/kc_welcome_containers.html
[5]: https://console.bluemix.net/docs/containers/container_index.html#container_index
[6]: api-types/backupstoragelocation.md#aws
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html


@@ -17,17 +17,17 @@ For details, see the documentation topics for individual cloud providers.
The Velero client includes an `install` command to specify the settings for each supported cloud provider. You can install Velero for the included cloud providers using the following command:
```bash
velero install \
--provider <YOUR_PROVIDER> \
--bucket <YOUR_BUCKET> \
--secret-file <PATH_TO_FILE> \
[--backup-location-config]
[--snapshot-location-config]
[--namespace]
[--use-volume-snapshots]
[--use-restic]
```
For provider-specific instructions, see:
* [Run Velero on AWS][0]
@@ -43,10 +43,10 @@ For more complex installation needs, use either the generated YAML, or the Helm
## On-premises
You can run Velero in an on-premises cluster in different ways depending on your requirements.
First, you must select an object storage backend that Velero can use to store backup data. [Compatible Storage Providers][99] contains information on various
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
not using another storage platform that offers an S3-compatible object storage API.
Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on


@@ -76,6 +76,7 @@ velero backup-location create s3-alt-region \
```
During backup creation:
```shell
# The Velero server will automatically store backups in the backup storage location named "default" if
# one is not specified when creating the backup. You can alter which backup storage location is used
@@ -83,7 +84,9 @@ During backup creation:
# by the Velero deployment) to the name of a different backup storage location.
velero backup create full-cluster-backup
```
Or:
```shell
velero backup create full-cluster-alternate-location-backup \
--storage-location s3-alt-region
@@ -106,10 +109,10 @@ velero snapshot-location create portworx-cloud \
During backup creation:
```shell
# Note that since in this example we have two possible volume snapshot locations for the Portworx
# provider, we need to explicitly specify which one to use when creating a backup. Alternately,
# you can set the --default-volume-snapshot-locations flag on the `velero server` command (run by
# the Velero deployment) to specify which location should be used for each provider by default, in
# which case you don't need to specify it when creating a backup.
velero backup create local-snapshot-backup \
--volume-snapshot-locations portworx-local
@@ -140,8 +143,9 @@ velero snapshot-location create ebs-us-west-1 \
```
During backup creation:
```shell
# Velero will automatically use your configured backup storage location and volume snapshot location.
# Nothing needs to be specified when creating a backup.
velero backup create full-cluster-backup
```


@@ -9,6 +9,7 @@ Velero can help you port your resources from one cluster to another, as long as
```
velero backup create <BACKUP-NAME>
```
The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary (see the example after these steps).
1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML.
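For example, a sketch of creating a backup that expires after 7 days instead of the default 30:
```bash
velero backup create <BACKUP-NAME> --ttl 168h0m0s
```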


@@ -7,7 +7,7 @@ First, ensure you've [downloaded & extracted the latest release][0].
Then, install Velero using the `--namespace` flag:
```bash
velero install --bucket <YOUR_BUCKET> --provider <YOUR_PROVIDER> --namespace <YOUR_NAMESPACE>
```
@@ -17,7 +17,7 @@ Then, install Velero using the `--namespace` flag:
To specify the namespace for all Velero client commands, run:
```bash
velero client config set namespace=<NAMESPACE_VALUE>
```


@@ -15,6 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru
- a plugin with the same name cannot already exist
### Some examples:
```
- example.io/azure
- 1.2.3.4/5678
@@ -34,9 +35,9 @@ Velero currently supports the following kinds of plugins:
## Plugin Logging
Velero provides a [logger][2] that can be used by plugins to log structured information to the main Velero server log or
per-backup/restore logs. It also passes a `--log-level` flag to each plugin binary, whose value is the value of the same
flag from the main Velero process. This means that if you turn on debug logging for the Velero server via `--log-level=debug`,
plugins will also emit debug-level logs. See the [sample repository][1] for an example of how to use the logger within your plugin.


@@ -2,15 +2,15 @@
Velero has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic][1]. This support is considered beta quality. Please see the list of [limitations](#limitations) to understand if it currently fits your use case.
Velero has always allowed you to take snapshots of persistent volumes as part of your backups if youre using one of
the supported cloud providers block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
We also provide a plugin model that enables anyone to implement additional object and block storage backends, outside the
main Velero repository.
We integrated restic with Velero so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
volume*. This is a new capability for Velero, not a replacement for existing functionality. If you're running on AWS, and
taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you've
been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable
@@ -30,20 +30,20 @@ Ensure you've [downloaded latest release][3].
To install restic, use the `--use-restic` flag on the `velero install` command. See the [install overview][2] for more details.
Please note: In RancherOS, the path is not `/var/lib/kubelet/pods`, but rather `/opt/rke/var/lib/kubelet/pods`, so you will need to modify the restic daemonset after installing, changing:
```yaml
hostPath:
  path: /var/lib/kubelet/pods
```
to
```yaml
hostPath:
  path: /opt/rke/var/lib/kubelet/pods
```
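One way to make this change after installation (a sketch; assumes the `restic` daemonset in the `velero` namespace created by `velero install --use-restic`):
```bash
kubectl -n velero edit daemonset/restic
```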
You're now ready to use Velero with restic.
@@ -76,7 +76,7 @@ You're now ready to use Velero with restic.
mountPath: /volume-2
volumes:
- name: pvc-volume
persistentVolumeClaim:
claimName: test-volume-claim
- name: emptydir-volume
emptyDir: {}
@@ -100,7 +100,8 @@ You're now ready to use Velero with restic.
```bash
velero backup describe YOUR_BACKUP_NAME
```
```bash
kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml
```
@@ -116,21 +117,22 @@ You're now ready to use Velero with restic.
```bash
velero restore describe YOUR_RESTORE_NAME
```
```bash
kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml
```
## Limitations
- `hostPath` volumes are not supported. [Local persistent volumes][4] are supported.
- Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
common encryption key for all restic repositories created by Velero. **This means that anyone who has access to your
bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket
appropriately. We plan to implement full Velero backup encryption, including securing the restic encryption keys, in
a future release.
- The current Velero/restic integration relies on using pod names to associate restic backups with their parents. If a pod is restarted, such as with a Deployment,
the next restic backup taken will be treated as a completely new backup, not an incremental one.
- Restic scans each file in a single thread. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual
difference is small.
## Customize Restore Helper Image
@@ -225,11 +227,11 @@ and `restic prune`.
- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Velero backup process creates
one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this
resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes
`restic backup` commands to back up pod volume data.
- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Velero restore process creates one
or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
on that node. The controller executes `restic restore` commands to restore pod volume data.
### Backup


@@ -16,8 +16,8 @@ _If you're sure that you do not have any backups that were originally created pr
We've added a CLI command to [Velero v0.11.1][1], `velero migrate-backups`, to help you with this. This command will:
- Replace `ark-backup.json` files in object storage with equivalent `velero-backup.json` files.
- Create `<backup-name>-volumesnapshots.json.gz` files in object storage if they don't already exist, containing snapshot metadata populated from the backups' `status.volumeBackups` field*.
_*backups created prior to v0.10 stored snapshot metadata in the `status.volumeBackups` field, but it has subsequently been replaced with the `<backup-name>-volumesnapshots.json.gz` file._
@@ -26,42 +26,54 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v0.11.1 release tarball][1] for your client platform.
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
1. Scale down your existing Velero deployment:
```bash
kubectl -n velero scale deployment/velero --replicas 0
```
1. Fetch velero's credentials for accessing your object storage bucket and store them locally for use by `velero migrate-backups`:
For AWS:
```bash
export AWS_SHARED_CREDENTIALS_FILE=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $AWS_SHARED_CREDENTIALS_FILE
```
For Azure:
```bash
export AZURE_SUBSCRIPTION_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_SUBSCRIPTION_ID}" | base64 --decode)
export AZURE_TENANT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_TENANT_ID}" | base64 --decode)
export AZURE_CLIENT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_ID}" | base64 --decode)
export AZURE_CLIENT_SECRET=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_SECRET}" | base64 --decode)
export AZURE_RESOURCE_GROUP=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_RESOURCE_GROUP}" | base64 --decode)
```
For GCP:
```bash
export GOOGLE_APPLICATION_CREDENTIALS=./velero-migrate-backups-credentials
kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $GOOGLE_APPLICATION_CREDENTIALS
```
1. List all of your backup storage locations:
```bash
velero backup-location get
```
1. For each backup storage location that you want to use with Velero 1.0, replace any legacy pre-v0.11 backup metadata with the equivalent current formats:
```bash
# - BACKUP_LOCATION_NAME is the name of a backup location from the previous step, whose
# backup metadata will be updated in object storage
# - SNAPSHOT_LOCATION_NAME is the name of the volume snapshot location that Velero should
@@ -73,24 +85,33 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
```
1. Scale up your deployment:
```bash
kubectl -n velero scale deployment/velero --replicas 1
```
1. Remove the local `velero` credentials:
For AWS:
```bash
rm $AWS_SHARED_CREDENTIALS_FILE
unset AWS_SHARED_CREDENTIALS_FILE
```
For Azure:
```
unset AZURE_SUBSCRIPTION_ID
unset AZURE_TENANT_ID
unset AZURE_CLIENT_ID
unset AZURE_CLIENT_SECRET
unset AZURE_RESOURCE_GROUP
```
For GCP:
```
rm $GOOGLE_APPLICATION_CREDENTIALS
unset GOOGLE_APPLICATION_CREDENTIALS
```
@@ -104,13 +125,15 @@ _*backups created prior to v0.10 stored snapshot metadata in the `status.volumeB
1. Download the [v1.0 release tarball][2] for your client platform.
1. Extract the tarball:
```bash
tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
```
1. Move the `velero` binary from the Velero directory to somewhere in your PATH, replacing any existing pre-1.0 `velero` binaries.
1. Update the image for the Velero deployment and daemon set (if applicable):
```bash
kubectl -n velero set image deployment/velero velero=gcr.io/heptio-images/velero:v1.0.0
kubectl -n velero set image daemonset/restic restic=gcr.io/heptio-images/velero:v1.0.0