Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-15 17:22:52 +00:00)

Compare commits: main ... release-1. (197 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7d8417b2c5 | |
| | 26ca08b1f5 | |
| | dd5f74d53e | |
| | dcdc32f16f | |
| | 5b07cbb97f | |
| | a3bb4d2947 | |
| | d71b298ceb | |
| | 9e35ecd47a | |
| | 341d8006af | |
| | e0b437e2e1 | |
| | 5f32403968 | |
| | 4d1c20bbab | |
| | 3c379edcd0 | |
| | 7295609bd6 | |
| | 21667a14b4 | |
| | bd0ea3a648 | |
| | 509ceaafbf | |
| | cd9b5ed37b | |
| | 1cc6f74341 | |
| | 50a12217e4 | |
| | 85189a0dc6 | |
| | c3aecbb761 | |
| | 6886ff6643 | |
| | 165cd5e908 | |
| | ce00387ccf | |
| | 684f71306e | |
| | 805ec1d3a6 | |
| | 4d544ac3bb | |
| | 5a178b88d1 | |
| | 03bad256b0 | |
| | cf7074f943 | |
| | 0722927df2 | |
| | 106e802f1e | |
| | 9a093a7b75 | |
| | 09ff5af208 | |
| | 1da7f30fba | |
| | dd2553916e | |
| | 6ff9dfab33 | |
| | c3b5b1fadd | |
| | 568e5bf04a | |
| | 5ff8a4f2a3 | |
| | 33e3c9589b | |
| | ee7b60f723 | |
| | ecb8e80f30 | |
| | a80981524b | |
| | 5e71b38993 | |
| | 06d12dec40 | |
| | b68486221d | |
| | 5abd318b2c | |
| | 7c051514fd | |
| | c8fd9d4d62 | |
| | ccfbcc5455 | |
| | ea25b8a793 | |
| | 2d6578635d | |
| | fc44f3b8f0 | |
| | df72745909 | |
| | 453bd93c90 | |
| | 65939c920e | |
| | c042d477ab | |
| | 5c44ed49a5 | |
| | 3325a0cd1b | |
| | b2d3fa0bec | |
| | 25fc2f4d6e | |
| | a036e8d463 | |
| | f92cdb1f76 | |
| | 0531dbb1a2 | |
| | de55794381 | |
| | d7b4b0a770 | |
| | f1c93bd6c4 | |
| | 06e3773b22 | |
| | 32a8bbb9ac | |
| | 84d8bbda24 | |
| | 86e34eec28 | |
| | 5923046471 | |
| | d1399225da | |
| | 88732e9b51 | |
| | dd9de76381 | |
| | 6438fc9a69 | |
| | a674a1eaff | |
| | bb4f9094fd | |
| | 1264c438c1 | |
| | 7e35fd3261 | |
| | 482ec13d38 | |
| | dd825ef8bb | |
| | dc525aa045 | |
| | 36ad5dafa9 | |
| | 7b76047596 | |
| | f1fcec3514 | |
| | 17ad487803 | |
| | bb6c1f60ea | |
| | 0be6ad3a06 | |
| | 1c462d5f6d | |
| | 32deef7ae3 | |
| | 72b5e7aad6 | |
| | 5c4fdfe147 | |
| | 226237bab4 | |
| | bbc9790316 | |
| | 10744ec516 | |
| | 905cd43140 | |
| | 3034cdb448 | |
| | 353ff55e42 | |
| | 468017d7db | |
| | 6bf705fd25 | |
| | 7a909d8ff5 | |
| | ef1b9816b2 | |
| | 457fcc6893 | |
| | b498847b5b | |
| | af9697814e | |
| | d92a051795 | |
| | a3cb39d62e | |
| | c1ace31466 | |
| | 8bf98e8895 | |
| | e53cfdf85e | |
| | d93cc9094a | |
| | 15dd67e203 | |
| | 877592194b | |
| | 17b495fcfd | |
| | b99a59480d | |
| | a789976a03 | |
| | 52878de077 | |
| | 432a5fe566 | |
| | 175047baa9 | |
| | 0eaf14ed19 | |
| | c415fd4bcc | |
| | 554403df5c | |
| | aba64ba151 | |
| | 3a410c9f04 | |
| | 2f92f78be5 | |
| | 9d5dd8e09d | |
| | 6103073551 | |
| | 83f892d81f | |
| | 2cd15f1e4b | |
| | 27a89df34d | |
| | e4c2b2b157 | |
| | edefe7a63b | |
| | a097094bcf | |
| | bc4dc6c0c8 | |
| | 343e54f1b8 | |
| | 08d44b02a8 | |
| | a8c76a4a00 | |
| | 0623ac363a | |
| | 1aea12a80c | |
| | 7112c62e49 | |
| | dcb891a307 | |
| | 21353f00a8 | |
| | 5e7114899b | |
| | b035680ce6 | |
| | 9eb133e635 | |
| | 6f1262d4c6 | |
| | 48e3278c6c | |
| | acfc6e474f | |
| | 993d2c775f | |
| | b70b01cde9 | |
| | 8b8a5a2bcc | |
| | 5b36cd7e83 | |
| | 3240fb196c | |
| | d9859d99ba | |
| | 18d4fe45e8 | |
| | 60d5bb22f7 | |
| | 9468b8cfa9 | |
| | 420562111b | |
| | cf0b2e9139 | |
| | 506415e60c | |
| | 3733a40637 | |
| | fe1ade0226 | |
| | 86e1a74937 | |
| | 6260a44e62 | |
| | 06d9bfae8d | |
| | 4d1617470f | |
| | 1b2c82c9eb | |
| | 040060082a | |
| | fc653bdfbe | |
| | 6790a18814 | |
| | 93995bfd00 | |
| | 80572934dc | |
| | 41d9b67945 | |
| | a06107ac70 | |
| | 40a94e39ad | |
| | 7ea0d434d6 | |
| | 6b884ecc39 | |
| | 183f7ac154 | |
| | 75bda412a1 | |
| | a2eb10df8f | |
| | 90bc1abd21 | |
| | 45165503ba | |
| | 53530130a5 | |
| | ed256d74dd | |
| | ab28a09a07 | |
| | 90f4cc5497 | |
| | f505ed709b | |
| | 28074e3f37 | |
| | 240f33c09d | |
| | fd08848471 | |
| | 5f585be24b | |
| | 5480acf0a0 | |
| | e2d3e84bab | |
| | 0c0ccf949b | |
.github/auto-assignees.yml (3 changes, vendored)

@@ -13,10 +13,9 @@ reviewers:
- reasonerjt
- ywk253100
- blackpiglet
- qiuming-best
- shubham-pampattiwar
- Lyndon-Li
- anshulahuja98
- kaovilai

tech-writer:
- sseago
.github/dependabot.yml (9 changes, vendored)

@@ -1,14 +1,5 @@
version: 2
updates:
# Dependencies listed in .github/workflows
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
labels:
- "Dependencies"
- "github_actions"
- "kind/changelog-not-required"
# Dependencies listed in go.mod
- package-ecosystem: "gomod"
directory: "/" # Location of package manifests
.github/labeler.yml (33 changes, vendored)
@@ -1,33 +0,0 @@
|
||||
# This file is used by Auto Label PRs action.
|
||||
# Works with https://github.com/actions/labeler/
|
||||
# Below this line, the keys are labels to be applied, and the values are the file globs to match against.
|
||||
# Anything in the `design` directory gets the `Design` label.
|
||||
Area/Design:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: design/*
|
||||
# Anything that has plugin infra will be labeled.
|
||||
# Individual plugins don't necessarily live here, though
|
||||
Area/Plugins:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: pkg/plugins/**/*
|
||||
Dependencies:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: go.mod
|
||||
Documentation:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: site/content/docs/**/*
|
||||
# Anything in the site directory gets the website label *EXCEPT* docs
|
||||
Website:
|
||||
- all:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: site/**/*
|
||||
- all-globs-to-all-files: '!site/content/docs/**/*'
|
||||
has-changelog:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: changelogs/**
|
||||
has-e2e-2tests:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: test/e2e/**/*
|
||||
has-unit-tests:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: pkg/**/*_test.go
|
||||
.github/labels.yaml (43 changes, vendored)
@@ -1,43 +0,0 @@
|
||||
# This file is used by [prow github action](https://github.com/jpmcb/prow-github-actions/) in .github/workflows/prow-action.yml.
|
||||
# This file only has values for kind and area commands.
|
||||
area:
|
||||
- CLI
|
||||
- CSI
|
||||
- Cloud/AWS
|
||||
- Cloud/Azure
|
||||
- Cloud/DigitalOcean
|
||||
- Cloud/GCP
|
||||
- Cloud/vSphere
|
||||
- Design
|
||||
- Documentation
|
||||
- Filters
|
||||
- Plugins
|
||||
- Process
|
||||
- Storage/Minio
|
||||
- Storage/Cinder
|
||||
- WindowsSupport
|
||||
- datamover
|
||||
- fs-backup
|
||||
- fs-backup/deletion
|
||||
- fs-backup/file-selectable
|
||||
- fs-uploader
|
||||
- kopia-integration
|
||||
- migration
|
||||
- multi-tenancy
|
||||
- progress-monitoring
|
||||
- resilience
|
||||
- schedule
|
||||
- storage/IBM-ObjectStorage
|
||||
- upgrade
|
||||
- volume-snapshot-dm
|
||||
kind:
|
||||
- changelog-not-required
|
||||
- question
|
||||
- refactor
|
||||
- requirement
|
||||
- release-note
|
||||
- release-blocker
|
||||
- spike
|
||||
- tech-debt
|
||||
- usage-error
|
||||
- voting
|
||||
.github/labels.yml (41 changes, vendored, Normal file)
@@ -0,0 +1,41 @@
|
||||
area:
|
||||
- "Cloud/AWS"
|
||||
- "Cloud/GCP"
|
||||
- "Cloud/Azure"
|
||||
- "Design"
|
||||
- "Plugins"
|
||||
|
||||
# Labels that can be applied to PRs with the /kind command
|
||||
kind:
|
||||
- "changelog-not-required"
|
||||
- "tech-debt"
|
||||
|
||||
# Works with https://github.com/actions/labeler/
|
||||
# Below this line, the keys are labels to be applied, and the values are the file globs to match against.
|
||||
# Anything in the `design` directory gets the `Design` label.
|
||||
Area/Design:
|
||||
- design/*
|
||||
|
||||
# Anything in the site directory gets the website label *EXCEPT* docs
|
||||
Website:
|
||||
- any: ["site/**/*", "!site/content/docs/**/*"]
|
||||
|
||||
Documentation:
|
||||
- site/content/docs/**/*
|
||||
|
||||
Dependencies:
|
||||
- go.mod
|
||||
|
||||
# Anything that has plugin infra will be labeled.
|
||||
# Individual plugins don't necessarily live here, though
|
||||
Area/Plugins:
|
||||
- "pkg/plugins/**/*"
|
||||
|
||||
has-unit-tests:
|
||||
- "pkg/**/*_test.go"
|
||||
|
||||
has-e2e-2tests:
|
||||
- "test/e2e/**/*"
|
||||
|
||||
has-changelog:
|
||||
- "changelogs/**"
|
||||
.github/pull_request_template.md (2 changes, vendored)

@@ -9,5 +9,5 @@ Fixes #(issue)
# Please indicate you've done the following:

- [ ] [Accepted the DCO](https://velero.io/docs/v1.5/code-standards/#dco-sign-off). Commits without the DCO will delay acceptance.
- [ ] [Created a changelog file (`make new-changelog`)](https://velero.io/docs/main/code-standards/#adding-a-changelog) or comment `/kind changelog-not-required` on this PR.
- [ ] [Created a changelog file](https://velero.io/docs/v1.5/code-standards/#adding-a-changelog) or added `/kind changelog-not-required` as a comment on this pull request.
- [ ] Updated the corresponding documentation in `site/content/docs/main`.
.github/workflows/auto_assign_prs.yml (2 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set the author of a PR as the assignee
uses: kentaro-m/auto-assign-action@v2.0.0
uses: kentaro-m/auto-assign-action@v1.1.1
with:
configuration-path: ".github/auto-assignees.yml"
repo-token: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/auto_label_prs.yml (4 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
triage:
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
- uses: actions/labeler@v3
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/labeler.yml
configuration-path: .github/labels.yml
.github/workflows/auto_request_review.yml (2 changes, vendored)

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Request a PR review based on files types/paths, and/or groups the author belongs to
uses: necojackarc/auto-request-review@v0.13.0
uses: necojackarc/auto-request-review@v0.7.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
config: .github/auto-assignees.yml
.github/workflows/crds-verify-kind.yaml (93 changes, vendored, Normal file)
@@ -0,0 +1,93 @@
|
||||
name: "Verify Velero CRDs across k8s versions"
|
||||
on:
|
||||
pull_request:
|
||||
# Do not run when the change only includes these directories.
|
||||
paths-ignore:
|
||||
- "site/**"
|
||||
- "design/**"
|
||||
|
||||
jobs:
|
||||
# Build the Velero CLI once for all Kubernetes versions, and cache it so the fan-out workers can get it.
|
||||
build-cli:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20.10'
|
||||
id: go
|
||||
# Look for a CLI that's made for this PR
|
||||
- name: Fetch built CLI
|
||||
id: cache
|
||||
uses: actions/cache@v2
|
||||
env:
|
||||
cache-name: cache-velero-cli
|
||||
with:
|
||||
path: ./_output/bin/linux/amd64/velero
|
||||
# The cache key a combination of the current PR number, and a SHA256 hash of the Velero binary
|
||||
key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
|
||||
# This key controls the prefixes that we'll look at in the cache to restore from
|
||||
restore-keys: |
|
||||
velero-${{ github.event.pull_request.number }}-
|
||||
|
||||
- name: Fetch cached go modules
|
||||
uses: actions/cache@v2
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v2
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
|
||||
# If no binaries were built for this PR, build it now.
|
||||
- name: Build Velero CLI
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
make local
|
||||
|
||||
# Check the common CLI against all Kubernetes versions
|
||||
crd-check:
|
||||
needs: build-cli
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
# Latest k8s versions. There's no series-based tag, nor is there a latest tag.
|
||||
k8s:
|
||||
- 1.19.7
|
||||
- 1.20.2
|
||||
- 1.21.1
|
||||
- 1.22.0
|
||||
- 1.23.6
|
||||
- 1.24.2
|
||||
- 1.25.3
|
||||
# All steps run in parallel unless otherwise specified.
|
||||
# See https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#creating-dependent-jobs
|
||||
steps:
|
||||
- name: Fetch built CLI
|
||||
id: cache
|
||||
uses: actions/cache@v2
|
||||
env:
|
||||
cache-name: cache-velero-cli
|
||||
with:
|
||||
path: ./_output/bin/linux/amd64/velero
|
||||
# The cache key a combination of the current PR number, and a SHA256 hash of the Velero binary
|
||||
key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
|
||||
# This key controls the prefixes that we'll look at in the cache to restore from
|
||||
restore-keys: |
|
||||
velero-${{ github.event.pull_request.number }}-
|
||||
- uses: engineerd/setup-kind@v0.5.0
|
||||
with:
|
||||
version: "v0.17.0"
|
||||
image: "kindest/node:v${{ matrix.k8s }}"
|
||||
- name: Install CRDs
|
||||
run: |
|
||||
kubectl cluster-info
|
||||
kubectl get pods -n kube-system
|
||||
kubectl version
|
||||
echo "current-context:" $(kubectl config current-context)
|
||||
echo "environment-kubeconfig:" ${KUBECONFIG}
|
||||
./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
|
||||
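For reference, the per-version CRD check that this workflow performs reduces to a handful of commands. The sketch below is a rough local equivalent rather than part of the workflow; it assumes kind and kubectl are already installed, that the Velero CLI is built with `make local`, and the `K8S_VERSION` value is just one illustrative entry from the matrix above.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Pick one of the kindest/node versions from the workflow matrix (illustrative value).
K8S_VERSION=1.25.3

# Build the CLI once; the workflow caches this per PR so fan-out jobs can reuse it.
make local

# Bring up a throwaway cluster for that Kubernetes version.
kind create cluster --image "kindest/node:v${K8S_VERSION}"
kubectl cluster-info

# Render the CRDs with a dry-run install and apply them; a successful apply
# means the CRDs are accepted by this API server version.
./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
```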
.github/workflows/e2e-test-kind.yaml (175 changes, vendored)
@@ -6,43 +6,42 @@ on:
|
||||
paths-ignore:
|
||||
- "site/**"
|
||||
- "design/**"
|
||||
- "**/*.md"
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
outputs:
|
||||
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
go-version: '1.20.10'
|
||||
id: go
|
||||
# Look for a CLI that's made for this PR
|
||||
- name: Fetch built CLI
|
||||
id: cli-cache
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ./_output/bin/linux/amd64/velero
|
||||
# The cache key a combination of the current PR number and the commit SHA
|
||||
key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
|
||||
- name: Fetch built image
|
||||
id: image-cache
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ./velero.tar
|
||||
# The cache key a combination of the current PR number and the commit SHA
|
||||
key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
|
||||
- name: Fetch cached go modules
|
||||
uses: actions/cache@v2
|
||||
if: steps.cli-cache.outputs.cache-hit != 'true'
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v2
|
||||
if: steps.cli-cache.outputs.cache-hit != 'true' || steps.image-cache.outputs.cache-hit != 'true'
|
||||
# If no binaries were built for this PR, build it now.
|
||||
- name: Build Velero CLI
|
||||
if: steps.cli-cache.outputs.cache-hit != 'true'
|
||||
@@ -52,107 +51,61 @@ jobs:
|
||||
- name: Build Velero Image
|
||||
if: steps.image-cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
|
||||
docker save velero:pr-test-linux-amd64 -o ./velero.tar
|
||||
# Check and build MinIO image once for all e2e tests
|
||||
- name: Check Bitnami MinIO Dockerfile version
|
||||
id: minio-version
|
||||
run: |
|
||||
DOCKERFILE_SHA=$(curl -s https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile\&per_page=1 | jq -r '.[0].sha')
|
||||
echo "dockerfile_sha=${DOCKERFILE_SHA}" >> $GITHUB_OUTPUT
|
||||
- name: Cache MinIO Image
|
||||
uses: actions/cache@v4
|
||||
id: minio-cache
|
||||
with:
|
||||
path: ./minio-image.tar
|
||||
key: minio-bitnami-${{ steps.minio-version.outputs.dockerfile_sha }}
|
||||
- name: Build MinIO Image from Bitnami Dockerfile
|
||||
if: steps.minio-cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
echo "Building MinIO image from Bitnami Dockerfile..."
|
||||
git clone --depth 1 https://github.com/bitnami/containers.git /tmp/bitnami-containers
|
||||
cd /tmp/bitnami-containers/bitnami/minio/2025/debian-12
|
||||
docker build -t bitnami/minio:local .
|
||||
docker save bitnami/minio:local > ${{ github.workspace }}/minio-image.tar
|
||||
# Create json of k8s versions to test
|
||||
# from guide: https://stackoverflow.com/a/65094398/4590470
|
||||
setup-test-matrix:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Set k8s versions
|
||||
id: set-matrix
|
||||
# everything excluding older tags. limits needs to be high enough to cover all latest versions
|
||||
# and test labels
|
||||
# grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" filters for v1.25 to v9.99
|
||||
# and removes older patches of the same minor version
|
||||
# awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}'
|
||||
run: |
|
||||
echo "matrix={\
|
||||
\"k8s\":$(wget -q -O - "https://hub.docker.com/v2/namespaces/kindest/repositories/node/tags?page_size=50" | grep -o '"name": *"[^"]*' | grep -o '[^"]*$' | grep -v -E "alpha|beta" | grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" | awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}' | sort -r | sed s/v//g | jq -R -c -s 'split("\n")[:-1]'),\
|
||||
\"labels\":[\
|
||||
\"Basic && (ClusterResource || NodePort || StorageClass)\", \
|
||||
\"ResourceFiltering && !Restic\", \
|
||||
\"ResourceModifier || (Backups && BackupsSync) || PrivilegesMgmt || OrderedResources\", \
|
||||
\"(NamespaceMapping && Single && Restic) || (NamespaceMapping && Multiple && Restic)\"\
|
||||
]}" >> $GITHUB_OUTPUT
|
||||
|
||||
IMAGE=velero VERSION=pr-test make container
|
||||
docker save velero:pr-test -o ./velero.tar
|
||||
# Run E2E test against all Kubernetes versions on kind
|
||||
run-e2e-test:
|
||||
needs:
|
||||
- build
|
||||
- setup-test-matrix
|
||||
- get-go-version
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
|
||||
matrix:
|
||||
k8s:
|
||||
- 1.19.16
|
||||
- 1.20.15
|
||||
- 1.21.12
|
||||
- 1.22.9
|
||||
- 1.23.6
|
||||
- 1.24.0
|
||||
- 1.25.3
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20.10'
|
||||
id: go
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
# Fetch the pre-built MinIO image from the build job
|
||||
- name: Fetch built MinIO Image
|
||||
uses: actions/cache@v4
|
||||
id: minio-cache
|
||||
with:
|
||||
path: ./minio-image.tar
|
||||
key: minio-bitnami-${{ needs.build.outputs.minio-dockerfile-sha }}
|
||||
- name: Load MinIO Image
|
||||
run: |
|
||||
echo "Loading MinIO image..."
|
||||
docker load < ./minio-image.tar
|
||||
uses: actions/checkout@v2
|
||||
- name: Install MinIO
|
||||
run: |
|
||||
docker run -d --rm -p 9000:9000 -e "MINIO_ROOT_USER=minio" -e "MINIO_ROOT_PASSWORD=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:local
|
||||
- uses: engineerd/setup-kind@v0.6.2
|
||||
run:
|
||||
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
|
||||
- uses: engineerd/setup-kind@v0.5.0
|
||||
with:
|
||||
skipClusterLogsExport: true
|
||||
version: "v0.27.0"
|
||||
version: "v0.17.0"
|
||||
image: "kindest/node:v${{ matrix.k8s }}"
|
||||
- name: Fetch built CLI
|
||||
id: cli-cache
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ./_output/bin/linux/amd64/velero
|
||||
key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
|
||||
- name: Fetch built Image
|
||||
id: image-cache
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ./velero.tar
|
||||
key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
|
||||
- name: Load Velero Image
|
||||
run:
|
||||
kind load image-archive velero.tar
|
||||
# always try to fetch the cached go modules as the e2e test needs it either
|
||||
- name: Fetch cached go modules
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Run E2E test
|
||||
run: |
|
||||
cat << EOF > /tmp/credential
|
||||
@@ -165,27 +118,17 @@ jobs:
|
||||
curl -LO https://dl.k8s.io/release/v${{ matrix.k8s }}/bin/linux/amd64/kubectl
|
||||
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||
|
||||
git clone https://github.com/vmware-tanzu-experiments/distributed-data-generator.git -b main /tmp/kibishii
|
||||
|
||||
GOPATH=~/go \
|
||||
CLOUD_PROVIDER=kind \
|
||||
OBJECT_STORE_PROVIDER=aws \
|
||||
BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
|
||||
CREDS_FILE=/tmp/credential \
|
||||
BSL_BUCKET=bucket \
|
||||
ADDITIONAL_OBJECT_STORE_PROVIDER=aws \
|
||||
ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
|
||||
ADDITIONAL_CREDS_FILE=/tmp/credential \
|
||||
ADDITIONAL_BSL_BUCKET=additional-bucket \
|
||||
VELERO_IMAGE=velero:pr-test-linux-amd64 \
|
||||
PLUGINS=velero/velero-plugin-for-aws:latest \
|
||||
GINKGO_LABELS="${{ matrix.labels }}" \
|
||||
KIBISHII_DIRECTORY=/tmp/kibishii/kubernetes/yaml/ \
|
||||
make -C test/ run-e2e
|
||||
GOPATH=~/go CLOUD_PROVIDER=kind \
|
||||
OBJECT_STORE_PROVIDER=aws BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
|
||||
CREDS_FILE=/tmp/credential BSL_BUCKET=bucket \
|
||||
ADDITIONAL_OBJECT_STORE_PROVIDER=aws ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
|
||||
ADDITIONAL_CREDS_FILE=/tmp/credential ADDITIONAL_BSL_BUCKET=additional-bucket \
|
||||
GINKGO_FOCUS='Basic\]\[ClusterResource' VELERO_IMAGE=velero:pr-test \
|
||||
make -C test/e2e run
|
||||
timeout-minutes: 30
|
||||
- name: Upload debug bundle
|
||||
if: ${{ failure() }}
|
||||
uses: actions/upload-artifact@v5
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
|
||||
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
|
||||
name: DebugBundle
|
||||
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
|
||||
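The `setup-test-matrix` job above derives its Kubernetes version list from the published kindest/node image tags. For readability, the tag-filtering pipeline it runs is restated below as a standalone script; this is a sketch of the same commands shown in the workflow, only split across lines and annotated, not an addition to it.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Same filtering as the workflow's set-matrix step, in stages:
#   1. list the 50 most recent kindest/node tags from Docker Hub
#   2. extract the bare tag names and drop alpha/beta tags
#   3. keep v1.25 through v9.99 and only one patch per minor version
#   4. sort newest first, strip the leading "v", and emit a compact JSON array
wget -q -O - "https://hub.docker.com/v2/namespaces/kindest/repositories/node/tags?page_size=50" \
  | grep -o '"name": *"[^"]*' \
  | grep -o '[^"]*$' \
  | grep -v -E "alpha|beta" \
  | grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" \
  | awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}' \
  | sort -r \
  | sed s/v//g \
  | jq -R -c -s 'split("\n")[:-1]'
```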
.github/workflows/get-go-version.yaml (33 changes, vendored)
@@ -1,33 +0,0 @@
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
ref:
|
||||
description: "The target branch's ref"
|
||||
required: true
|
||||
type: string
|
||||
outputs:
|
||||
version:
|
||||
description: "The expected Go version"
|
||||
value: ${{ jobs.extract.outputs.version }}
|
||||
|
||||
jobs:
|
||||
extract:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.pick-version.outputs.version }}
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- id: pick-version
|
||||
run: |
|
||||
if [ "${{ inputs.ref }}" == "main" ]; then
|
||||
version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
|
||||
else
|
||||
goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
|
||||
toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
|
||||
version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
|
||||
fi
|
||||
|
||||
echo "version=$version"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
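The version selection in the `pick-version` step reads as a small script: on main, only the major.minor of the `go` directive in go.mod is used, while on other branches the newer of the `go` directive and the `toolchain` directive wins. The sketch below restates that logic outside the workflow, under the assumptions that it runs from the repository root and takes the target branch ref as its first argument (the example ref value is illustrative).

```bash
#!/usr/bin/env bash
set -euo pipefail

# Target branch ref of the pull request, e.g. "main" (illustrative usage: ./pick-go-version.sh main).
ref="$1"

if [ "$ref" == "main" ]; then
  # On main, track the major.minor from the go directive.
  version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
else
  # Elsewhere, take whichever of the go directive and the toolchain
  # directive sorts later when compared as versions.
  goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
  toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
  version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
fi

echo "version=$version"
```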
.github/workflows/nightly-trivy-scan.yml (6 changes, vendored)
@@ -13,13 +13,13 @@ jobs:
|
||||
# maintain the versions of Velero those need security scan
|
||||
versions: [main]
|
||||
# list of images that need scan
|
||||
images: [velero, velero-plugin-for-aws, velero-plugin-for-gcp, velero-plugin-for-microsoft-azure]
|
||||
images: [velero, velero-restore-helper]
|
||||
permissions:
|
||||
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@master
|
||||
@@ -31,6 +31,6 @@ jobs:
|
||||
output: 'trivy-results.sarif'
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
uses: github/codeql-action/upload-sarif@v2
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
.github/workflows/pr-changelog-check.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ jobs:
steps:

- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v2

- name: Changelog check
if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}
.github/workflows/pr-ci-check.yml (28 changes, vendored)
@@ -1,30 +1,30 @@
|
||||
name: Pull Request CI Check
|
||||
on: [pull_request]
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
build:
|
||||
name: Run CI
|
||||
needs: get-go-version
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
go-version: '1.20.10'
|
||||
id: go
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v2
|
||||
- name: Fetch cached go modules
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Make ci
|
||||
run: make ci
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
|
||||
.github/workflows/pr-codespell.yml (6 changes, vendored)
@@ -8,14 +8,14 @@ jobs:
|
||||
steps:
|
||||
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Codespell
|
||||
uses: codespell-project/actions-codespell@master
|
||||
with:
|
||||
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
|
||||
# ignore the config/.../crd.go file as it's generated binary data that is edited elswhere.
|
||||
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
|
||||
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme,optin,sie
|
||||
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
|
||||
check_filenames: true
|
||||
check_hidden: true
|
||||
|
||||
|
||||
.github/workflows/pr-containers.yml (6 changes, vendored)
@@ -13,18 +13,18 @@ jobs:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/checkout@v3
|
||||
name: Checkout
|
||||
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@v1
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v1
|
||||
with:
|
||||
version: latest
|
||||
|
||||
|
||||
.github/workflows/pr-goreleaser.yml (2 changes, vendored)

@@ -14,7 +14,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v3
name: Checkout

- name: Verify .goreleaser.yml and try a dryrun release.
.github/workflows/pr-linter-check.yml (28 changes, vendored)
@@ -1,32 +1,14 @@
|
||||
name: Pull Request Linter Check
|
||||
on:
|
||||
pull_request:
|
||||
# Do not run when the change only includes these directories.
|
||||
paths-ignore:
|
||||
- "site/**"
|
||||
- "design/**"
|
||||
- "**/*.md"
|
||||
on: [pull_request]
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
build:
|
||||
name: Run Linter Check
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Linter check
|
||||
uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
version: v2.5.0
|
||||
args: --verbose
|
||||
- name: Linter check
|
||||
run: make lint
|
||||
|
||||
.github/workflows/prow-action.yml (19 changes, vendored)
@@ -9,21 +9,12 @@ jobs:
|
||||
execute:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: jpmcb/prow-github-actions@v1.1.3
|
||||
- uses: jpmcb/prow-github-actions@v1.1.2
|
||||
with:
|
||||
# Only support /kind command for now.
|
||||
# TODO: before allowing the /lgtm command, see if we can block merging if changelog labels are missing.
|
||||
prow-commands: |
|
||||
/approve
|
||||
/area
|
||||
/assign
|
||||
/cc
|
||||
/close
|
||||
/hold
|
||||
prow-commands: "/area
|
||||
/kind
|
||||
/milestone
|
||||
/retitle
|
||||
/remove
|
||||
/reopen
|
||||
/uncc
|
||||
/unassign
|
||||
/cc
|
||||
/uncc"
|
||||
github-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
.github/workflows/push-builder.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:

- uses: actions/checkout@v6
- uses: actions/checkout@v2
with:
# The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
# there are at least two commits: the first one is the merge commit and the second one is the real commit
.github/workflows/push.yml (128 changes, vendored)
@@ -9,55 +9,95 @@ on:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.ref_name }}
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20.10'
|
||||
id: go
|
||||
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: all
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: latest
|
||||
- name: Build
|
||||
run: |
|
||||
make local
|
||||
# Clean go cache to ease the build environment storage pressure.
|
||||
go clean -modcache -cache
|
||||
- name: Test
|
||||
run: make test
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
verbose: true
|
||||
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
|
||||
- name: Publish container image
|
||||
if: github.repository == 'vmware-tanzu/velero'
|
||||
run: |
|
||||
sudo swapoff -a
|
||||
sudo rm -f /mnt/swapfile
|
||||
docker system prune -a --force
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
# Fix issue of setup-gcloud
|
||||
- run: |
|
||||
sudo apt-get install python2.7
|
||||
export CLOUDSDK_PYTHON="/usr/bin/python2"
|
||||
|
||||
- uses: google-github-actions/setup-gcloud@v0
|
||||
with:
|
||||
version: '285.0.0'
|
||||
service_account_key: ${{ secrets.GCS_SA_KEY }}
|
||||
export_default_credentials: true
|
||||
- run: gcloud info
|
||||
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v1
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make local
|
||||
# Clean go cache to ease the build environment storage pressure.
|
||||
go clean -modcache -cache
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
verbose: true
|
||||
|
||||
# Use the JSON key in secret to login gcr.io
|
||||
- uses: 'docker/login-action@v2'
|
||||
with:
|
||||
registry: 'gcr.io' # or REGION.docker.pkg.dev
|
||||
username: '_json_key'
|
||||
password: '${{ secrets.GCR_SA_KEY }}'
|
||||
|
||||
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
|
||||
- name: Publish container image
|
||||
if: github.repository == 'vmware-tanzu/velero'
|
||||
run: |
|
||||
sudo swapoff -a
|
||||
sudo rm -f /mnt/swapfile
|
||||
docker image prune -a --force
|
||||
|
||||
# Build and push Velero image to docker registry
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
./hack/docker-push.sh
|
||||
# Build and push Velero image to docker registry
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)
|
||||
|
||||
# Upload Velero image package to GCS
|
||||
source hack/ci/build_util.sh
|
||||
BIN=velero
|
||||
RESTORE_HELPER_BIN=velero-restore-helper
|
||||
GCS_BUCKET=velero-builds
|
||||
VELERO_IMAGE=${BIN}-${VERSION}
|
||||
VELERO_RESTORE_HELPER_IMAGE=${RESTORE_HELPER_BIN}-${VERSION}
|
||||
VELERO_IMAGE_FILE=${VELERO_IMAGE}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_FILE=${VELERO_RESTORE_HELPER_IMAGE}.tar.gz
|
||||
VELERO_IMAGE_BACKUP_FILE=${VELERO_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE=${VELERO_RESTORE_HELPER_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
|
||||
cp ${VELERO_IMAGE_FILE} ${VELERO_IMAGE_BACKUP_FILE}
|
||||
cp ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE}
|
||||
|
||||
uploader ${VELERO_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
|
||||
.github/workflows/rebase.yml (4 changes, vendored)

@@ -9,10 +9,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the latest code
uses: actions/checkout@v6
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Automatic Rebase
uses: cirrus-actions/rebase@1.8
uses: cirrus-actions/rebase@1.3.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/stale-issues.yml (4 changes, vendored)
@@ -7,7 +7,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v10.1.1
|
||||
- uses: actions/stale@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
|
||||
@@ -20,4 +20,4 @@ jobs:
|
||||
days-before-pr-close: -1
|
||||
# Only issues made after Feb 09 2021.
|
||||
start-date: "2021-09-02T00:00:00"
|
||||
exempt-issue-labels: "Epic,Area/CLI,Area/Cloud/AWS,Area/Cloud/Azure,Area/Cloud/GCP,Area/Cloud/vSphere,Area/CSI,Area/Design,Area/Documentation,Area/Plugins,Bug,Enhancement/User,kind/requirement,kind/refactor,kind/tech-debt,limitation,Needs investigation,Needs triage,Needs Product,P0 - Hair on fire,P1 - Important,P2 - Long-term important,P3 - Wouldn't it be nice if...,Product Requirements,Restic - GA,Restic,release-blocker,Security,backlog"
|
||||
exempt-issue-labels: "Epic,Area/CLI,Area/Cloud/AWS,Area/Cloud/Azure,Area/Cloud/GCP,Area/Cloud/vSphere,Area/CSI,Area/Design,Area/Documentation,Area/Plugins,Bug,Enhancement/User,kind/requirement,kind/refactor,kind/tech-debt,limitation,Needs investigation,Needs triage,Needs Product,P0 - Hair on fire,P1 - Important,P2 - Long-term important,P3 - Wouldn't it be nice if...,Product Requirements,Restic - GA,Restic,release-blocker,Security"
|
||||
|
||||
.gitignore (11 changes, vendored)
@@ -53,13 +53,4 @@ tilt-resources/cloud
|
||||
# test generated files
|
||||
test/e2e/report.xml
|
||||
coverage.out
|
||||
__debug_bin*
|
||||
debug.test*
|
||||
|
||||
# make lint cache
|
||||
.cache/
|
||||
|
||||
# Go telemetry directory created when container sets HOME to working directory
|
||||
# This happens because Makefile uses 'docker run -w /github.com/vmware-tanzu/velero'
|
||||
# and Go's os.UserConfigDir() falls back to $HOME/.config when XDG_CONFIG_HOME is unset
|
||||
.config/
|
||||
__debug_bin*
|
||||
.golangci.yaml (438 changes)
@@ -1,438 +0,0 @@
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
|
||||
# options for analysis running
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
concurrency: 4
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 0
|
||||
timeout: 20m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
issues-exit-code: 1
|
||||
|
||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
||||
# to go.mod are needed. This setting is most useful to check that go.mod does
|
||||
# not need updates, such as in a continuous integration and testing system.
|
||||
# If invoked with -mod=vendor, the go command assumes that the vendor
|
||||
# directory holds the correct copies of dependencies and ignores
|
||||
# the dependency descriptions in go.mod.
|
||||
# modules-download-mode: readonly|release|vendor
|
||||
modules-download-mode: readonly
|
||||
|
||||
# Allow multiple parallel golangci-lint instances running.
|
||||
# If false (default) - golangci-lint acquires file lock on start.
|
||||
allow-parallel-runners: false
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
formats:
|
||||
text:
|
||||
path: stdout
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
# Show statistics per linter.
|
||||
show-stats: false
|
||||
|
||||
linters:
|
||||
# all available settings of specific linters
|
||||
settings:
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
# specify an error message to output when a denylisted package is used
|
||||
- pkg: github.com/sirupsen/logrus
|
||||
desc: "logging is allowed only by logutils.Log"
|
||||
|
||||
dogsled:
|
||||
# checks assignments with too many blank identifiers; default is 2
|
||||
max-blank-identifiers: 2
|
||||
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
|
||||
errcheck:
|
||||
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
|
||||
exhaustive:
|
||||
# indicates that switch statements are to be considered exhaustive if a
|
||||
# 'default' case is present, even if all enum members aren't listed in the
|
||||
# switch
|
||||
default-signifies-exhaustive: false
|
||||
|
||||
funlen:
|
||||
lines: 60
|
||||
statements: 40
|
||||
|
||||
gocognit:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
nestif:
|
||||
# minimal complexity of if statements to report, 5 by default
|
||||
min-complexity: 4
|
||||
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 5
|
||||
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
# To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
|
||||
# By default list of stable checks is used.
|
||||
settings: # settings passed to gocritic
|
||||
captLocal: # must be valid enabled check name
|
||||
paramsOnly: true
|
||||
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
godot:
|
||||
# check all top-level comments, not only declarations
|
||||
check-all: false
|
||||
|
||||
godox:
|
||||
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
|
||||
# might be left in the code accidentally and should be resolved before merging
|
||||
keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting
|
||||
- NOTE
|
||||
- OPTIMIZE # marks code that should be optimized before merging
|
||||
- HACK # marks hack-arounds that should be removed before merging
|
||||
|
||||
gosec:
|
||||
excludes:
|
||||
- G115
|
||||
|
||||
govet:
|
||||
# enable or disable analyzers by name
|
||||
enable:
|
||||
- atomicalign
|
||||
enable-all: false
|
||||
disable:
|
||||
- shadow
|
||||
disable-all: false
|
||||
|
||||
importas:
|
||||
alias:
|
||||
- alias: appsv1api
|
||||
pkg: k8s.io/api/apps/v1
|
||||
- alias: corev1api
|
||||
pkg: k8s.io/api/core/v1
|
||||
- alias: rbacv1
|
||||
pkg: k8s.io/api/rbac/v1
|
||||
- alias: apierrors
|
||||
pkg: k8s.io/apimachinery/pkg/api/errors
|
||||
- alias: apiextv1
|
||||
pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
|
||||
- alias: metav1
|
||||
pkg: k8s.io/apimachinery/pkg/apis/meta/v1
|
||||
- alias: storagev1api
|
||||
pkg: k8s.io/api/storage/v1
|
||||
- alias: batchv1api
|
||||
pkg: k8s.io/api/batch/v1
|
||||
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
ignore-rules:
|
||||
- someword
|
||||
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
|
||||
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
|
||||
# True by default.
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
|
||||
nolintlint:
|
||||
# Enable to ensure that nolint directives are all used. Default is true.
|
||||
allow-unused: false
|
||||
# Exclude following linters from requiring an explanation. Default is [].
|
||||
allow-no-explanation: []
|
||||
# Enable to require an explanation of nonzero length after each nolint directive. Default is false.
|
||||
require-explanation: true
|
||||
# Enable to require nolint directives to mention the specific linter being suppressed. Default is false.
|
||||
require-specific: true
|
||||
|
||||
perfsprint:
|
||||
strconcat: false
|
||||
sprintf1: false
|
||||
errorf: false
|
||||
int-conversion: true
|
||||
|
||||
revive:
|
||||
rules:
|
||||
- name: blank-imports
|
||||
disabled: true
|
||||
- name: context-as-argument
|
||||
disabled: true
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
disabled: true
|
||||
- name: early-return
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: empty-block
|
||||
disabled: true
|
||||
- name: error-naming
|
||||
disabled: true
|
||||
- name: error-return
|
||||
disabled: true
|
||||
- name: error-strings
|
||||
disabled: true
|
||||
- name: errorf
|
||||
disabled: true
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
disabled: true
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
disabled: true
|
||||
- name: redefines-builtin-id
|
||||
disabled: true
|
||||
- name: superfluous-else
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
disabled: true
|
||||
- name: unnecessary-stmt
|
||||
- name: unreachable-code
|
||||
- name: unused-parameter
|
||||
disabled: true
|
||||
- name: use-any
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
disabled: true
|
||||
|
||||
rowserrcheck:
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
|
||||
staticcheck:
|
||||
checks:
|
||||
- all
|
||||
- -QF1001 # FIXME
|
||||
- -QF1003 # FIXME
|
||||
- -QF1004 # FIXME
|
||||
- -QF1007 # FIXME
|
||||
- -QF1008 # FIXME
|
||||
- -QF1009 # FIXME
|
||||
- -QF1012 # FIXME
|
||||
|
||||
testifylint:
|
||||
# TODO: enable them all
|
||||
disable:
|
||||
- float-compare
|
||||
- go-require
|
||||
enable-all: true
|
||||
|
||||
testpackage:
|
||||
# regexp pattern to skip files
|
||||
skip-regexp: (export|internal)_test\.go
|
||||
unparam:
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
|
||||
usetesting:
|
||||
os-setenv: false
|
||||
|
||||
whitespace:
|
||||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement
|
||||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature
|
||||
|
||||
wsl:
|
||||
# If true append is only allowed to be cuddled if appending value is
|
||||
# matching variables, fields or types on line above. Default is true.
|
||||
strict-append: true
|
||||
# Allow calls and assignments to be cuddled as long as the lines have any
|
||||
# matching variables, fields or types. Default is true.
|
||||
allow-assign-and-call: true
|
||||
# Allow multiline assignments to be cuddled. Default is true.
|
||||
allow-multiline-assign: true
|
||||
# Allow declarations (var) to be cuddled.
|
||||
allow-cuddle-declarations: false
|
||||
# Allow trailing comments in ending of blocks
|
||||
allow-trailing-comment: false
|
||||
# Force newlines in end of case at this limit (0 = never).
|
||||
force-case-trailing-whitespace: 0
|
||||
# Force cuddling of err checks with err var assignment
|
||||
force-err-cuddling: false
|
||||
# Allow leading comments to be separated with empty lines
|
||||
allow-separated-leading-comment: false
|
||||
|
||||
default: none
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- copyloopvar
|
||||
- dogsled
|
||||
- dupword
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- errchkjson
|
||||
- exptostd
|
||||
- ginkgolinter
|
||||
- goconst
|
||||
- goheader
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- govet
|
||||
- importas
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nakedret
|
||||
- nilerr
|
||||
- noctx
|
||||
- nolintlint
|
||||
- nosprintfhostport
|
||||
- perfsprint
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- thelper
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- usestdlibvars
|
||||
- usetesting
|
||||
- whitespace
|
||||
|
||||
exclusions:
|
||||
# which dirs to skip: issues from them won't be reported;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but default dirs are skipped independently
|
||||
# from this option's value (see skip-dirs-use-default).
|
||||
# "/" will be replaced by current OS file path separator to properly work
|
||||
# on Windows.
|
||||
paths:
|
||||
- pkg/plugin/generated/*
|
||||
- third_party
|
||||
|
||||
rules:
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "DefaultVolumesToRestic" # No need to report deprecate for DefaultVolumesToRestic.
|
||||
- path: ".*_test.go$"
|
||||
linters:
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- govet
|
||||
- staticcheck
|
||||
- unparam
|
||||
- unused
|
||||
- path: test/
|
||||
linters:
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- nilerr
|
||||
- staticcheck
|
||||
- unparam
|
||||
- unused
|
||||
- path: ".*data_upload_controller_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "type"
|
||||
- path: ".*config_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "bucket"
|
||||
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
|
||||
issues:
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-issues-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- pkg/plugin/generated/*
|
||||
- third_party
|
||||
|
||||
settings:
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- github.com/vmware-tanzu/velero
|
||||
|
||||
severity:
|
||||
default: error
|
||||
|
||||
# Default value is empty list.
|
||||
# When a list of severity rules are provided, severity information will be added to lint
|
||||
# issues. Severity rules have the same filtering capability as exclude rules except you
|
||||
# are allowed to specify one matcher per severity rule.
|
||||
# Only affects out formats that support setting severity information.
|
||||
rules:
|
||||
- linters:
|
||||
- dupl
|
||||
severity: info
|
||||
|
||||
version: "2"
|
||||
@@ -26,23 +26,18 @@ builds:
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64le
|
||||
- s390x
|
||||
ignore:
|
||||
# don't build arm for darwin and arm/arm64 for windows
|
||||
- goos: darwin
|
||||
goarch: arm
|
||||
- goos: darwin
|
||||
goarch: ppc64le
|
||||
- goos: darwin
|
||||
goarch: s390x
|
||||
- goos: windows
|
||||
goarch: arm
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
- goos: windows
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: s390x
|
||||
ldflags:
|
||||
- -X "github.com/vmware-tanzu/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.ImageRegistry={{ .Env.REGISTRY }}"
|
||||
archives:
|
||||
@@ -51,6 +46,9 @@ archives:
|
||||
files:
|
||||
- LICENSE
|
||||
- examples/**/*
|
||||
# Add the setting to resolve the DEPRECATED warning. Actually, Velero's case is not affected by the rlcp behavior change.
|
||||
# https://github.com/orgs/goreleaser/discussions/3659#discussioncomment-4587257
|
||||
rlcp: true
|
||||
checksum:
|
||||
name_template: 'CHECKSUM'
|
||||
release:
|
||||
@@ -65,4 +63,4 @@ git:
|
||||
# tags if there are more than one tag in the same commit.
|
||||
#
|
||||
# Default: `-version:refname`
|
||||
tag_sort: -version:creatordate
|
||||
tag_sort: -version:creatordate
|
||||
@@ -16,7 +16,6 @@ If you're using Velero and want to add your organization to this list,
<a href="https://mayadata.io/" border="0" target="_blank"><img alt="mayadata.io" src="site/static/img/adopters/mayadata.svg" height="50"></a>
<a href="https://www.replicated.com/" border="0" target="_blank"><img alt="replicated.com" src="site/static/img/adopters/replicated-logo-red.svg" height="50"></a>
<a href="https://cloudcasa.io/" border="0" target="_blank"><img alt="cloudcasa.io" src="site/static/img/adopters/cloudcasa.svg" height="50"></a>
<a href="https://azure.microsoft.com/" border="0" target="_blank"><img alt="azure.com" src="site/static/img/adopters/azure.svg" height="50"></a>
## Success Stories

Below is a list of adopters of Velero in **production environments** that have
@@ -63,10 +62,7 @@ Okteto integrates Velero in [Okteto Cloud][94] and [Okteto Enterprise][95] to pe
Replicated uses the Velero open source project to enable snapshots in [KOTS][101] to backup Kubernetes manifests & persistent volumes. In addition to the default functionality that Velero provides, [KOTS][101] provides a detailed interface in the [Admin Console][102] that can be used to manage the storage destination and schedule, and to perform and monitor the backup and restore process.<br>

**[CloudCasa][103]**<br>
[Catalogic Software][104] integrates Velero with [CloudCasa][103] - A Smart Home in the Cloud for Backups. CloudCasa is a full-featured, scalable, cloud-native solution providing Kubernetes data protection, disaster recovery, and migration as a service. An option to manage existing Velero instances and an enterprise self-hosted option are also available.<br>

**[Microsoft Azure][105]**<br>
[Azure Backup for AKS][106] is an Azure native, Kubernetes aware, Enterprise ready backup for containerized applications deployed on Azure Kubernetes Service (AKS). AKS Backup utilizes Velero to perform backup and restore operations to protect stateful applications in AKS clusters.<br>
[Catalogic Software][104] integrates Velero with [CloudCasa][103] - A Smart Home in the Cloud for Backups. CloudCasa is a simple, scalable, cloud-native solution providing data protection and disaster recovery as a service. This solution is built using Kubernetes for protecting Kubernetes clusters.<br>

## Adding your organization to the list of Velero Adopters

@@ -122,6 +118,3 @@ If you would like to add your logo to a future `Adopters of Velero` section on [

[103]: https://cloudcasa.io/
[104]: https://www.catalogicsoftware.com/

[105]: https://azure.microsoft.com/
[106]: https://learn.microsoft.com/azure/backup/backup-overview

10  CHANGELOG.md
@@ -1,11 +1,7 @@
## Current release:
* [CHANGELOG-1.15.md][25]
* [CHANGELOG-1.11.md][21]

## Older releases:
* [CHANGELOG-1.14.md][24]
* [CHANGELOG-1.13.md][23]
* [CHANGELOG-1.12.md][22]
* [CHANGELOG-1.11.md][21]
* [CHANGELOG-1.10.md][20]
* [CHANGELOG-1.9.md][19]
* [CHANGELOG-1.8.md][18]
@@ -28,10 +24,6 @@
* [CHANGELOG-0.3.md][1]


[25]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.15.md
[24]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.14.md
[23]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.13.md
[22]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.12.md
[21]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.11.md
[20]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.10.md
[19]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.9.md

@@ -5,7 +5,7 @@
We as members, contributors, and leaders pledge to make participation in the Velero project and our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socioeconomic status,
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.


@@ -13,7 +13,7 @@
# limitations under the License.

# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.20.10-bullseye as velero-builder

ARG GOPROXY
ARG BIN
@@ -42,16 +42,13 @@ RUN mkdir -p /output/usr/bin && \
    export GOARM=$( echo "${GOARM}" | cut -c2-) && \
    go build -o /output/${BIN} \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
    go build -o /output/velero-restore-helper \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-restore-helper && \
    go build -o /output/velero-helper \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
    go clean -modcache -cache

# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder
FROM --platform=$BUILDPLATFORM golang:1.20.10-bullseye as restic-builder

ARG GOPROXY
ARG BIN
ARG TARGETOS
ARG TARGETARCH
@@ -73,7 +70,7 @@ RUN mkdir -p /output/usr/bin && \
    go clean -modcache -cache

# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:latest
FROM paketobuildpacks/run-jammy-tiny:0.2.11

LABEL maintainer="Xun Jiang <jxun@vmware.com>"


@@ -1,57 +0,0 @@
# Copyright the Velero contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG OS_VERSION=1809

# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder

ARG GOPROXY
ARG BIN
ARG PKG
ARG VERSION
ARG REGISTRY
ARG GIT_SHA
ARG GIT_TREE_STATE
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT

ENV CGO_ENABLED=0 \
    GO111MODULE=on \
    GOPROXY=${GOPROXY} \
    GOOS=${TARGETOS} \
    GOARCH=${TARGETARCH} \
    GOARM=${TARGETVARIANT} \
    LDFLAGS="-X ${PKG}/pkg/buildinfo.Version=${VERSION} -X ${PKG}/pkg/buildinfo.GitSHA=${GIT_SHA} -X ${PKG}/pkg/buildinfo.GitTreeState=${GIT_TREE_STATE} -X ${PKG}/pkg/buildinfo.ImageRegistry=${REGISTRY}"

WORKDIR /go/src/github.com/vmware-tanzu/velero

COPY . /go/src/github.com/vmware-tanzu/velero

RUN mkdir -p /output/usr/bin && \
    export GOARM=$( echo "${GOARM}" | cut -c2-) && \
    go build -o /output/${BIN}.exe \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
    go build -o /output/velero-restore-helper.exe \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-restore-helper && \
    go build -o /output/velero-helper.exe \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
    go clean -modcache -cache

# Velero image packing section
FROM mcr.microsoft.com/windows/nanoserver:${OS_VERSION}
COPY --from=velero-builder /output /

USER ContainerUser

@@ -107,29 +107,6 @@ Lazy consensus does _not_ apply to the process of:

* Removal of maintainers from Velero

## Deprecation Policy

### Deprecation Process

Any contributor may introduce a request to deprecate a feature or an option of a feature by opening a feature request issue in the vmware-tanzu/velero GitHub project. The issue should describe why the feature is no longer needed or has become detrimental to Velero, as well as whether and how it has been superseded. The submitter should give as much detail as possible.

Once the issue is filed, a one-month discussion period begins. Discussions take place within the issue itself as well as in the community meetings. The person who opens the issue, or a maintainer, should add the date and time marking the end of the discussion period in a comment on the issue as soon as possible after it is opened. A decision on the issue needs to be made within this one-month period.

The feature will be deprecated by a supermajority vote of 50% plus one of the project maintainers at the time of the vote tallying, which is 72 hours after the end of the community meeting that closes the comment period. (Maintainers are permitted to vote in advance of the deadline, but should hold their votes until as close to the deadline as possible in order to hear all of the discussion.) Votes will be tallied in comments on the issue.

Non-maintainers may add non-binding votes in comments to the issue as well; these are opinions to be taken into consideration by maintainers, but they do not count as votes.

If the vote passes, the deprecation window takes effect in the subsequent release, and the removal follows the schedule.

### Schedule
If the deprecation proposal passes by supermajority vote, the feature is deprecated in the next minor release and can be removed completely after two further minor versions (or their equivalent if the major version number changes); e.g., if a feature is deprecated in the Nth minor version, it can be removed after the N+2 minor version or its equivalent.

### Deprecation Window

The deprecation window is the period from the release in which the deprecation takes effect through the release in which the feature is removed. During this period, only critical security vulnerabilities and catastrophic bugs should be fixed.

**Note:** If a backup relies on a deprecated feature, then backups made with the last Velero release before this feature is removed must still be restorable in version `n+2`. For something like restic support, that might mean that restic is removed from the list of supported uploader types in version `n`, but the underlying implementation required to restore from a restic backup won't be removed until release `n+2`.

## Updating Governance

All substantive changes in Governance require a supermajority agreement by all maintainers.


@@ -4,16 +4,16 @@

## Maintainers

| Maintainer           | GitHub ID                                                       | Affiliation                                      |
|----------------------|-----------------------------------------------------------------|--------------------------------------------------|
| Scott Seago          | [sseago](https://github.com/sseago)                             | [OpenShift](https://github.com/openshift)        |
| Daniel Jiang         | [reasonerjt](https://github.com/reasonerjt)                     | [VMware](https://www.github.com/vmware/)         |
| Wenkai Yin           | [ywk253100](https://github.com/ywk253100)                       | [VMware](https://www.github.com/vmware/)         |
| Xun Jiang            | [blackpiglet](https://github.com/blackpiglet)                   | [VMware](https://www.github.com/vmware/)         |
| Shubham Pampattiwar  | [shubham-pampattiwar](https://github.com/shubham-pampattiwar)   | [OpenShift](https://github.com/openshift)        |
| Yonghui Li           | [Lyndon-Li](https://github.com/Lyndon-Li)                       | [VMware](https://www.github.com/vmware/)         |
| Anshul Ahuja         | [anshulahuja98](https://github.com/anshulahuja98)               | [Microsoft Azure](https://www.github.com/azure/) |
| Tiger Kaovilai       | [kaovilai](https://github.com/kaovilai)                         | [OpenShift](https://github.com/openshift)        |
| Maintainer           | GitHub ID                                                       | Affiliation                                |
|----------------------|-----------------------------------------------------------------|--------------------------------------------|
| Dave Smith-Uchida    | [dsu-igeek](https://github.com/dsu-igeek)                       | [Kasten](https://github.com/kastenhq/)     |
| Scott Seago          | [sseago](https://github.com/sseago)                             | [OpenShift](https://github.com/openshift)  |
| Daniel Jiang         | [reasonerjt](https://github.com/reasonerjt)                     | [VMware](https://www.github.com/vmware/)   |
| Wenkai Yin           | [ywk253100](https://github.com/ywk253100)                       | [VMware](https://www.github.com/vmware/)   |
| Xun Jiang            | [blackpiglet](https://github.com/blackpiglet)                   | [VMware](https://www.github.com/vmware/)   |
| Ming Qiu             | [qiuming-best](https://github.com/qiuming-best)                 | [VMware](https://www.github.com/vmware/)   |
| Shubham Pampattiwar  | [shubham-pampattiwar](https://github.com/shubham-pampattiwar)   | [OpenShift](https://github.com/openshift)  |
| Yonghui Li           | [Lyndon-Li](https://github.com/Lyndon-Li)                       | [VMware](https://www.github.com/vmware/)   |

## Emeritus Maintainers
* Adnan Abdulhussein ([prydonius](https://github.com/prydonius))
@@ -25,13 +25,12 @@
* Carlisia Thompson ([carlisia](https://github.com/carlisia))
* Bridget McErlean ([zubron](https://github.com/zubron))
* JenTing Hsiao ([jenting](https://github.com/jenting))
* Dave Smith-Uchida ([dsu-igeek](https://github.com/dsu-igeek))
* Ming Qiu ([qiuming-best](https://github.com/qiuming-best))

## Velero Contributors & Stakeholders

| Feature Area            | Lead                                                         |
|-------------------------|:------------------------------------------------------------:|
| Architect               | Dave Smith-Uchida [dsu-igeek](https://github.com/dsu-igeek)  |
| Technical Lead          | Daniel Jiang [reasonerjt](https://github.com/reasonerjt)     |
| Kubernetes CSI Liaison  |                                                              |
| Deployment              |                                                              |

179  Makefile
@@ -22,26 +22,15 @@ PKG := github.com/vmware-tanzu/velero

# Where to push the docker image.
REGISTRY ?= velero
# In order to push images to an insecure registry, follow the two steps:
# 1. Set "INSECURE_REGISTRY=true"
# 2. Provide your own buildx builder instance by setting "BUILDX_INSTANCE=your-own-builder-instance"
# The builder can be created with the following command:
#   cat << EOF > buildkitd.toml
#   [registry."insecure-registry-ip:port"]
#     http = true
#     insecure = true
#   EOF
#   docker buildx create --name=velero-builder --driver=docker-container --bootstrap --use --config ./buildkitd.toml
# Refer to https://github.com/docker/buildx/issues/1370#issuecomment-1288516840 for more details
INSECURE_REGISTRY ?= false
GCR_REGISTRY ?= gcr.io/velero-gcp

# Image name
IMAGE ?= $(REGISTRY)/$(BIN)
GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)

# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
# that pull base images from different registries.
VELERO_DOCKERFILE ?= Dockerfile
VELERO_DOCKERFILE_WINDOWS ?= Dockerfile-Windows
BUILDER_IMAGE_DOCKERFILE ?= hack/build-image/Dockerfile

# Calculate the realpath of the build-image Dockerfile as we `cd` into the hack/build
@@ -65,7 +54,7 @@ endif
BUILDER_IMAGE := $(REGISTRY)/build-image:$(BUILDER_IMAGE_TAG)
BUILDER_IMAGE_CACHED := $(shell docker images -q ${BUILDER_IMAGE} 2>/dev/null )

HUGO_IMAGE := ghcr.io/gohugoio/hugo
HUGO_IMAGE := hugo-builder

# Which architecture to build - see $(ALL_ARCH) for options.
# if the 'local' rule is being run, detect the ARCH from 'go env'
@@ -79,21 +68,13 @@ TAG_LATEST ?= false

ifeq ($(TAG_LATEST), true)
    IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
    GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
else
    IMAGE_TAGS ?= $(IMAGE):$(VERSION)
    GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
endif

# check buildx is enabled only if docker is in path
# macOS/Windows docker cli without Docker Desktop license: https://github.com/abiosoft/colima
# To add buildx to docker cli: https://github.com/abiosoft/colima/discussions/273#discussioncomment-2684502
ifeq ($(shell which docker 2>/dev/null 1>&2 && docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
    BUILDX_ENABLED ?= true
# if emulated docker cli from podman, assume enabled
# emulated docker cli from podman: https://podman-desktop.io/docs/migrating-from-docker/emulating-docker-cli-with-podman
# podman known issues:
# - on remote podman, such as on macOS,
#   --output issue: https://github.com/containers/podman/issues/15922
else ifeq ($(shell which docker 2>/dev/null 1>&2 && cat $(shell which docker) | grep -c "exec podman"), 1)
    ifeq ($(shell docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
        BUILDX_ENABLED ?= true
    else
        BUILDX_ENABLED ?= false
@@ -103,32 +84,13 @@ define BUILDX_ERROR
buildx not enabled, refusing to run this recipe
see: https://velero.io/docs/main/build-from-source/#making-images-and-updating-velero for more info
endef
# comma cannot be escaped and can only be used in Make function arguments by putting into variable
comma=,

# The version of restic binary to be downloaded
RESTIC_VERSION ?= 0.15.0

CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le linux-s390x
BUILD_OUTPUT_TYPE ?= docker
BUILD_OS ?= linux
BUILD_ARCH ?= amd64
BUILD_WINDOWS_VERSION ?= ltsc2022

ifeq ($(BUILD_OUTPUT_TYPE), docker)
    ALL_OS = linux
    ALL_ARCH.linux = $(word 2, $(subst -, ,$(shell go env GOOS)-$(shell go env GOARCH)))
else
    ALL_OS = $(subst $(comma), ,$(BUILD_OS))
    ALL_ARCH.linux = $(subst $(comma), ,$(BUILD_ARCH))
endif

ALL_ARCH.windows = $(if $(filter windows,$(ALL_OS)),amd64,)
ALL_OSVERSIONS.windows = $(if $(filter windows,$(ALL_OS)),$(BUILD_WINDOWS_VERSION),)
ALL_OS_ARCH.linux = $(foreach os, $(filter linux,$(ALL_OS)), $(foreach arch, ${ALL_ARCH.linux}, ${os}-$(arch)))
ALL_OS_ARCH.windows = $(foreach os, $(filter windows,$(ALL_OS)), $(foreach arch, $(ALL_ARCH.windows), $(foreach osversion, ${ALL_OSVERSIONS.windows}, ${os}-${osversion}-${arch})))
ALL_OS_ARCH = $(ALL_OS_ARCH.linux)$(ALL_OS_ARCH.windows)

ALL_IMAGE_TAGS = $(IMAGE_TAGS)
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le
BUILDX_PLATFORMS ?= $(subst -,/,$(ARCH))
BUILDX_OUTPUT_TYPE ?= docker

# set git sha and tree state
GIT_SHA = $(shell git rev-parse HEAD)
@@ -146,26 +108,27 @@ platform_temp = $(subst -, ,$(ARCH))
GOOS = $(word 1, $(platform_temp))
GOARCH = $(word 2, $(platform_temp))
GOPROXY ?= https://proxy.golang.org
GOBIN=$$(pwd)/.go/bin

# If you want to build all binaries, see the 'all-build' rule.
# If you want to build all containers, see the 'all-containers' rule.
all:
    @$(MAKE) build
    @$(MAKE) build BIN=velero-restore-helper

build-%:
    @$(MAKE) --no-print-directory ARCH=$* build
    @$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restore-helper

all-build: $(addprefix build-, $(CLI_PLATFORMS))

all-containers:
    @$(MAKE) --no-print-directory container
    @$(MAKE) --no-print-directory container BIN=velero-restore-helper

local: build-dirs
    # Add DEBUG=1 to enable debug locally
    GOOS=$(GOOS) \
    GOARCH=$(GOARCH) \
    GOBIN=$(GOBIN) \
    VERSION=$(VERSION) \
    REGISTRY=$(REGISTRY) \
    PKG=$(PKG) \
@@ -182,7 +145,6 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
    $(MAKE) shell CMD="-c '\
    GOOS=$(GOOS) \
    GOARCH=$(GOARCH) \
    GOBIN=$(GOBIN) \
    VERSION=$(VERSION) \
    REGISTRY=$(REGISTRY) \
    PKG=$(PKG) \
@@ -221,38 +183,11 @@ container:
ifneq ($(BUILDX_ENABLED), true)
    $(error $(BUILDX_ERROR))
endif

ifeq ($(BUILDX_INSTANCE),)
    @echo creating a buildx instance
    -docker buildx rm velero-builder || true
    @docker buildx create --use --name=velero-builder
else
    @echo using a specified buildx instance $(BUILDX_INSTANCE)
    @docker buildx use $(BUILDX_INSTANCE)
endif

    @mkdir -p _output

    @for osarch in $(ALL_OS_ARCH); do \
        $(MAKE) container-$${osarch}; \
    done

ifeq ($(BUILD_OUTPUT_TYPE), registry)
    @for tag in $(ALL_IMAGE_TAGS); do \
        IMAGE_TAG=$${tag} $(MAKE) push-manifest; \
    done
endif

container-linux-%:
    @BUILDX_ARCH=$* $(MAKE) container-linux

container-linux:
    @echo "building container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"

    @docker buildx build --pull \
    --output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-linux-$(BUILDX_ARCH).tar,)" \
    --platform="linux/$(BUILDX_ARCH)" \
    $(addprefix -t , $(addsuffix "-linux-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
    --output=type=$(BUILDX_OUTPUT_TYPE) \
    --platform $(BUILDX_PLATFORMS) \
    $(addprefix -t , $(IMAGE_TAGS)) \
    $(addprefix -t , $(GCR_IMAGE_TAGS)) \
    --build-arg=GOPROXY=$(GOPROXY) \
    --build-arg=PKG=$(PKG) \
    --build-arg=BIN=$(BIN) \
@@ -261,54 +196,14 @@ container-linux:
    --build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
    --build-arg=REGISTRY=$(REGISTRY) \
    --build-arg=RESTIC_VERSION=$(RESTIC_VERSION) \
    --provenance=false \
    --sbom=false \
    -f $(VELERO_DOCKERFILE) .

    @echo "built container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"

container-windows-%:
    @BUILDX_OSVERSION=$(firstword $(subst -, ,$*)) BUILDX_ARCH=$(lastword $(subst -, ,$*)) $(MAKE) container-windows

container-windows:
    @echo "building container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"

    @docker buildx build --pull \
    --output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH).tar,)" \
    --platform="windows/$(BUILDX_ARCH)" \
    $(addprefix -t , $(addsuffix "-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
    --build-arg=GOPROXY=$(GOPROXY) \
    --build-arg=PKG=$(PKG) \
    --build-arg=BIN=$(BIN) \
    --build-arg=VERSION=$(VERSION) \
    --build-arg=OS_VERSION=$(BUILDX_OSVERSION) \
    --build-arg=GIT_SHA=$(GIT_SHA) \
    --build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
    --build-arg=REGISTRY=$(REGISTRY) \
    --provenance=false \
    --sbom=false \
    -f $(VELERO_DOCKERFILE_WINDOWS) .

    @echo "built container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"

push-manifest:
    @echo "building manifest: $(IMAGE_TAG) for $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})"
    @docker manifest create --amend --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG) $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})

    @set -x; \
    for arch in $(ALL_ARCH.windows); do \
        for osversion in $(ALL_OSVERSIONS.windows); do \
            BASEIMAGE=mcr.microsoft.com/windows/nanoserver:$${osversion}; \
            full_version=`docker manifest inspect --insecure=$(INSECURE_REGISTRY) $${BASEIMAGE} | jq -r '.manifests[0].platform["os.version"]'`; \
            docker manifest annotate --os windows --arch $${arch} --os-version $${full_version} $(IMAGE_TAG) $(IMAGE_TAG)-windows-$${osversion}-$${arch}; \
        done; \
    done

    @echo "pushing manifest $(IMAGE_TAG)"
    @docker manifest push --purge --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)

    @echo "pushed manifest $(IMAGE_TAG):"
    @docker manifest inspect --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)
    @echo "container: $(IMAGE):$(VERSION)"
ifeq ($(BUILDX_OUTPUT_TYPE)_$(REGISTRY), registry_velero)
    docker pull $(IMAGE):$(VERSION)
    rm -f $(BIN)-$(VERSION).tar
    docker save $(IMAGE):$(VERSION) -o $(BIN)-$(VERSION).tar
    gzip -f $(BIN)-$(VERSION).tar
endif

SKIP_TESTS ?=
test: build-dirs
@@ -451,7 +346,7 @@ release:
serve-docs: build-image-hugo
    docker run \
    --rm \
    -v "$$(pwd)/site:/project" \
    -v "$$(pwd)/site:/srv/hugo" \
    -it -p 1313:1313 \
    $(HUGO_IMAGE) \
    server --bind=0.0.0.0 --enableGitInfo=false
@@ -462,29 +357,11 @@ gen-docs:

.PHONY: test-e2e
test-e2e: local
    $(MAKE) -e VERSION=$(VERSION) -C test/ run-e2e
    $(MAKE) -e VERSION=$(VERSION) -C test/e2e run

.PHONY: test-perf
test-perf: local
    $(MAKE) -e VERSION=$(VERSION) -C test/ run-perf
    $(MAKE) -e VERSION=$(VERSION) -C test/perf run

go-generate:
    go generate ./pkg/...

# requires an authenticated gh cli
# gh: https://cli.github.com/
# First create a PR
# gh pr create --title 'Title name' --body 'PR body'
# by default uses PR title as changelog body but can be overwritten like so
# make new-changelog CHANGELOG_BODY="Changes you have made"
new-changelog: GH_LOGIN ?= $(shell gh pr view --json author --jq .author.login 2> /dev/null)
new-changelog: GH_PR_NUMBER ?= $(shell gh pr view --json number --jq .number 2> /dev/null)
new-changelog: CHANGELOG_BODY ?= '$(shell gh pr view --json title --jq .title)'
new-changelog:
    @if [ "$(GH_LOGIN)" = "" ]; then \
        echo "branch does not have PR or cli not logged in, try 'gh auth login' or 'gh pr create'"; \
        exit 1; \
    fi
    @mkdir -p ./changelogs/unreleased/ && \
    echo $(CHANGELOG_BODY) > ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN) && \
    echo \"$(CHANGELOG_BODY)\" added to "./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
    go generate ./pkg/...

24  OWNERS
@@ -1,24 +0,0 @@
# This file is used by the [PROW action](https://github.com/jpmcb/prow-github-actions) to approve and merge PRs.
# The file's format follows the [OWNERS SPEC](https://www.kubernetes.dev/docs/guide/owners/#owners-spec).

# List of usernames who may use /lgtm
reviewers:
  - @Lyndon-Li
  - @anshulahuja98
  - @blackpiglet
  - @qiuming-best
  - @reasonerjt
  - @shubham-pampattiwar
  - @sseago
  - @ywk253100

# List of usernames who may use /approve
approvers:
  - @Lyndon-Li
  - @anshulahuja98
  - @blackpiglet
  - @qiuming-best
  - @reasonerjt
  - @shubham-pampattiwar
  - @sseago
  - @ywk253100

18  README.md
@@ -40,19 +40,17 @@ See [the list of releases][6] to find out about feature changes.

The following is a list of the supported Kubernetes versions for each Velero version.

| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version        |
|----------------|-------------------------------------------|-------------------------------------|
| 1.17           | 1.18-latest                               | 1.31.7, 1.32.3, 1.33.1, and 1.34.0  |
| 1.16           | 1.18-latest                               | 1.31.4, 1.32.3, and 1.33.0          |
| 1.15           | 1.18-latest                               | 1.28.8, 1.29.8, 1.30.4 and 1.31.1   |
| 1.14           | 1.18-latest                               | 1.27.9, 1.28.9, and 1.29.4          |
| 1.13           | 1.18-latest                               | 1.26.5, 1.27.3, 1.27.8, and 1.28.3  |
| 1.12           | 1.18-latest                               | 1.25.7, 1.26.5, 1.26.7, and 1.27.3  |
| 1.11           | 1.18-latest                               | 1.23.10, 1.24.9, 1.25.5, and 1.26.1 |
| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version           |
|----------------|-------------------------------------------|----------------------------------------|
| 1.12           | 1.18-latest                               | 1.25.7, 1.26.5, 1.27.6 and 1.28.0      |
| 1.11           | 1.18-latest                               | 1.23.10, 1.24.9, 1.25.5, and 1.26.1    |
| 1.10           | 1.18-latest                               | 1.22.5, 1.23.8, 1.24.6 and 1.25.1      |
| 1.9            | 1.18-latest                               | 1.20.5, 1.21.2, 1.22.5, 1.23, and 1.24 |
| 1.8            | 1.18-latest                               |                                        |

Velero supports IPv4, IPv6, and dual stack environments. Support for this was tested against Velero v1.8.

The Velero maintainers are continuously working to expand testing coverage, but are not able to test every combination of Velero and supported Kubernetes versions for each Velero release. The table above is meant to track the current testing coverage and the expected supported Kubernetes versions for each Velero version.
The Velero maintainers are continuously working to expand testing coverage, but are not able to test every combination of Velero and supported Kubernetes versions for each Velero release. The table above is meant to track the current testing coverage and the expected supported Kubernetes versions for each Velero version. If you have a question about test coverage before v1.9, please reach out in the [#velero-users](https://kubernetes.slack.com/archives/C6VCGP4MT) Slack channel.

If you are interested in using a different version of Kubernetes with a given Velero version, we'd recommend that you perform testing before installing or upgrading your environment. For full information around capabilities within a release, also see the Velero [release notes](https://github.com/vmware-tanzu/velero/releases) or Kubernetes [release notes](https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG). See the Velero [support page](https://velero.io/docs/latest/support-process/) for information about supported versions of Velero.


18  SECURITY.md
@@ -12,13 +12,13 @@ The Velero project maintains the following [governance document](https://github.

Security is of the highest importance and all security vulnerabilities or suspected security vulnerabilities should be reported to Velero privately, to minimize attacks against current users of Velero before they are fixed. Vulnerabilities will be investigated and patched on the next patch (or minor) release as soon as possible. This information could be kept entirely internal to the project.

If you know of a publicly disclosed security vulnerability for Velero, please **IMMEDIATELY** contact the Security Team (velero-security.pdl@broadcom.com).
If you know of a publicly disclosed security vulnerability for Velero, please **IMMEDIATELY** contact the VMware Security Team (security@vmware.com).


**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities**

To report a vulnerability or a security-related issue, please contact the email address with the details of the vulnerability. The email will be fielded by the Security Team and then shared with the Velero maintainers who have committer and release permissions. Emails will be addressed within 3 business days, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/vmware-tanzu/velero/issues/new/choose) instead.
To report a vulnerability or a security-related issue, please contact the VMware email address with the details of the vulnerability. The email will be fielded by the VMware Security Team and then shared with the Velero maintainers who have committer and release permissions. Emails will be addressed within 3 business days, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/vmware-tanzu/velero/issues/new/choose) instead.


## Proposed Email Content
@@ -29,7 +29,7 @@ Provide a descriptive subject line and in the body of the email include the foll

* Basic identity information, such as your name and your affiliation or company.
* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and logs are all helpful to us).
* Description of the effects of the vulnerability on Velero and the related hardware and software configurations, so that the Security Team can reproduce it.
* Description of the effects of the vulnerability on Velero and the related hardware and software configurations, so that the VMware Security Team can reproduce it.
* How the vulnerability affects Velero usage and an estimation of the attack surface, if there is one.
* List other projects or dependencies that were used in conjunction with Velero to produce the vulnerability.

@@ -49,7 +49,7 @@ Provide a descriptive subject line and in the body of the email include the foll

## Patch, Release, and Disclosure

The Security Team will respond to vulnerability reports as follows:
The VMware Security Team will respond to vulnerability reports as follows:


@@ -62,7 +62,7 @@ The Security Team will respond to vulnerability reports as follows:
5. The Security Team will also create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Security Team makes the final call on the calculated CVSS; it is better to move quickly than making the CVSS perfect. Issues may also be reported to [Mitre](https://cve.mitre.org/) using this [scoring calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will initially be set to private.
6. The Security Team will work on fixing the vulnerability and perform internal testing before preparing to roll out the fix.
7. The Security Team will provide early disclosure of the vulnerability by emailing the [Velero Distributors](https://groups.google.com/u/1/g/projectvelero-distributors) mailing list. Distributors can initially plan for the vulnerability patch ahead of the fix, and later can test the fix and provide feedback to the Velero team. See the section **Early Disclosure to Velero Distributors List** for details about how to join this mailing list.
8. A public disclosure date is negotiated by the Security Team, the bug submitter, and the distributors list. We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for distributor coordination. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. For a critical vulnerability with a straightforward mitigation, we expect the report date for the public disclosure date to be on the order of 14 business days. The Security Team holds the final say when setting a public disclosure date.
8. A public disclosure date is negotiated by the VMware Security Team, the bug submitter, and the distributors list. We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for distributor coordination. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. For a critical vulnerability with a straightforward mitigation, we expect the report date for the public disclosure date to be on the order of 14 business days. The VMware Security Team holds the final say when setting a public disclosure date.
9. Once the fix is confirmed, the Security Team will patch the vulnerability in the next patch or minor release, and backport a patch release into all earlier supported releases. Upon release of the patched version of Velero, we will follow the **Public Disclosure Process**.


@@ -79,7 +79,7 @@ The Security Team will also publish any mitigating steps users can take until th


* Use velero-security.pdl@broadcom.com to report security concerns to the Security Team, who uses the list to privately discuss security issues and fixes prior to disclosure.
* Use security@vmware.com to report security concerns to the VMware Security Team, who uses the list to privately discuss security issues and fixes prior to disclosure.
* Join the [Velero Distributors](https://groups.google.com/u/1/g/projectvelero-distributors) mailing list for early private information and vulnerability disclosure. Early disclosure may include mitigating steps and additional information on security patch releases. See below for information on how Velero distributors or vendors can apply to join this list.


@@ -107,11 +107,11 @@ To be eligible to join the [Velero Distributors](https://groups.google.com/u/1/g

## Embargo Policy

The information that members receive on the Velero Distributors mailing list must not be made public, shared, or even hinted at anywhere beyond those who need to know within your specific team, unless you receive explicit approval to do so from the Security Team. This remains true until the public disclosure date/time agreed upon by the list. Members of the list and others cannot use the information for any reason other than to get the issue fixed for your respective distribution's users.
The information that members receive on the Velero Distributors mailing list must not be made public, shared, or even hinted at anywhere beyond those who need to know within your specific team, unless you receive explicit approval to do so from the VMware Security Team. This remains true until the public disclosure date/time agreed upon by the list. Members of the list and others cannot use the information for any reason other than to get the issue fixed for your respective distribution's users.

Before you share any information from the list with members of your team who are required to fix the issue, these team members must agree to the same terms, and only be provided with information on a need-to-know basis.

In the unfortunate event that you share information beyond what is permitted by this policy, you must urgently inform the Security Team (velero-security.pdl@broadcom.com) of exactly what information was leaked and to whom. If you continue to leak information and break the policy outlined here, you will be permanently removed from the list.
In the unfortunate event that you share information beyond what is permitted by this policy, you must urgently inform the VMware Security Team (security@vmware.com) of exactly what information was leaked and to whom. If you continue to leak information and break the policy outlined here, you will be permanently removed from the list.


@@ -123,6 +123,6 @@ Send new membership requests to projectvelero-distributors@googlegroups.com. In

## Confidentiality, integrity and availability

We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The Security Team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner.
We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The VMware Security Team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner.

Note that we do not currently consider the default settings for Velero to be secure-by-default. It is necessary for operators to explicitly configure settings, role based access control, and other resource related features in Velero to provide a hardened Velero environment. We will not act on any security disclosure that relates to a lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, taking into account backwards compatibility.

2  Tiltfile
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.25 as tilt-helper
FROM golang:1.20.10 as tilt-helper

# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

@@ -1,3 +1,122 @@
## v1.12.4
### 2024-02-26

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.4

### Container Image
`velero/velero:v1.12.4`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### All changes
* BackupRepositories associated with a BSL are invalidated when BSL is (re-)created. (#7397, @kaovilai)
* Check resource Group Version and Kind is available in cluster before attempting restore to prevent being stuck. (#7337, @kaovilai)
* Make "disable-informer-cache" option false(enabled) by default to keep it consistent with the help message (#7298, @ywk253100)
* Add description markers for dataupload and datadownload CRDs (#7042, @shubham-pampattiwar)

## v1.12.3
### 2024-01-09

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.3

### Container Image
`velero/velero:v1.12.3`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### All changes
* Fix issue #7244. By the end of the upload, check the outstanding incomplete snapshots and delete them by calling ApplyRetentionPolicy (#7247, @Lyndon-Li)
* Fix issue #7189, data mover generic restore - don't assume the first volume as the restore volume (#7203, @Lyndon-Li)
* Update CSIVolumeSnapshotsCompleted in backup's status and the metric during backup finalize stage according to async operations content. (#7202, @blackpiglet)
* Node agent restart enhancement (#7130, @qiuming-best)
* Fix issue #6928, remove snapshot deletion timeout for PVB (#7283, @Lyndon-Li)

## v1.12.2
### 2023-11-20

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.2

### Container Image
`velero/velero:v1.12.2`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### All changes
* Fix issue #7068, due to a behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order (#7114, @Lyndon-Li)
* Update Backup.Status.CSIVolumeSnapshotsCompleted during finalize (#7111, @kaovilai)
* Cherry-pick #6917 - Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers (#7049, @27149chen)
* Bump up Velero base image to latest patch release (#7110, @allenxu404)
* Fix the node-agent missing metrics-address defines. (#7098, @yanggangtony)
* Fix issue #7094, fallback to full backup if previous snapshot is not found (#7097, @Lyndon-Li)
* Add DataUpload Result and CSI VolumeSnapshot check for restore PV. (#7087, @blackpiglet)
* Fix issue #7027, data mover backup exposer should not assume the first volume as the backup volume in backup pod (#7060, @Lyndon-Li)
* Truncate the credential file to avoid the change of secret content messing it up (#7058, @ywk253100)
* restore: Use warning when Create IsAlreadyExist and Get error (#7054, @kaovilai)
* Read information from the credential specified by BSL (#7033, @ywk253100)
* Fix issue 6913: Velero Built-in Datamover: Backup stucks in phase WaitingForPluginOperations when Node Agent pod gets restarted (#7025, @shubham-pampattiwar)
* Fix unified repository (kopia) s3 credentials profile selection (#6997, @kaovilai)

## v1.12.1
### 2023-10-20

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.12.1

### Container Image
`velero/velero:v1.12.1`

### Documentation
https://velero.io/docs/v1.12/

### Upgrading
https://velero.io/docs/v1.12/upgrade-to-1.12/

### Highlights

#### Data Mover Adds Support for Block Mode Volumes
For PersistentVolumes with volumeMode set to Block, the volumes are mounted as raw block devices in pods. Starting with 1.12.1, Velero CSI snapshot data movement supports backing up and restoring this kind of volume on Linux-based Kubernetes clusters.
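As a reminder of what such a volume looks like on the Kubernetes side, block mode is requested explicitly on the claim. The manifest below is an illustrative sketch only; the claim name, namespace, and storage class are made up and not part of this release:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc        # hypothetical name, for illustration only
  namespace: demo
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block          # raw block device; Filesystem is the Kubernetes default
  storageClassName: csi-sc   # any CSI storage class whose driver supports block volumes
  resources:
    requests:
      storage: 10Gi
```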

#### New Parameter in Installation to Enable Data Mover
The `velero install` sub-command now includes a new parameter, `--default-snapshot-move-data`, which configures the Velero server to move data by default for all snapshots that support data movement. This feature is useful for users who always want to use VBDM (the Velero built-in data mover) for backups instead of plain CSI snapshots, as they no longer need to specify the `--snapshot-move-data` flag for each individual backup.
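For context, this is a sketch of what the per-backup opt-in looks like when expressed as a Backup manifest rather than a CLI flag (the backup name and namespace selection are illustrative assumptions, not taken from the release):

```yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: nightly-with-data-mover   # hypothetical name
  namespace: velero
spec:
  includedNamespaces:
    - demo
  snapshotMoveData: true          # per-backup equivalent of `velero backup create --snapshot-move-data`
  csiSnapshotTimeout: 10m         # sync wait for the CSI snapshot handle
```

With `--default-snapshot-move-data` set at install time, the `snapshotMoveData` opt-in above is no longer needed on each backup.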

#### Velero Base Image change
The base image previously used by Velero was `distroless`, which contains several CVEs that cannot be addressed quickly. As a result, Velero now uses a `paketobuildpacks` image starting from this version.

### Limitations/Known issues
* The data mover's support for block mode volumes is currently only applicable to Linux environments.

### All changes
* Import auth provider plugins (#6970, @0x113)
* Perf improvements for existing resource restore (#6948, @sseago)
* Retry failed create when using generateName (#6943, @sseago)
* Fix issue #6647, add the --default-snapshot-move-data parameter to Velero install, so that users don't need to specify --snapshot-move-data per backup when they want to move snapshot data for all backups (#6940, @Lyndon-Li)
* Partially fix #6734, guide Kubernetes' scheduler to spread backup pods evenly across nodes as much as possible, so that data mover backup could achieve better parallelism (#6935, @Lyndon-Li)
* Replace the base image with paketobuildpacks image (#6934, @ywk253100)
* Add support for block volumes with Kopia (#6897, @dzaninovic)
* Set ParallelUploadAboveSize as MaxInt64 and flush repo after setting up policy so that policy is retrieved correctly by TreeForSource (#6886, @Lyndon-Li)
* Kubernetes 1.27 new job label batch.kubernetes.io/controller-uid are deleted during restore per https://github.com/kubernetes/kubernetes/pull/114930 (#6713, @kaovilai)
* Add `orLabelSelectors` for backup, restore commands (#6881, @nilesh-akhade)
* Fix issue #6859, move plugin depending podvolume functions to util pkg, so as to remove the dependencies to unnecessary repository packages like kopia, azure, etc. (#6877, @Lyndon-Li)
* Fix issue #6786, always delete VSC regardless of the deletion policy (#6873, @Lyndon-Li)
* Fix #6988, always get region from BSL if it is not empty (#6991, @Lyndon-Li)
* Add both non-Windows version and Windows version code for PVC block mode logic. (#6986, @blackpiglet)

## v1.12
### 2023-08-18

@@ -23,17 +142,17 @@ CSI Snapshot Data Movement is useful in below scenarios:
* For on-premises users, the storage usually doesn't support durable snapshots, so it is impossible, less efficient, or cost-ineffective to keep volume snapshots in the storage. This feature helps to move the snapshot data to a storage location with lower cost and larger scale for long-term preservation.
* For public cloud users, this feature helps users to fulfill a multi-cloud strategy. It allows users to back up volume snapshots from one cloud provider and preserve or restore the data to another cloud provider. Users are then free to move their business data across cloud providers based on Velero backup and restore.

CSI Snapshot Data Movement is built according to the Volume Snapshot Data Movement design ([Volume Snapshot Data Movement](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md)). More details can be found in the design.
CSI Snapshot Data Movement is built according to the Volume Snapshot Data Movement design ([Volume Snapshot Data Movement design](https://github.com/vmware-tanzu/velero/blob/main/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md)). Additionally, guidance on how to use the feature can be found in the Volume Snapshot Data Movement doc ([Volume Snapshot Data Movement doc](https://velero.io/docs/v1.12/csi-snapshot-data-movement)).

#### Resource Modifiers
In many use cases, customers often need to substitute specific values in Kubernetes resources during the restoration process, like changing the namespace, changing the storage class, etc.

To address this need, Resource Modifiers (also known as JSON Substitutions) offer a generic solution in the restore workflow. It allows the user to define filters for specific resources and then specify a JSON patch (operator, path, value) to apply to the resource. This feature simplifies the process of making substitutions without requiring the implementation of a new RestoreItemAction plugin. More details can be found in the Resource Modifiers design ([Resource Modifiers](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/json-substitution-action-design.md)).
To address this need, Resource Modifiers (also known as JSON Substitutions) offer a generic solution in the restore workflow. It allows the user to define filters for specific resources and then specify a JSON patch (operator, path, value) to apply to the resource. This feature simplifies the process of making substitutions without requiring the implementation of a new RestoreItemAction plugin. More design details can be found in the Resource Modifiers design ([Resource Modifiers design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/json-substitution-action-design.md)). For instructions on how to use the feature, please refer to the Resource Modifiers doc ([Resource Modifiers doc](https://velero.io/docs/v1.12/restore-resource-modifiers)).
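As a rough illustration of the idea, a ConfigMap holding the rules has the shape below. Treat this as an unverified sketch: the field names follow the v1.12 Resource Modifiers doc linked above as best recalled, and the rule itself (resource kind, regex, namespace, storage class) is entirely made up:

```yaml
version: v1
resourceModifierRules:
  - conditions:
      groupResource: persistentvolumeclaims   # which resources the rule applies to
      resourceNameRegex: "^mysql.*$"           # optional name filter (hypothetical)
      namespaces:
        - ns1
    patches:
      - operation: replace                     # JSON patch triple: operation, path, value
        path: "/spec/storageClassName"
        value: "premium"
```

The ConfigMap is then referenced at restore time (via a flag along the lines of `velero restore create --resource-modifier-configmap <name>`; check the doc above for the exact flag).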

#### Multiple VolumeSnapshotClasses
Prior to version 1.12, the Velero CSI plugin would choose the VolumeSnapshotClass in the cluster based on matching driver names and the presence of the "velero.io/csi-volumesnapshot-class" label. However, this approach proved inadequate for many user scenarios.

With the introduction of version 1.12, Velero now offers support for multiple VolumeSnapshotClasses in the CSI Plugin, enabling users to select a specific class for a particular backup. More details can be found in Multiple VolumeSnapshotClasses design ([Multiple VolumeSnapshotClasses](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/multiple-csi-volumesnapshotclass-support.md)).
With the introduction of version 1.12, Velero now offers support for multiple VolumeSnapshotClasses in the CSI Plugin, enabling users to select a specific class for a particular backup. More design details can be found in Multiple VolumeSnapshotClasses design ([Multiple VolumeSnapshotClasses design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/multiple-csi-volumesnapshotclass-support.md)). For instructions on how to use the feature, please refer to Multiple VolumeSnapshotClasses doc ([Multiple VolumeSnapshotClasses doc](https://velero.io/docs/v1.12/csi/#implementation-choices)).
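From memory of the v1.12 CSI doc linked above, the selection works roughly as follows: the default class for a driver carries the `velero.io/csi-volumesnapshot-class` label, and a specific backup can override it with a driver-scoped annotation. The exact label/annotation keys and all names below are assumptions to verify against that doc:

```yaml
# Default VolumeSnapshotClass for a CSI driver, discovered via the label
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: fast-snapclass                       # hypothetical name
  labels:
    velero.io/csi-volumesnapshot-class: "true"
driver: csi.example.com                      # hypothetical driver name
deletionPolicy: Retain
---
# Per-backup override: annotate the Backup with the class to use for that driver
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: backup-with-specific-class           # hypothetical name
  namespace: velero
  annotations:
    velero.io/csi-volumesnapshot-class_csi.example.com: fast-snapclass
spec:
  includedNamespaces:
    - demo
```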
|
||||
|
||||
#### Restore Finalizer
|
||||
Before v1.12, the restore controller would only delete restore resources but wouldn’t delete restore data from the backup storage location when the command `velero restore delete` was executed. The only chance Velero deletes restores data from the backup storage location is when the associated backup is deleted.
|
||||
@@ -51,10 +170,12 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
|
||||
* Prior to v1.12, the parameter `uploader-type` for Velero installation had a default value of "restic". However, starting from this version, the default value has been changed to "kopia". This means that Velero will now use Kopia as the default path for file system backup.
|
||||
* The ways of setting CSI snapshot time have changed in v1.12. First, the sync waiting time for creating a snapshot handle in the CSI plugin is changed from the fixed 10 minutes into backup.Spec.CSISnapshotTimeout. The second, the async waiting time for VolumeSnapshot and VolumeSnapshotContent's status turning into `ReadyToUse` in operation uses the operation's timeout. The default value is 4 hours.
|
||||
* As from [Velero helm chart v4.0.0](https://github.com/vmware-tanzu/helm-charts/releases/tag/velero-4.0.0), it supports multiple BSL and VSL, and the BSL and VSL have changed from the map into a slice, and[ this breaking change](https://github.com/vmware-tanzu/helm-charts/pull/413) is not backward compatible. So it would be best to change the BSL and VSL configuration into slices before the Upgrade.
|
||||
* Prior to v1.12, deleting the Velero namespace would easily remove all the resources within it. However, with the introduction of finalizers attached to the Velero CR including `restore`, `dataupload`, and `datadownload` in this version, directly deleting Velero namespace may get stuck indefinitely because the pods responsible for handling the finalizers might be deleted before the resources attached to the finalizers. To avoid this issue, please use the command `velero uninstall` to delete all the Velero resources or ensure that you handle the finalizer appropriately before deleting the Velero namespace.
|
||||
|
||||
|
||||
### Limitations/Known issues
|
||||
* The Azure plugin supports Azure AD Workload identity way, but it only works for Velero native snapshots. It cannot support filesystem backup and snapshot data mover scenarios.
|
||||
* File System backup under Kopia path and CSI Snapshot Data Movement backup fail to back up files that are large the 2GiB due to issue https://github.com/vmware-tanzu/velero/issues/6668.
|
||||
|
||||
|
||||
### All Changes
|
||||
@@ -132,3 +253,10 @@ prior PVC restores with CSI (#6111, @eemcmullan)
|
||||
* Make GetPluginConfig accessible from other packages. (#6151, @tkaovila)
|
||||
* Ignore not found error during patching managedFields (#6136, @ywk253100)
|
||||
* Fix the goreleaser issues and add a new goreleaser action (#6109, @blackpiglet)
|
||||
* Add CSI snapshot data movement doc (#6793, @Lyndon-Li)
|
||||
* Use old(origin) namespace in resource modifier conditions in case namespace may change during restore (#6724, @27149chen)
|
||||
* Fix #6752: add namespace exclude check. (#6762, @blackpiglet)
|
||||
* Update restore controller logic for restore deletion (#6761, @ywk253100)
|
||||
* Fix issue #6753, remove the check for read-only BSL in restore async operation controller since Velero cannot fully support read-only mode BSL in restore at present (#6758, @Lyndon-Li)
|
||||
* Fixes #6636, skip subresource in resource discovery (#6688, @27149chen)
|
||||
* This pr made some improvements in Resource Modifiers:1. add label selector 2. change the field name from groupKind to groupResource (#6704, @27149chen)
|
||||
|
||||
@@ -1,166 +0,0 @@
|
||||
## v1.13
|
||||
### 2024-01-10
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.13.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.13.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.13/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.13/upgrade-to-1.13/
|
||||
|
||||
### Highlights
|
||||
|
||||
#### Resource Modifier Enhancement
|
||||
Velero introduced the Resource Modifiers in v1.12.0. This feature allows users to specify a ConfigMap with a set of rules to modify the resources during restoration. However, only the JSON Patch is supported when creating the rules, and JSON Patch has some limitations, which cannot cover all use cases. In v1.13.0, Velero adds new support for JSON Merge Patch and Strategic Merge Patch, which provide more power and flexibility and allow users to use the same ConfigMap to apply patches on the resources. More design details can be found in [Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/merge-patch-and-strategic-in-resource-modifier.md) design. For instructions on how to use the feature, please refer to the [Resource Modifiers](https://velero.io/docs/v1.13/restore-resource-modifiers/) doc.
|
||||
|
||||
#### Node-Agent Concurrency
|
||||
Velero data movement activities from fs-backups and CSI snapshot data movements run in the Velero node-agent, so they may be hosted by every node in the cluster and consume resources (i.e., CPU, memory, network bandwidth) there. With v1.13, users can configure how many data movement activities (a.k.a. loads) run on each node, globally or per node, so that they can better balance the performance of Velero data movement activities against resource consumption in the cluster. For more information, check the [Node-Agent Concurrency](https://velero.io/docs/v1.13/node-agent-concurrency/) document.
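For illustration, the sketch below creates the node-agent configuration ConfigMap with a global concurrency limit and a higher per-node limit. The `node-agent-config` name and the `loadConcurrency` field layout follow our reading of the Node-Agent Concurrency doc, so treat them as assumptions to verify there.

```bash
# Hypothetical node-agent concurrency config: 2 concurrent loads per node by default,
# 4 on nodes carrying a placeholder "large instance" label.
cat <<'EOF' > node-agent-config.json
{
  "loadConcurrency": {
    "globalConfig": 2,
    "perNodeConfig": [
      {
        "nodeSelector": {
          "matchLabels": {
            "example.com/instance-size": "large"
          }
        },
        "number": 4
      }
    ]
  }
}
EOF

kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```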
|
||||
|
||||
#### Parallel Files Upload Options
|
||||
Velero now supports configurable options for parallel file uploads when using the Kopia uploader for fs-backups or CSI snapshot data movements, which can speed up backups.
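As a quick sketch, parallelism is set per backup; the `--parallel-files-upload` flag name follows the backup reference doc linked below and should be verified against your CLI version, and the namespace is a placeholder.

```bash
# Hypothetical example: raise the number of parallel file uploads for a
# Kopia-based fs-backup.
velero backup create app-backup \
  --include-namespaces demo \
  --default-volumes-to-fs-backup \
  --parallel-files-upload 10
```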
|
||||
For more information, please check [Here](https://velero.io/docs/v1.13/backup-reference/#parallel-files-upload).
|
||||
|
||||
#### Write Sparse Files Options
|
||||
When using fs-restore or CSI snapshot data movement, writing sparse files during restore is supported. For more information, please check [Here](https://velero.io/docs/v1.13/restore-reference/#write-sparse-files).
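A minimal sketch follows; the `--write-sparse-files` flag name is taken from the restore reference doc linked above and the backup name is a placeholder.

```bash
# Hypothetical example: ask fs-restore / data mover restore to write sparse files.
velero restore create --from-backup app-backup --write-sparse-files
```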
|
||||
|
||||
#### Backup Describe
|
||||
In v1.13, the Backup Volumes section is added to the velero backup describe command output. This section describes information for all the volumes included in the backup across the various backup types, i.e., native snapshot, fs-backup, CSI snapshot, and CSI snapshot data movement. In particular, velero backup describe now shows the information of CSI snapshot data movements, which was not supported in v1.12.
|
||||
|
||||
Additionally, the backup describe command no longer checks the EnableCSI feature gate on the client side, so if a backup has volumes with CSI snapshots or CSI snapshot data movements, the command always shows the corresponding information in its output.
|
||||
|
||||
#### Backup's new VolumeInfo metadata
|
||||
Velero now creates a new metadata file in the backup's sub-directory of the backup repository to store information about the PVCs and PVs included in the backup. The information includes the method used to back up the PVC and PV data, snapshot information, and status. The VolumeInfo metadata file determines how the PV resource should be restored. Velero downstream software can also use this metadata file to get a summary of the backup's volume data.
|
||||
|
||||
#### Enhancement for CSI Snapshot Data Movements when Velero Pod Restart
|
||||
Enhancements have been implemented so that when the Velero server pod or node-agent restarts during a backup or restore operation due to certain exceptional circumstances, the current backup or restore process does not get stuck or interrupted.
|
||||
|
||||
#### New status fields added to show hook execution details
|
||||
Hook execution status is now included in the backup/restore CR status and displayed in the backup/restore describe command output. Specifically, it will show the number of hooks which attempted to execute under the HooksAttempted field and the number of hooks which failed to execute under the HooksFailed field.
|
||||
|
||||
#### AWS SDK Bump Up
|
||||
Bump up AWS SDK for Go to version 2, which offers significant performance improvements in CPU and memory utilization over version 1.
|
||||
|
||||
#### Azure AD/Workload Identity Support
|
||||
Azure AD/Workload Identity is the recommended approach for authenticating with Azure services/AKS. Velero introduced support for Azure AD/Workload Identity on the Velero Azure plugin side in previous releases, and in v1.13.0 Velero adds new support for Kopia operations (file system backup/data mover/etc.) with Azure AD/Workload Identity.
|
||||
|
||||
#### Runtime and dependencies
|
||||
To fix CVEs and keep pace with Golang, Velero made changes as follows:
|
||||
* Bump Golang runtime to v1.21.6.
|
||||
* Bump several dependent libraries to new versions.
|
||||
* Bump Kopia to v0.15.0.
|
||||
|
||||
|
||||
### Breaking changes
|
||||
* Backup describe command: due to the backup describe output enhancement, some existing information (i.e. the output for native snapshot, CSI snapshot, and fs-backup) has been moved to the Backup Volumes section with some format changes.
|
||||
* API type changes: changes the field [DataMoverConfig](https://github.com/vmware-tanzu/velero/blob/v1.13.0/pkg/apis/velero/v2alpha1/data_upload_types.go#L54) in DataUploadSpec from `*map[string]string` to `map[string]string`
|
||||
* Velero install command: due to the issue [#7264](https://github.com/vmware-tanzu/velero/issues/7264), v1.13.0 introduces a breaking change that enables the informer cache by default to keep the actual behavior consistent with the help message (the informer cache was disabled by default before the change).
|
||||
|
||||
|
||||
### Limitations/Known issues
|
||||
* The backup's VolumeInfo metadata doesn't have the information updated by the async operations. This may be supported in the v1.14 release.
|
||||
|
||||
### Note
|
||||
* Velero introduces the informer cache, which is enabled by default. The informer cache improves restore performance but may cause higher memory consumption. If you get OOM errors, increase the memory limit of the Velero pod or disable the informer cache by specifying the `--disable-informer-cache` option when installing Velero.
|
||||
|
||||
### Deprecation announcement
|
||||
* The generated k8s clients, informers, and listers are deprecated in the Velero v1.13 release. They reside in the Velero repository's pkg/generated directory. According to the n+2 support policy, the deprecated code is kept for two more releases. The pkg/generated directory should be deleted in the v1.15 release.
|
||||
* After the backup VolumeInfo metadata file is added to the backup, Velero decides how to restore the PV resource according to the VolumeInfo content. To support backups generated by older versions of Velero, the old logic is also kept. Support for backups without the VolumeInfo metadata file will be kept for two releases; the supporting logic will be deleted in the v1.15 release.
|
||||
|
||||
### All Changes
|
||||
* Make "disable-informer-cache" option false(enabled) by default to keep it consistent with the help message (#7294, @ywk253100)
|
||||
* Fix issue #6928, remove snapshot deletion timeout for PVB (#7282, @Lyndon-Li)
|
||||
* Do not set "targetNamespace" to namespace items (#7274, @reasonerjt)
|
||||
* Fix issue #7244. By the end of the upload, check the outstanding incomplete snapshots and delete them by calling ApplyRetentionPolicy (#7245, @Lyndon-Li)
|
||||
* Adjust the newline output of resource list in restore describer (#7238, @allenxu404)
|
||||
* Remove the redundant newline in backup describe output (#7229, @allenxu404)
|
||||
* Fix issue #7189, data mover generic restore - don't assume the first volume as the restore volume (#7201, @Lyndon-Li)
|
||||
* Update CSIVolumeSnapshotsCompleted in backup's status and the metric during backup finalize stage according to async operations content. (#7184, @blackpiglet)
|
||||
* Refactor DownloadRequest Stream function (#7175, @blackpiglet)
|
||||
* Add `--skip-immediately` flag to schedule commands; `--schedule-skip-immediately` server and install (#7169, @kaovilai)
|
||||
* Add node-agent concurrency doc and change the config name from dataPathConcurrency to loadConcurrency (#7161, @Lyndon-Li)
|
||||
* Enhance hooks tracker by adding a returned error to record function (#7153, @allenxu404)
|
||||
* Track the skipped PV when SnapshotVolumes set as false (#7152, @reasonerjt)
|
||||
* Add more linters part 2. (#7151, @blackpiglet)
|
||||
* Fix issue #7135, check pod status before checking node-agent pod status (#7150, @Lyndon-Li)
|
||||
* Treat namespace as a regular restorable item (#7143, @reasonerjt)
|
||||
* Allow sparse option for Kopia & Restic restore (#7141, @qiuming-best)
|
||||
* Use VolumeInfo to help restore the PV. (#7138, @blackpiglet)
|
||||
* Node agent restart enhancement (#7130, @qiuming-best)
|
||||
* Fix issue #6695, add describe for data mover backups (#7125, @Lyndon-Li)
|
||||
* Add hooks status to backup/restore CR (#7117, @allenxu404)
|
||||
* Include plugin name in the error message by operations (#7115, @reasonerjt)
|
||||
* Fix issue #7068, due to a behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order (#7102, @Lyndon-Li)
|
||||
* Generate VolumeInfo for backup. (#7100, @blackpiglet)
|
||||
* Fix issue #7094, fallback to full backup if previous snapshot is not found (#7096, @Lyndon-Li)
|
||||
* Fix issue #7068, due to a behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order (#7095, @Lyndon-Li)
|
||||
* Skip syncing the backup which doesn't contain backup metadata (#7081, @ywk253100)
|
||||
* Fix issue #6693, partially fail restore if CSI snapshot is involved but CSI feature is not ready, i.e., CSI feature gate is not enabled or CSI plugin is not installed. (#7077, @Lyndon-Li)
|
||||
* Truncate the credential file to avoid the change of secret content messing it up (#7072, @ywk253100)
|
||||
* Add VolumeInfo metadata structures. (#7070, @blackpiglet)
|
||||
* improve discoveryHelper.Refresh() in restore (#7069, @27149chen)
|
||||
* Add DataUpload Result and CSI VolumeSnapshot check for restore PV. (#7061, @blackpiglet)
|
||||
* Add the implementation for design #6950, configurable data path concurrency (#7059, @Lyndon-Li)
|
||||
* Make data mover fail early (#7052, @qiuming-best)
|
||||
* Remove dependency of generated client part 3. (#7051, @blackpiglet)
|
||||
* Update Backup.Status.CSIVolumeSnapshotsCompleted during finalize (#7046, @kaovilai)
|
||||
* Remove the Velero generated client. (#7041, @blackpiglet)
|
||||
* Fix issue #7027, data mover backup exposer should not assume the first volume as the backup volume in backup pod (#7038, @Lyndon-Li)
|
||||
* Read information from the credential specified by BSL (#7034, @ywk253100)
|
||||
* Fix #6857. Added check for matching Owner References when synchronizing backups, removing references that are not found/have mismatched uid. (#7032, @deefdragon)
|
||||
* Add description markers for dataupload and datadownload CRDs (#7028, @shubham-pampattiwar)
|
||||
* Add HealthCheckNodePort deletion logic for Service restore. (#7026, @blackpiglet)
|
||||
* Fix inconsistent behavior of Backup and Restore hook execution (#7022, @allenxu404)
|
||||
* Fix #6964. Don't use csiSnapshotTimeout (10 min) for waiting snapshot to readyToUse for data mover, so as to make the behavior complied with CSI snapshot backup (#7011, @Lyndon-Li)
|
||||
* restore: Use warning when Create IsAlreadyExist and Get error (#7004, @kaovilai)
|
||||
* Bump kopia to 0.15.0 (#7001, @Lyndon-Li)
|
||||
* Make Kopia file parallelism configurable (#7000, @qiuming-best)
|
||||
* Fix unified repository (kopia) s3 credentials profile selection (#6995, @kaovilai)
|
||||
* Fix #6988, always get region from BSL if it is not empty (#6990, @Lyndon-Li)
|
||||
* Limit PVC block mode logic to non-Windows platform. (#6989, @blackpiglet)
|
||||
* It is a valid case that the Status.RestoreSize field in VolumeSnapshot is not set; if so, get the volume size from the source PVC to create the backup PVC (#6976, @Lyndon-Li)
|
||||
* Check whether the action is a CSI action and whether CSI feature is enabled, before executing the action. (#6968, @blackpiglet)
|
||||
* Add the PV backup information design document. (#6962, @blackpiglet)
|
||||
* Change controller-runtime List option from MatchingFields to ListOptions (#6958, @blackpiglet)
|
||||
* Add the design for node-agent concurrency (#6950, @Lyndon-Li)
|
||||
* Import auth provider plugins (#6947, @0x113)
|
||||
* Fix #6668, add a limitation for file system restore parallelism with other types of restores (CSI snapshot restore, CSI snapshot movement restore) (#6946, @Lyndon-Li)
|
||||
* Add MSI Support for Azure plugin. (#6938, @yanggangtony)
|
||||
* Partially fix #6734, guide Kubernetes' scheduler to spread backup pods evenly across nodes as much as possible, so that data mover backup could achieve better parallelism (#6926, @Lyndon-Li)
|
||||
* Bump up aws sdk to aws-sdk-go-v2 (#6923, @reasonerjt)
|
||||
* Optional check if targeted container is ready before executing a hook (#6918, @Ripolin)
|
||||
* Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers (#6917, @27149chen)
|
||||
* Fix issue 6913: Velero Built-in Datamover: Backup gets stuck in phase WaitingForPluginOperations when Node Agent pod gets restarted (#6914, @shubham-pampattiwar)
|
||||
* Set ParallelUploadAboveSize as MaxInt64 and flush repo after setting up policy so that policy is retrieved correctly by TreeForSource (#6885, @Lyndon-Li)
|
||||
* Replace the base image with paketobuildpacks image (#6883, @ywk253100)
|
||||
* Fix issue #6859, move plugin depending podvolume functions to util pkg, so as to remove the dependencies to unnecessary repository packages like kopia, azure, etc. (#6875, @Lyndon-Li)
|
||||
* Fix #6861. Only Restic path requires repoIdentifier, so for non-restic path, set the repoIdentifier fields as empty in PVB and PVR and also remove the RepoIdentifier column in the get output of PVBs and PVRs (#6872, @Lyndon-Li)
|
||||
* Add volume types filter in resource policies (#6863, @qiuming-best)
|
||||
* change the metrics backup_attempt_total default value to 1. (#6838, @yanggangtony)
|
||||
* Bump kopia to v0.14 (#6833, @Lyndon-Li)
|
||||
* Retry failed create when using generateName (#6830, @sseago)
|
||||
* Fix issue #6786, always delete VSC regardless of the deletion policy (#6827, @Lyndon-Li)
|
||||
* Proposal to support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers (#6797, @27149chen)
|
||||
* Fix the node-agent missing metrics-address defines. (#6784, @yanggangtony)
|
||||
* Fix default BSL setting not work (#6771, @qiuming-best)
|
||||
* Update restore controller logic for restore deletion (#6770, @ywk253100)
|
||||
* Fix #6752: add namespace exclude check. (#6760, @blackpiglet)
|
||||
* Fix issue #6753, remove the check for read-only BSL in restore async operation controller since Velero cannot fully support read-only mode BSL in restore at present (#6757, @Lyndon-Li)
|
||||
* Fix issue #6647, add the --default-snapshot-move-data parameter to Velero install, so that users don't need to specify --snapshot-move-data per backup when they want to move snapshot data for all backups (#6751, @Lyndon-Li)
|
||||
* Use old(origin) namespace in resource modifier conditions in case namespace may change during restore (#6724, @27149chen)
|
||||
* Perf improvements for existing resource restore (#6723, @sseago)
|
||||
* Remove schedule-related metrics on schedule delete (#6715, @nilesh-akhade)
|
||||
* Kubernetes 1.27 new job label batch.kubernetes.io/controller-uid are deleted during restore per https://github.com/kubernetes/kubernetes/pull/114930 (#6712, @kaovilai)
|
||||
* This pr made some improvements in Resource Modifiers: 1. add label selector 2. change the field name from groupKind to groupResource (#6704, @27149chen)
|
||||
* Make Kopia support Azure AD (#6686, @ywk253100)
|
||||
* Add support for block volumes with Kopia (#6680, @dzaninovic)
|
||||
* Delete PartiallyFailed orphaned backups as well as Completed ones (#6649, @sseago)
|
||||
* Add CSI snapshot data movement doc (#6637, @Lyndon-Li)
|
||||
* Fixes #6636, skip subresource in resource discovery (#6635, @27149chen)
|
||||
* Add `orLabelSelectors` for backup, restore commands (#6475, @nilesh-akhade)
|
||||
* fix run preHook and postHook on completed pods (#5211, @cleverhu)
|
||||
@@ -1,105 +0,0 @@
|
||||
## v1.14
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.14.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.14.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.14/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.14/upgrade-to-1.14/
|
||||
|
||||
### Highlights
|
||||
|
||||
#### The maintenance work for kopia/restic backup repositories is run in jobs
|
||||
Since Velero started using Kopia for filesystem-level backup/restore, we've noticed an issue: when Velero connects to the Kopia backup repositories and performs maintenance, it sometimes consumes excessive memory, which can cause the Velero pod to get OOM-killed. To mitigate this issue, the maintenance work is moved out of the Velero pod into a separate Kubernetes job, and the user is able to specify the resource requests in "velero install".
|
||||
#### Volume Policies are extended to support more actions to handle volumes
|
||||
In an earlier release, a flexible volume policy was introduced to skip certain volumes from a backup. In v1.14 we've enhanced this policy to allow the user to set how the volumes should be backed up. The user can set "fs-backup" or "snapshot" as the value of "action" in the policy, and Velero will back up the volumes accordingly. This enhancement allows the user to achieve fine-grained control, like opt-in/out, without having to update the target workload. For more details please refer to https://velero.io/docs/v1.14/resource-filtering/#supported-volumepolicy-actions
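The sketch below shows what such a policy could look like; the condition and action fields follow the resource filtering doc linked above, and the driver name and ConfigMap name are placeholders.

```bash
# Hypothetical volume policy: snapshot CSI volumes from one driver, use fs-backup
# for NFS volumes (supported actions per the VolumePolicy doc).
cat <<'EOF' > volume-policy.yaml
version: v1
volumePolicies:
- conditions:
    csi:
      driver: ebs.csi.example.com
  action:
    type: snapshot
- conditions:
    nfs: {}
  action:
    type: fs-backup
EOF

kubectl -n velero create configmap demo-volume-policy --from-file=volume-policy.yaml

velero backup create app-backup --resource-policies-configmap demo-volume-policy
```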
|
||||
#### Node Selection for Data Movement Backup
|
||||
In Velero, the data movement flow relies on datamover pods, and these pods may take substantial resources and keep running for a long time. In v1.14, the user can create a configmap to define the eligible nodes on which the datamover pods are launched. For more details refer to https://velero.io/docs/v1.14/data-movement-backup-node-selection/
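A minimal sketch, assuming a `loadAffinity` section in the node-agent configuration ConfigMap as described in the node selection doc linked above; the label key and value are placeholders.

```bash
# Hypothetical example: only schedule datamover backup pods on nodes carrying a
# dedicated label (field layout per the data movement node selection doc).
cat <<'EOF' > node-agent-config.json
{
  "loadAffinity": [
    {
      "nodeSelector": {
        "matchLabels": {
          "example.com/datamover": "allowed"
        }
      }
    }
  ]
}
EOF

kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```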
|
||||
#### VolumeInfo metadata for restored volumes
|
||||
In v1.13, we introduced VolumeInfo metadata for backups to help the velero CLI and downstream adopters understand how Velero handles each volume during backup. In v1.14, similar metadata is persisted for each restore. The velero CLI is also updated to bring more info into the output of "velero restore describe".
|
||||
#### "Finalizing" phase is introduced to restores
|
||||
The "Finalizing" phase is added to the state transition flow to restore, which helps us fix several issues: The labels added to PVs will be restored after the data in the PV is restored via volumesnapshotter. The post restore hook will be executed after datamovement is finished.
|
||||
#### Certificate-based authentication support for Azure
|
||||
Besides service principal with secret (password)-based authentication, Velero introduces new support for service principal with certificate-based authentication in v1.14.0. This approach enables you to adopt phishing-resistant authentication by using conditional access policies, which better protects Azure resources and is the approach recommended by Azure.
|
||||
|
||||
### Runtime and dependencies
|
||||
* Golang runtime: v1.22.2
|
||||
* kopia: v0.17.0
|
||||
|
||||
### Limitations/Known issues
|
||||
* For external BackupItemAction plugins that take snapshots of PVs, such as the vSphere plugin: if the plugin checks the value of the "snapshotVolumes" field in the backup spec as a criterion for snapshotting, the settings in the volume policy will not take effect. For example, if "snapshotVolumes" is set to false in the backup spec but a volume meets the conditions in the volume policy for the "snapshot" action, the plugin will not take a snapshot of the volume, because the plugin does not check the settings in the volume policy. For more details please refer to #7818
|
||||
|
||||
### Breaking changes
|
||||
* The CSI plugin has been merged into the Velero repo in the v1.14 release. It is installed by default as an internal plugin, and should not be installed via the "--plugins" parameter in the "velero install" command.
|
||||
* The default resource requests and limits for the node agent are removed in v1.14, so that the node-agent pods have the "BestEffort" QoS class. For more details please refer to #7391
|
||||
* There's a change in namespace filtering behavior during backup: in v1.14, when the includedNamespaces/excludedNamespaces fields are not set and labelSelector/orLabelSelectors are set in the backup spec, the backup only includes the namespaces which contain resources that match the label selectors, while in previous releases all namespaces were included in the backup with such settings. For more details refer to #7105
|
||||
* Patching the PV in the "Finalizing" phase may cause the restore to end up in the "PartiallyFailed" state when the PV is blocked in the "Pending" state, while in the previous release the restore may have ended up in the "Completed" state. For more details refer to #7866
|
||||
|
||||
### All Changes
|
||||
* Fix backup log to show error string, not index (#7805, @piny940)
|
||||
* Modify the volume helper logic. (#7794, @blackpiglet)
|
||||
* Add documentation for extension of volume policy feature (#7779, @shubham-pampattiwar)
|
||||
* Surface errors when waiting for backupRepository and timeout occurs (#7762, @kaovilai)
|
||||
* Add existingResourcePolicy restore CR validation to controller (#7757, @kaovilai)
|
||||
* Fix condition matching in resource modifier when there are multiple rules (#7715, @27149chen)
|
||||
* Bump up the version of KinD and k8s in github actions (#7702, @reasonerjt)
|
||||
* Implementation for Extending VolumePolicies to support more actions (#7664, @shubham-pampattiwar)
|
||||
* Migrate from `github.com/Azure/azure-storage-blob-go` to `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` (#7598, @mmorel-35)
|
||||
* When Included/ExcludedNamespaces are omitted, and LabelSelector or OrLabelSelector is used, namespaces without selected items are excluded from backup. (#7697, @blackpiglet)
|
||||
* Display CSI snapshot restores in restore describe (#7687, @reasonerjt)
|
||||
* Use specific credential rather than the credential chain for Azure (#7680, @ywk253100)
|
||||
* Modify hook docs for clarity on displaying hook execution results (#7679, @allenxu404)
|
||||
* Wait for results of restore exec hook executions in Finalizing phase instead of InProgress phase (#7619, @allenxu404)
|
||||
* migrating to `sdk/resourcemanager/**/arm**` from `services/**/mgmt/**` (#7596, @mmorel-35)
|
||||
* Bump up to go1.22 (#7666, @reasonerjt)
|
||||
* Fix issue #7648. Adjust the exposing logic to avoid exposing failure and snapshot leak when expose fails (#7662, @Lyndon-Li)
|
||||
* Track and persist restore volume info (#7630, @reasonerjt)
|
||||
* Check the existence of the namespaces provided in the "--include-namespaces" option (#7569, @ywk253100)
|
||||
* Add the finalization phase to the restore workflow (#7377, @allenxu404)
|
||||
* Upgrade the version of go plugin related libs/tools (#7373, @ywk253100)
|
||||
* Check resource Group Version and Kind is available in cluster before attempting restore to prevent being stuck. (#7322, @kaovilai)
|
||||
* Merge CSI plugin code into Velero. (#7609, @blackpiglet)
|
||||
* Fix issue #7391, remove the default constraint for node-agent pods (#7488, @Lyndon-Li)
|
||||
* Fix DataDownload fails during restore for empty PVC workload (#7521, @qiuming-best)
|
||||
* Add repository maintenance job (#7451, @qiuming-best)
|
||||
* Check whether the VolumeSnapshot's source PVC is nil before using it. Skip populating VolumeInfo for data-moved PVs when CSI is not enabled. (#7515, @blackpiglet)
|
||||
* Fix issue #7308, change the data path requeue time to 5 second for data mover backup/restore, PVB and PVR. (#7458, @Lyndon-Li)
|
||||
* Patch newly dynamically provisioned PV with volume info to restore custom setting of PV (#7504, @allenxu404)
|
||||
* Adjust the logic for the backup_last_status metrics to stop incorrectly incrementing over time (#7445, @allenxu404)
|
||||
* dependabot: support github-actions updates (#7594, @mmorel-35)
|
||||
* Include the design for adding the finalization phase to the restore workflow (#7317, @allenxu404)
|
||||
* Fix issue #7211. Enable advanced feature capability and add support to concatenate objects for unified repo. (#7452, @Lyndon-Li)
|
||||
* Add design to introduce restore volume info (#7610, @reasonerjt)
|
||||
* Increase the k8s client QPS/burst to avoid throttling request errors (#7311, @ywk253100)
|
||||
* Support update the backup VolumeInfos by the Async ops result. (#7554, @blackpiglet)
|
||||
* FS backup created a PodVolumeBackup when the backup excluded the PVC, so logic was added to skip PVC volume types when the PVC is not included in the backup resources to be backed up. (#7472, @sbahar619)
|
||||
* Respect and use `credentialsFile` specified in BSL.spec.config when IRSA is configured over Velero Pod Environment credentials (#7374, @reasonerjt)
|
||||
* Move the native snapshot definition code into internal directory (#7544, @blackpiglet)
|
||||
* Fix issue #7036. Add the implementation of node selection for data mover backups (#7437, @Lyndon-Li)
|
||||
* Fix issue #7535, add the MustHave resource check during item collection and item filter for restore (#7585, @Lyndon-Li)
|
||||
* build(deps): bump json-patch to v5.8.0 (#7584, @mmorel-35)
|
||||
* Add confirm flag to velero plugin add (#7566, @kaovilai)
|
||||
* do not skip unknown gvr at the beginning and get new gr when kind is changed (#7523, @27149chen)
|
||||
* Fix snapshot leak for backup (#7558, @qiuming-best)
|
||||
* For issue #7036, add the document for data mover node selection (#7640, @Lyndon-Li)
|
||||
* Add design for Extending VolumePolicies to support more actions (#6956, @shubham-pampattiwar)
|
||||
* BackupRepositories associated with a BSL are invalidated when BSL is (re-)created. (#7380, @kaovilai)
|
||||
* Improve the concurrency for PVBs in different pods (#7571, @ywk253100)
|
||||
* Bump up Kopia to v0.16.0 and open kopia repo with no index change (#7559, @Lyndon-Li)
|
||||
* Bump up the versions of several Kubernetes-related libs (#7489, @ywk253100)
|
||||
* Make parallel restore configurable (#7512, @qiuming-best)
|
||||
* Support certificate-based authentication for Azure (#7549, @ywk253100)
|
||||
* Fix issue #7281, batch delete snapshots in the same repo (#7438, @Lyndon-Li)
|
||||
* Add CRD name to error message when it is not ready to use (#7295, @josemarevalo)
|
||||
* Add the design for node selection for data mover backup (#7383, @Lyndon-Li)
|
||||
* Bump up aws-sdk to latest version to leverage Pod Identity credentials. (#7307, @guikcd)
|
||||
* Fix issue #7246. Document the behavior for repo snapshot deletion (#7622, @Lyndon-Li)
|
||||
* Fix issue #7583, set backupName optional for Restore CRD (#7617, @Lyndon-Li)
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
## v1.15
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.15.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.15.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.15/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.15/upgrade-to-1.15/
|
||||
|
||||
### Highlights
|
||||
#### Data mover micro service
|
||||
Data transfer activities for CSI Snapshot Data Movement are moved from node-agent pods to dedicated backupPods or restorePods. This brings many benefits such as:
|
||||
- This avoids accessing volume data through the host path; host path access is privileged and may involve security escalations, which concern users.
|
||||
- This enables users to control resource (i.e., CPU, memory) allocations in a granular manner, e.g., per backup/restore of a volume.
|
||||
- This enhances resilience: a crash of one data movement activity won't affect others.
|
||||
- This prevents unnecessary full backups caused by host path changes after workload pods restart.
|
||||
- For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/vgdp-micro-service/vgdp-micro-service.md.
|
||||
|
||||
#### Item Block concepts and ItemBlockAction (IBA) plugin
|
||||
Item Block concepts are introduced for resource backups to help achieve multi-threaded backups. Specifically, correlated resources are categorized into the same item block, and item blocks can be processed concurrently in multiple threads.
|
||||
The ItemBlockAction plugin is introduced to help Velero categorize resources into item blocks. At present, Velero provides built-in IBAs for pods and PVCs, and Velero also supports customized IBAs for any resources.
|
||||
In v1.15, Velero doesn't yet process item blocks in multiple threads, though the item block concepts and IBA plugins are fully supported. Multi-threaded support will be delivered in future releases.
|
||||
For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/backup-performance-improvements.md.
|
||||
|
||||
#### Node selection for repository maintenance job
|
||||
Repository maintenance is a resource-consuming task. Velero now allows you to configure the nodes that run repository maintenance jobs, so that you can run them on idle nodes or keep them away from nodes hosting critical workloads.
|
||||
To support the configuration, a new repository maintenance configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/repository-maintenance/.
|
||||
|
||||
#### Backup PVC read-only configuration
|
||||
In 1.15, Velero allows you to configure the data mover backupPods to mount the backupPVCs read-only. In this way, the data mover expose process can be significantly accelerated for some storages (e.g., Ceph).
|
||||
To support the configuration, a new backup PVC configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/data-movement-backup-pvc-configuration/.
|
||||
|
||||
#### Backup PVC storage class configuration
|
||||
In 1.15, Velero allows you to configure the storage class used by the data mover backupPods. In this way, the provisioning of backupPVCs doesn't need to adhere to the same pattern as workload PVCs; e.g., a backupPVC only needs one replica, whereas a workload PVC may have multiple replicas.
|
||||
To support the configuration, the same backup PVC configuration configMap is used.
|
||||
For more information, check the document https://velero.io/docs/v1.15/data-movement-backup-pvc-configuration/.
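A minimal sketch combining the read-only and storage class options above, assuming a `backupPVC` section of the node-agent configuration keyed by the source volume's storage class; the names are placeholders, and the field names should be verified against the backup PVC configuration doc linked above.

```bash
# Hypothetical backupPVC configuration: for source volumes of storage class
# "workload-sc", create the backupPVC with a cheaper storage class and mount it
# read-only.
cat <<'EOF' > node-agent-config.json
{
  "backupPVC": {
    "workload-sc": {
      "storageClass": "backup-sc",
      "readOnly": true
    }
  }
}
EOF

kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```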
|
||||
|
||||
#### Backup repository data cache configuration
|
||||
The backup repository may need to cache data on the client side during various repository operations, i.e., read, write, maintenance, etc. The cache consumes the root file system space of the pod where the repository access happens.
|
||||
In 1.15, Velero allows you to configure the total size of the cache per repository. In this way, if your pod doesn't have enough space in its root file system, the pod won't be evicted due to running out of ephemeral storage.
|
||||
To support the configuration, a new backup repository configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/backup-repository-configuration/.
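As a rough sketch, the per-repository cache limit could be configured like below; the ConfigMap layout (keyed by repository type) and the `cacheLimitMB` field name are assumptions to verify against the backup repository configuration doc linked above.

```bash
# Hypothetical backup repository configuration: cap the client-side Kopia cache
# at roughly 2GiB per repository. The ConfigMap is then referenced via the Velero
# server's backup repository configmap parameter (see the doc above).
cat <<'EOF' > backup-repo-config.json
{
  "cacheLimitMB": 2048
}
EOF

kubectl -n velero create configmap backup-repository-config \
  --from-file=kopia=backup-repo-config.json
```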
|
||||
|
||||
#### Performance improvements
|
||||
In 1.15, several performance related issues/enhancements are included, which makes significant performance improvements in specific scenarios:
|
||||
- There was a memory leak in the Velero server after plugin calls; it is now fixed, see issue https://github.com/vmware-tanzu/velero/issues/7925
|
||||
- The `client-burst/client-qps` parameters are automatically inherited by plugins, so that you can use the same Velero server parameters to accelerate plugin executions when a large number of API server calls happen, see issue https://github.com/vmware-tanzu/velero/issues/7806
|
||||
- Maintenance of a Kopia repository takes huge amounts of memory in scenarios where a huge number of files have been backed up; Velero 1.15 includes the Kopia upstream enhancement to fix the problem, see issue https://github.com/vmware-tanzu/velero/issues/7510
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: v1.22.8
|
||||
kopia: v0.17.0
|
||||
|
||||
### Limitations/Known issues
|
||||
#### Read-only backup PVC may not work on SELinux environments
|
||||
Due to a Kubernetes upstream issue, if a volume is mounted as read-only in SELinux environments, the read privilege is not granted to any user; as a result, the data mover backup will fail. On the other hand, the backupPVC must be mounted as read-only in order to accelerate the data mover expose process.
|
||||
Therefore, a user option is added in the same backup PVC configuration configMap; once the option is enabled, the backupPod container runs as a super-privileged container and disables SELinux access control. If you have concerns about this super-privileged container, or you have configured [pod security admissions](https://kubernetes.io/docs/concepts/security/pod-security-admission/) and don't allow super-privileged containers, you will not be able to use this read-only backupPVC feature and will lose the benefit of accelerating the data mover expose process.
|
||||
|
||||
### Breaking changes
|
||||
#### Deprecation of Restic
|
||||
The Restic path for fs-backup enters the deprecation process starting from 1.15. According to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/v1.15/GOVERNANCE.md#deprecation-policy), for 1.15, if the Restic path is used, fs-backup backups/restores are still created and succeed, but you will see warnings in the scenarios below:
|
||||
- When `--uploader-type=restic` is used in Velero installation
|
||||
- When Restic path is used to create backup/restore of fs-backup
|
||||
|
||||
#### node-agent configuration name is configurable
|
||||
Previously, the node-agent configuration configMap was looked up by a fixed name. In 1.15, Velero allows you to customize the name of the configMap; on the other hand, the name must then be specified via the node-agent server parameter `node-agent-configmap`.
|
||||
|
||||
#### Repository maintenance job configurations in Velero server parameter are moved to repository maintenance job configuration configMap
|
||||
In 1.15, the Velero server parameters below for repository maintenance jobs are moved to the repository maintenance job configuration configMap. For backward compatibility, the same Velero server parameters are preserved as-is, but the configMap is recommended, and the values in the configMap take precedence if they exist in both places:
|
||||
```
|
||||
--keep-latest-maintenance-jobs
|
||||
--maintenance-job-cpu-request
|
||||
--maintenance-job-mem-request
|
||||
--maintenance-job-cpu-limit
|
||||
--maintenance-job-mem-limit
|
||||
```
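For illustration, the equivalent ConfigMap could look roughly like the sketch below; the `global` key and field names are assumptions based on the repository maintenance doc (https://velero.io/docs/v1.15/repository-maintenance/) and should be verified there before use.

```bash
# Hypothetical repository maintenance job configuration mirroring the server
# flags above (verify the field names against the repository maintenance doc).
cat <<'EOF' > repo-maintenance-job-config.json
{
  "global": {
    "keepLatestMaintenanceJobs": 3,
    "podResources": {
      "cpuRequest": "100m",
      "cpuLimit": "200m",
      "memRequest": "100Mi",
      "memLimit": "200Mi"
    }
  }
}
EOF

kubectl -n velero create configmap repo-maintenance-job-config \
  --from-file=repo-maintenance-job-config.json
```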
|
||||
|
||||
#### Changing PVC selected-node feature is deprecated
|
||||
In 1.15, the [Changing PVC selected-node feature](https://velero.io/docs/v1.15/restore-reference/#changing-pvc-selected-node) enters the deprecation process and will be removed in future releases according to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/v1.15/GOVERNANCE.md#deprecation-policy). Usage of this feature for any purpose is not recommended.
|
||||
|
||||
### All Changes
|
||||
* add no-relabeling option to backupPVC configmap (#8288, @sseago)
|
||||
* only set spec.volumes readonly if PVC is readonly for datamover (#8284, @sseago)
|
||||
* Add labels to maintenance job pods (#8256, @shubham-pampattiwar)
|
||||
* Add the Carvel package related resources to the restore priority list (#8228, @ywk253100)
|
||||
* Reduces indirect imports for plugin/framework importers (#8208, @kaovilai)
|
||||
* Add controller name to periodical_enqueue_source. The logger parameter now includes an additional field with the value of reflect.TypeOf(objList).String() and another field with the value of controllerName. (#8198, @kaovilai)
|
||||
* Update Openshift SCC docs link (#8170, @shubham-pampattiwar)
|
||||
* Partially fix issue #8138, add doc for node-agent memory preserve (#8167, @Lyndon-Li)
|
||||
* Pass Velero server command args to the plugins (#8166, @ywk253100)
|
||||
* Fix issue #8155, Merge Kopia upstream commits for critical issue fixes and performance improvements (#8158, @Lyndon-Li)
|
||||
* Implement the Repo maintenance Job configuration. (#8145, @blackpiglet)
|
||||
* Add document for data mover micro service (#8144, @Lyndon-Li)
|
||||
* Fix issue #8134, allow to config resource request/limit for data mover micro service pods (#8143, @Lyndon-Li)
|
||||
* Apply backupPVCConfig to backupPod volume spec (#8141, @shubham-pampattiwar)
|
||||
* Add resource modifier for velero restore describe CLI (#8139, @blackpiglet)
|
||||
* Fix issue #7620, add doc for backup repo config (#8131, @Lyndon-Li)
|
||||
* Modify E2E and perf test report generated directory (#8129, @blackpiglet)
|
||||
* Add docs for backup pvc config support (#8119, @shubham-pampattiwar)
|
||||
* Delete generated k8s client and informer. (#8114, @blackpiglet)
|
||||
* Add support for backup PVC configuration (#8109, @shubham-pampattiwar)
|
||||
* ItemBlock model and phase 1 (single-thread) workflow changes (#8102, @sseago)
|
||||
* Fix issue #8032, make node-agent configMap name configurable (#8097, @Lyndon-Li)
|
||||
* Fix issue #8072, add the warning messages for restic deprecation (#8096, @Lyndon-Li)
|
||||
* Fix issue #7620, add backup repository configuration implementation and support cacheLimit configuration for Kopia repo (#8093, @Lyndon-Li)
|
||||
* Patch dbr's status when error happens (#8086, @reasonerjt)
|
||||
* According to design #7576, after node-agent restarts, if a DU/DD is in InProgress status, re-capture the data mover ms pod and continue the execution (#8085, @Lyndon-Li)
|
||||
* Updates to IBM COS documentation to match current version (#8082, @gjanders)
|
||||
* Data mover micro service DUCR/DDCR controller refactor according to design #7576 (#8074, @Lyndon-Li)
|
||||
* add retries with timeout to existing patch calls that moves a backup/restore from InProgress/Finalizing to a final status phase. (#8068, @kaovilai)
|
||||
* Data mover micro service restore according to design #7576 (#8061, @Lyndon-Li)
|
||||
* Internal ItemBlockAction plugins (#8054, @sseago)
|
||||
* Data mover micro service backup according to design #7576 (#8046, @Lyndon-Li)
|
||||
* Avoid wrapping failed PVB status with empty message. (#8028, @mrnold)
|
||||
* Created new ItemBlockAction (IBA) plugin type (#8026, @sseago)
|
||||
* Make PVPatchMaximumDuration timeout configurable (#8021, @shubham-pampattiwar)
|
||||
* Reuse existing plugin manager for get/put volume info (#8012, @sseago)
|
||||
* Data mover ms watcher according to design #7576 (#7999, @Lyndon-Li)
|
||||
* New data path for data mover ms according to design #7576 (#7988, @Lyndon-Li)
|
||||
* For issue #7700 and #7747, add the design for backup PVC configurations (#7982, @Lyndon-Li)
|
||||
* Only get VolumeSnapshotClass when DataUpload exists. (#7974, @blackpiglet)
|
||||
* Fix issue #7972, sync the backupPVC deletion in expose clean up (#7973, @Lyndon-Li)
|
||||
* Expose the VolumeHelper to third-party plugins. (#7969, @blackpiglet)
|
||||
* Check whether the volume's source is PVC before fetching its PV. (#7967, @blackpiglet)
|
||||
* Check whether the namespaces specified in namespace filter exist. (#7965, @blackpiglet)
|
||||
* Add design for backup repository configurations for issue #7620, #7301 (#7963, @Lyndon-Li)
|
||||
* New data path for data mover ms according to design #7576 (#7955, @Lyndon-Li)
|
||||
* Skip PV patch step in Restore workflow for WaitForFirstConsumer VolumeBindingMode Pending state PVCs (#7953, @shubham-pampattiwar)
|
||||
* Fix issue #7904, add the deprecation and limitation clarification for change PVC selected-node feature (#7948, @Lyndon-Li)
|
||||
* Expose the VolumeHelper to third-party plugins. (#7944, @blackpiglet)
|
||||
* Don't consider unschedulable pods unrecoverable (#7899, @sseago)
|
||||
* Upgrade to robfig/cron/v3 to support time zone specification. (#7793, @kaovilai)
|
||||
* Add the result in the backup's VolumeInfo. (#7775, @blackpiglet)
|
||||
* Migrate from github.com/golang/protobuf to google.golang.org/protobuf (#7593, @mmorel-35)
|
||||
* Add the design for data mover micro service (#7576, @Lyndon-Li)
|
||||
* Descriptive restore error when restoring into a terminating namespace. (#7424, @kaovilai)
|
||||
* Ignore missing path error in conditional match (#7410, @seanblong)
|
||||
* Propose a deprecation process for velero (#5532, @shubham-pampattiwar)
|
||||
@@ -1,156 +0,0 @@
|
||||
## v1.16
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.16.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.16.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.16/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.16/upgrade-to-1.16/
|
||||
|
||||
### Highlights
|
||||
#### Windows cluster support
|
||||
In v1.16, Velero supports running in Windows clusters and backing up/restoring Windows workloads, either stateful or stateless:
|
||||
* Hybrid build and all-in-one image: the build process is enhanced to build an all-in-one image for hybrid CPU architecture and hybrid platform. For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/multiple-arch-build-with-windows.md
|
||||
* Deployment in Windows clusters: the Velero node-agent, data mover pods, and maintenance jobs now support running on both Linux and Windows nodes
|
||||
* Data mover backup/restore Windows workloads: Velero built-in data mover supports Windows workloads throughout its full cycle, i.e., discovery, backup, restore, pre/post hook, etc. It automatically identifies Windows workloads and schedules data mover pods to the right group of nodes
|
||||
|
||||
Check the epic issue https://github.com/vmware-tanzu/velero/issues/8289 for more information.
|
||||
|
||||
#### Parallel Item Block backup
|
||||
v1.16 now supports backing up item blocks in parallel. Specifically, during backup, correlated resources are grouped into item blocks, and the Velero backup engine creates a thread pool to back up the item blocks in parallel. This significantly improves backup throughput, especially when there is a large number of resources.
|
||||
Pre/post hooks also belong to item blocks, so they run in parallel along with the item blocks.
|
||||
Users are allowed to configure the parallelism through the `--item-block-worker-count` Velero server parameter. If not configured, the default parallelism is 1.
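For example (a sketch; the flag is listed in the All Changes section below, and exists on both `velero install` and `velero server` per #8380), the worker count can be set at install time:

```bash
# Enable up to 4 parallel ItemBlock workers (the default is 1); other install
# flags are omitted here.
velero install --item-block-worker-count 4 # ...plus your usual install flags
```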
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8334.
|
||||
|
||||
#### Data mover restore enhancement in scalability
|
||||
In previous releases, for each volume in WaitForFirstConsumer mode, data mover restore is only allowed to happen on the node the volume is attached to. This severely degrades the parallelism and the balance of node resource (CPU, memory, network bandwidth) consumption for data mover restore (https://github.com/vmware-tanzu/velero/issues/8044).
|
||||
|
||||
In v1.16, users are allowed to configure data mover restores to run and spread evenly across all nodes in the cluster. The configuration is done through a new flag `ignoreDelayBinding` in the node-agent configuration (https://github.com/vmware-tanzu/velero/issues/8242).
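A minimal sketch, assuming the flag lives under a `restorePVC` section of the node-agent configuration; the exact placement should be verified against the node-agent configuration doc.

```bash
# Hypothetical node-agent configuration enabling ignoreDelayBinding so restore
# pods can be spread across nodes even for WaitForFirstConsumer volumes.
cat <<'EOF' > node-agent-config.json
{
  "restorePVC": {
    "ignoreDelayBinding": true
  }
}
EOF

kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```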
|
||||
|
||||
#### Data mover enhancements in observability
|
||||
In 1.16, some observability enhancements are added:
|
||||
* Output various statuses of intermediate objects for failures of data mover backup/restore (https://github.com/vmware-tanzu/velero/issues/8267)
|
||||
* Output the errors when Velero fails to delete intermediate objects during clean up (https://github.com/vmware-tanzu/velero/issues/8125)
|
||||
|
||||
The outputs are in the same node-agent log and enabled automatically.
|
||||
|
||||
#### CSI snapshot backup/restore enhancement in usability
|
||||
In previous releases, an unnecessary VolumeSnapshotContent object was retained for each backup and synced to other clusters sharing the same backup storage location. And during restore, the retained VolumeSnapshotContent was also restored unnecessarily.
|
||||
|
||||
In 1.16, the retained VolumeSnapshotContent is removed from the backup, so no unnecessary CSI objects are synced or restored.
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8725.
|
||||
|
||||
#### Backup Repository Maintenance enhancement in resiliency and observability
|
||||
In v1.16, some enhancements of backup repository maintenance are added to improve the observability and resiliency:
|
||||
* A new backup repository maintenance history section, called `RecentMaintenance`, is added to the BackupRepository CR. Specifically, for each BackupRepository, the recent maintenance results are recorded, including start/completion time, completion status, and error message. (https://github.com/vmware-tanzu/velero/issues/7810)
|
||||
* Running maintenance jobs are now recaptured after Velero server restarts. (https://github.com/vmware-tanzu/velero/issues/7753)
|
||||
* The maintenance job will not be launched for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8238)
|
||||
* The backup repository will not try to initialize a new repository for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8091)
|
||||
* Users are now allowed to configure the intervals of an effective maintenance as `normalGC`, `fastGC`, or `eagerGC`, through the `fullMaintenanceInterval` parameter in the backupRepository configuration (see the sketch below). (https://github.com/vmware-tanzu/velero/issues/8364)
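As a sketch of the `fullMaintenanceInterval` option above: the ConfigMap layout and field name are assumptions to verify against the backup repository configuration doc, and #8581 in All Changes lists the supported values.

```bash
# Hypothetical backup repository configuration switching Kopia full maintenance
# to the faster "fastGC" interval (values: normalGC, fastGC, eagerGC per #8581).
cat <<'EOF' > backup-repo-config.json
{
  "fullMaintenanceInterval": "fastGC"
}
EOF

kubectl -n velero create configmap backup-repository-config \
  --from-file=kopia=backup-repo-config.json
```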
|
||||
|
||||
#### Volume Policy enhancement of filtering volumes by PVC labels
|
||||
In v1.16, Volume Policy is extended to support filtering volumes by PVC labels. (https://github.com/vmware-tanzu/velero/issues/8256).
|
||||
|
||||
#### Resource Status restore per object
|
||||
In v1.16, users are allowed to define whether to restore resource status per object through an annotation `velero.io/restore-status` set on the object. (https://github.com/vmware-tanzu/velero/issues/8204).
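For example (the annotation key comes from the issue above; the value convention and resource names are assumptions to verify against the object-level status restore doc):

```bash
# Opt a single object into status restore, overriding the restore's global
# restoreStatus setting for this object only.
kubectl -n demo annotate mycustomresources.example.com my-cr velero.io/restore-status=true
```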
|
||||
|
||||
#### Velero Restore Helper binary is merged into Velero image
|
||||
In v1.16, the Velero binaries, i.e., velero, velero-helper, and velero-restore-helper, are all included in the single Velero image. (https://github.com/vmware-tanzu/velero/issues/8484).
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: 1.23.7
|
||||
kopia: 0.19.0
|
||||
|
||||
### Limitations/Known issues
|
||||
#### Limitations of Windows support
|
||||
* fs-backup is not supported for Windows workloads, so fs-backup runs only on Linux nodes for Linux workloads
|
||||
* Backup/restore of NTFS extended attributes/advanced features are not supported, i.e., Security Descriptors, System/Hidden/ReadOnly attributes, Creation Time, NTFS Streams, etc.
|
||||
|
||||
### All Changes
|
||||
* Add third party annotation support for maintenance job, so that the declared third party annotations could be added to the maintenance job pods (#8812, @Lyndon-Li)
|
||||
* Fix issue #8803, use deterministic name to create backupRepository (#8808, @Lyndon-Li)
|
||||
* Refactor restoreItem and related functions to differentiate the backup resource name and the restore target resource name. (#8797, @blackpiglet)
|
||||
* ensure that PV is removed before VS is deleted (#8777, @ix-rzi)
|
||||
* host_pods should not be mandatory to node-agent (#8774, @mpryc)
|
||||
* Log doesn't show pv name, but displays %!s(MISSING) instead (#8771, @hu-keyu)
|
||||
* Fix issue #8754, add third party annotation support for data mover (#8770, @Lyndon-Li)
|
||||
* Add docs for volume policy with labels as a criteria (#8759, @shubham-pampattiwar)
|
||||
* Move pvc annotation removal from CSI RIA to regular PVC RIA (#8755, @sseago)
|
||||
* Add doc for maintenance history (#8747, @Lyndon-Li)
|
||||
* Fix issue #8733, add doc for restorePVC (#8737, @Lyndon-Li)
|
||||
* Fix issue #8426, add doc for Windows support (#8736, @Lyndon-Li)
|
||||
* Fix issue #8475, refactor build-from-source doc for hybrid image build (#8729, @Lyndon-Li)
|
||||
* Return directly if no pod volume backups are tracked (#8728, @ywk253100)
|
||||
* Fix issue #8706, for immediate volumes, there is no selected-node annotation on PVC, so deduce the attached node from VolumeAttachment CRs (#8715, @Lyndon-Li)
|
||||
* Add labels as a criteria for volume policy (#8713, @shubham-pampattiwar)
|
||||
* Copy SecurityContext from Containers[0] if present for PVR (#8712, @sseago)
|
||||
* Support pushing images to an insecure registry (#8703, @ywk253100)
|
||||
* Modify golangci configuration to make it work. (#8695, @blackpiglet)
|
||||
* Run backup post hooks inside ItemBlock synchronously (#8694, @ywk253100)
|
||||
* Add docs for object level status restore (#8693, @shubham-pampattiwar)
|
||||
* Clean artifacts generated during CSI B/R. (#8684, @blackpiglet)
|
||||
* Don't run maintenance on the ReadOnly BackupRepositories. (#8681, @blackpiglet)
|
||||
* Fix #8657: WaitGroup panic issue (#8679, @ywk253100)
|
||||
* Fixes issue #8214, validate `--from-schedule` flag in create backup command to prevent empty or whitespace-only values. (#8665, @aj-2000)
|
||||
* Implement parallel ItemBlock processing via backup_controller goroutines (#8659, @sseago)
|
||||
* Clean up leaked CSI snapshot for incomplete backup (#8637, @raesonerjt)
|
||||
* Handle update conflict when restoring the status (#8630, @ywk253100)
|
||||
* Fix issue #8419, support repo maintenance job to run on Windows nodes (#8626, @Lyndon-Li)
|
||||
* Always create DataUpload configmap in restore namespace (#8621, @sseago)
|
||||
* Fix issue #8091, avoid to create new repo when BSL is readonly (#8615, @Lyndon-Li)
|
||||
* Fix issue #8242, distribute dd evenly across nodes (#8611, @Lyndon-Li)
|
||||
* Fix issue #8497, update du/dd progress on completion (#8608, @Lyndon-Li)
|
||||
* Fix issue #8418, add Windows toleration to data mover pods (#8606, @Lyndon-Li)
|
||||
* Check the PVB status via podvolume Backupper rather than calling API server to avoid API server issue (#8603, @ywk253100)
|
||||
* Fix issue #8067, add tmp folder (/tmp for linux, C:\Windows\Temp for Windows) as an alternative of udmrepo's config file location (#8602, @Lyndon-Li)
|
||||
* Data mover restore for Windows (#8594, @Lyndon-Li)
|
||||
* Skip patching the PV in finalization for failed operation (#8591, @reasonerjt)
|
||||
* Fix issue #8579, set event burst to block event broadcaster from filtering events (#8590, @Lyndon-Li)
|
||||
* Configurable Kopia Maintenance Interval. backup-repository-configmap adds an option for a configurable `fullMaintenanceInterval`, where fastGC (12 hours) and eagerGC (6 hours) allow for faster removal of deleted velero backups from the kopia repo. (#8581, @kaovilai)
|
||||
* Fix issue #7753, recall repo maintenance history on Velero server restart (#8580, @Lyndon-Li)
|
||||
* Clear validation errors when schedule is valid (#8575, @ywk253100)
|
||||
* Merge restore helper image into Velero server image (#8574, @ywk253100)
|
||||
* Don't include excluded items in ItemBlocks (#8572, @sseago)
|
||||
* fs uploader and block uploader support Windows nodes (#8569, @Lyndon-Li)
|
||||
* Fix issue #8418, support data mover backup for Windows nodes (#8555, @Lyndon-Li)
|
||||
* Fix issue #8044, allow users to ignore delay binding the restorePVC of data mover when it is in WaitForFirstConsumer mode (#8550, @Lyndon-Li)
|
||||
* Fix issue #8539, validate uploader types when o.CRDsOnly is set to false only since CRD installation doesn't rely on uploader types (#8538, @Lyndon-Li)
|
||||
* Fix issue #7810, add maintenance history for backupRepository CRs (#8532, @Lyndon-Li)
|
||||
* Make fs-backup work on linux nodes with the new Velero deployment and disable fs-backup if the source/target pod is running in non-linux node (#8424) (#8518, @Lyndon-Li)
|
||||
* Fix issue: backup schedule pause/unpause doesn't work (#8512, @ywk253100)
|
||||
* Fix backup post hook issue #8159 (caused by #7571): always execute backup post hooks after PVBs are handled (#8509, @ywk253100)
|
||||
* Fix issue #8267, enhance the error message when expose fails (#8508, @Lyndon-Li)
|
||||
* Fix issue #8416, #8417, deploy Velero server and node-agent in linux/Windows hybrid env (#8504, @Lyndon-Li)
|
||||
* Design to add label selector as a criteria for volume policy (#8503, @shubham-pampattiwar)
|
||||
* Related to issue #8485, move the acceptedByNode and acceptedTimestamp to Status of DU/DD CRD (#8498, @Lyndon-Li)
|
||||
* Add SecurityContext to restore-helper (#8491, @reasonerjt)
|
||||
* Fix issue #8433, add third party labels to data mover pods when the same labels exist in node-agent pods (#8487, @Lyndon-Li)
|
||||
* Fix issue #8485, add an accepted time so as to count the prepare timeout (#8486, @Lyndon-Li)
|
||||
* Fix issue #8125, log diagnostic info for data mover exposers when expose timeout (#8482, @Lyndon-Li)
|
||||
* Fix issue #8415, implement multi-arch build and Windows build (#8476, @Lyndon-Li)
|
||||
* Pin kopia to 0.18.2 (#8472, @Lyndon-Li)
|
||||
* Add nil check for updating DataUpload VolumeInfo in finalizing phase (#8471, @blackpiglet)
|
||||
* Allowing Object-Level Resource Status Restore (#8464, @shubham-pampattiwar)
|
||||
* For issue #8429. Add the design for multi-arch build and windows build (#8459, @Lyndon-Li)
|
||||
* Upgrade go.mod k8s.io/ go.mod to v0.31.3 and implemented proper logger configuration for both client-go and controller-runtime libraries. This change ensures that logging format and level settings are properly applied throughout the codebase. The update improves logging consistency and control across the Velero system. (#8450, @kaovilai)
|
||||
* Add Design for Allowing Object-Level Resource Status Restore (#8403, @shubham-pampattiwar)
|
||||
* Fix issue #8391, check ErrCancelled from suffix of data mover pod's termination message (#8396, @Lyndon-Li)
|
||||
* Fix issue #8394, don't call closeDataPath in VGDP callbacks, otherwise, the VGDP cleanup will hang (#8395, @Lyndon-Li)
|
||||
* Adding support in velero Resource Policies for filtering PVs based on additional VolumeAttributes properties under CSI PVs (#8383, @mayankagg9722)
|
||||
* Add --item-block-worker-count flag to velero install and server (#8380, @sseago)
|
||||
* Make BackedUpItems thread safe (#8366, @sseago)
|
||||
* Include --annotations flag in backup and restore create commands (#8354, @alromeros)
|
||||
* Use aggregated discovery API to discovery API groups and resources (#8353, @ywk253100)
|
||||
* Copy "envFrom" from Velero server when creating maintenance jobs (#8343, @evhan)
|
||||
* Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go (#8297, @kaovilai)
|
||||
* Bump up version of client-go and controller-runtime (#8275, @ywk253100)
|
||||
* fix(pkg/repository/maintenance): don't panic when there's no container statuses (#8271, @mcluseau)
|
||||
* Add Backup warning for inclusion of NS managed by ArgoCD (#8257, @shubham-pampattiwar)
|
||||
* Added tracking for deleted namespace status check in restore flow. (#8233, @sangitaray2021)
|
||||
@@ -1,143 +0,0 @@
|
||||
## v1.17
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.17.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.17.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.17/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.17/upgrade-to-1.17/
|
||||
|
||||
### Highlights
|
||||
#### Modernized fs-backup
|
||||
In v1.17, Velero fs-backup is modernized to the micro-service architecture, which brings the benefits below:
|
||||
- Many features that were previously absent from fs-backup are now available, e.g., load concurrency control, cancel, resume on restart, etc.
|
||||
- fs-backup is more robust: a running backup/restore can survive a node-agent restart, and resource allocation is more granular, so the failure of one backup/restore won't impact others.
|
||||
- The resource usage of the node-agent is steady; in particular, the node-agent pods won't request huge amounts of memory and hold it for a long time.
|
||||
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md for more details.
|
||||
|
||||
#### fs-backup support Windows cluster
|
||||
In v1.17, Velero fs-backup supports backing up/restoring Windows workloads. By leveraging the new micro-service architecture for fs-backup, data mover pods can run on Windows nodes and back up/restore Windows volumes. Together with CSI snapshot data movement for Windows, which was delivered in 1.16, Velero now supports Windows workload backup/restore in all scenarios.
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md for more details.
|
||||
|
||||
#### Volume group snapshot support
|
||||
In v1.17, Velero supports [volume group snapshots](https://kubernetes.io/blog/2024/12/18/kubernetes-1-32-volume-group-snapshot-beta/), a beta feature in upstream Kubernetes, for both CSI snapshot backup and CSI snapshot data movement. This allows snapshots of multiple volumes to be taken at the same point in time to achieve write-order consistency, which helps improve data consistency when the volumes being backed up are correlated.
|
||||
Check the document https://velero.io/docs/main/volume-group-snapshots/ for more details.
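To make this concrete, here is a minimal, hypothetical sketch: PVCs that should be snapshotted together carry a shared group label, and the backup points `volumeGroupSnapshotLabelKey` (a field visible in the Backup CRD further down in this diff) at that label key. The label key, namespace, and names below are illustrative, not defaults.

```yaml
# Hypothetical example: PVCs sharing this label value are grouped under one VolumeGroupSnapshot.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: app-data
  namespace: my-app
  labels:
    app.example.com/volume-group: my-app-group   # illustrative group label
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
---
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: my-app-backup
  namespace: velero
spec:
  includedNamespaces:
    - my-app
  snapshotVolumes: true
  volumeGroupSnapshotLabelKey: app.example.com/volume-group   # PVCs sharing this key are grouped under one VGS
```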
|
||||
|
||||
#### Priority class support
|
||||
In v1.17, [Kubernetes priority class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) is supported for all modules across Velero. Specifically, users can configure a priority class for the Velero server, node-agent, data mover pods, and backup repository maintenance jobs separately.
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/priority-class-name-support_design.md for more details.
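As a minimal sketch of the Kubernetes side of this feature: a `PriorityClass` is defined once, and the pods of Velero components reference it through the standard pod-level `priorityClassName` field. How each Velero component (server, node-agent, data movers, maintenance jobs) is pointed at the class is described in the design linked above; the class name and value below are hypothetical.

```yaml
# Hypothetical PriorityClass that Velero components could be configured to use.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: velero-critical
value: 1000000
globalDefault: false
description: "Scheduling priority for Velero server, node-agent, data mover pods, and maintenance jobs."
```

The resulting pods then carry `priorityClassName: velero-critical` in their spec, which the scheduler uses when making preemption decisions.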
|
||||
|
||||
#### Scalability and Resiliency improvements of data movers
|
||||
##### Reduce excessive number of data mover pods in Pending state
|
||||
In v1.17, Velero allows users to set a `PrepareQueueLength` in the node-agent configuration. Data mover pods and volumes beyond this number won't be created until data path quota becomes available, so excessive cluster resources aren't consumed unnecessarily, which is particularly helpful in large-scale environments. This improvement applies to all kinds of data movement, including fs-backup and CSI snapshot data movement.
|
||||
Check design https://github.com/vmware-tanzu/velero/blob/main/design/node-agent-load-soothing.md for more details.
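A minimal sketch of what this might look like, assuming the node-agent configuration ConfigMap accepts a `prepareQueueLength` field (the ConfigMap name, data key, and exact field casing are assumptions derived from the `PrepareQueueLength` field named above; the linked design is authoritative):

```yaml
# Illustrative node-agent configuration: limit how many data movements may
# sit in the prepare stage (pods/volumes created, waiting for data path quota).
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-agent-config        # name assumed; pass it to node-agent per the design doc
  namespace: velero
data:
  node-agent-config.json: |
    {
      "prepareQueueLength": 10
    }
```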
|
||||
|
||||
##### Enhancement on node-agent restart handling for data movements
|
||||
In v1.17, data movements in all phases can survive a node-agent restart and resume themselves; when a data movement is orphaned in special cases, e.g., a cluster node is absent, it can also be canceled appropriately after the restart. This improvement applies to all kinds of data movement, including fs-backup and CSI snapshot data movement.
|
||||
Check issue https://github.com/vmware-tanzu/velero/issues/8534 for more details.
|
||||
|
||||
##### CSI snapshot data movement restore node-selection and node-selection by storage class
|
||||
In v1.17, CSI snapshot data movement restore gains the same node-selection capability as backup; that is, users can now specify which nodes can/cannot run data mover pods for both backup and restore. Users can also configure node selection per storage class, which is particularly helpful in environments where a storage class is not usable by all cluster nodes.
|
||||
Check issue https://github.com/vmware-tanzu/velero/issues/8186 and https://github.com/vmware-tanzu/velero/issues/8223 for more details.
|
||||
|
||||
#### Include/exclude policy support for resource policy
|
||||
In v1.17, Velero resource policies support `includeExcludePolicy` in addition to the existing `volumePolicy`. This allows users to define include/exclude filters for resources in a resource policy ConfigMap, so the filters are reusable across multiple backups.
|
||||
Check the document https://velero.io/docs/main/resource-filtering/#creating-resource-policies:~:text=resources%3D%22*%22-,Resource%20policies,-Velero%20provides%20resource for more details.
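A hedged sketch of a resource policy ConfigMap carrying both the existing `volumePolicies` and the new `includeExcludePolicy` section; the sub-fields of `includeExcludePolicy` shown here are assumptions, so follow the linked documentation for the real schema:

```yaml
# Illustrative resource policy ConfigMap; referenced from a backup via the
# resource policies ConfigMap option on backup create.
apiVersion: v1
kind: ConfigMap
metadata:
  name: resource-policy
  namespace: velero
data:
  policy.yaml: |
    version: v1
    volumePolicies:
      - conditions:
          storageClass:
            - slow-sc
        action:
          type: skip
    includeExcludePolicy:          # sub-fields below are assumed, not authoritative
      excludedResources:
        - events
        - events.events.k8s.io
```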
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: 1.24.6
|
||||
kopia: 0.21.1
|
||||
|
||||
### Limitations/Known issues
|
||||
|
||||
### Breaking changes
|
||||
#### Deprecation of Restic
|
||||
According to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/main/GOVERNANCE.md#deprecation-policy), fs-backup backups via the Restic path are removed in v1.17, so `--uploader-type=restic` is no longer a valid installation configuration. This means you cannot create a backup using the Restic path, but you can still restore from previous Restic-path backups until v1.19.
|
||||
|
||||
#### Repository maintenance job configurations are removed from Velero server parameter
|
||||
Since the repository maintenance job configurations have moved to the repository maintenance job ConfigMap, the following Velero server parameters are removed in v1.17 (a hedged ConfigMap sketch follows the list below):
|
||||
- --keep-latest-maintenance-jobs
|
||||
- --maintenance-job-cpu-request
|
||||
- --maintenance-job-mem-request
|
||||
- --maintenance-job-cpu-limit
|
||||
- --maintenance-job-mem-limit
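For reference, these settings now live in the repository maintenance job ConfigMap. A hedged sketch is below; the ConfigMap name, data key, and field names are assumptions based on the existing maintenance job configuration, so consult the v1.17 docs for the authoritative schema:

```yaml
# Illustrative repository maintenance job ConfigMap replacing the removed server flags.
apiVersion: v1
kind: ConfigMap
metadata:
  name: repo-maintenance-job-config   # name assumed
  namespace: velero
data:
  repo-maintenance-job-config.json: |
    {
      "global": {
        "keepLatestMaintenanceJobs": 3,
        "podResources": {
          "cpuRequest": "100m",
          "memRequest": "128Mi",
          "cpuLimit": "500m",
          "memLimit": "512Mi"
        }
      }
    }
```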
|
||||
|
||||
### All Changes
|
||||
* Add ConfigMap parameters validation for install CLI and server start. (#9200, @blackpiglet)
|
||||
* Add priorityclasses to high priority restore list (#9175, @kaovilai)
|
||||
* Introduced context-based logger for backend implementations (Azure, GCS, S3, and Filesystem) (#9168, @priyansh17)
|
||||
* Fix issue #9140, add os=windows:NoSchedule toleration for Windows pods (#9165, @Lyndon-Li)
|
||||
* Remove the repository maintenance job parameters from velero server. (#9147, @blackpiglet)
|
||||
* Add include/exclude policy to resources policy (#9145, @reasonerjt)
|
||||
* Add ConfigMap support for keepLatestMaintenanceJobs with CLI parameter fallback (#9135, @shubham-pampattiwar)
|
||||
* Fix the dd and du's node affinity issue. (#9130, @blackpiglet)
|
||||
* Remove the WaitUntilVSCHandleIsReady from vs BIA. (#9124, @blackpiglet)
|
||||
* Add comprehensive Volume Group Snapshots documentation with workflow diagrams and examples (#9123, @shubham-pampattiwar)
|
||||
* Fix issue #9065, add doc for node-agent prepare queue length (#9118, @Lyndon-Li)
|
||||
* Fix issue #9095, update restore doc for PVC selected-node (#9117, @Lyndon-Li)
|
||||
* Update CSI Snapshot Data Movement doc for issue #8534, #8185 (#9113, @Lyndon-Li)
|
||||
* Fix issue #8986, refactor fs-backup doc after VGDP Micro Service for fs-backup (#9112, @Lyndon-Li)
|
||||
* Return error if timeout when checking server version (#9111, @ywk253100)
|
||||
* Update "Default Volumes to Fs Backup" to "File System Backup (Default)" (#9105, @shubham-pampattiwar)
|
||||
* Fix issue #9077, don't block backup deletion on list VS error (#9100, @Lyndon-Li)
|
||||
* Bump up Kopia to v0.21.1 (#9098, @Lyndon-Li)
|
||||
* Add imagePullSecrets inheritance for VGDP pod and maintenance job. (#9096, @blackpiglet)
|
||||
* Avoid checking the VS and VSC status in the backup finalizing phase. (#9092, @blackpiglet)
|
||||
* Fix issue #9053: always remove the selected-node annotation during PVC restore when no node mapping exists. Breaking change: previously, the annotation was preserved if the node existed. (#9076, @Lyndon-Li)
|
||||
* Enable parameterized kubelet mount path during node-agent installation (#9074, @longxiucai)
|
||||
* Fix issue #8857, support third party tolerations for data mover pods (#9072, @Lyndon-Li)
|
||||
* Fix issue #8813, remove restic from the valid uploader type (#9069, @Lyndon-Li)
|
||||
* Fix issue #8185, allow users to disable pod volume host path mount for node-agent (#9068, @Lyndon-Li)
|
||||
* Fix #8344, add the design for a mechanism to soothe creation of data mover pods for DataUpload, DataDownload, PodVolumeBackup and PodVolumeRestore (#9067, @Lyndon-Li)
|
||||
* Fix #8344, add a mechanism to soothe creation of data mover pods for DataUpload, DataDownload, PodVolumeBackup and PodVolumeRestore (#9064, @Lyndon-Li)
|
||||
* Add Gauge metric for BSL availability (#9059, @reasonerjt)
|
||||
* Fix missing defaultVolumesToFsBackup flag output in Velero describe backup cmd (#9056, @shubham-pampattiwar)
|
||||
* Allow for proper tracking of multiple hooks per container (#9048, @sseago)
|
||||
* Make the backup repository controller not invalidate the BSL on restart (#9046, @blackpiglet)
|
||||
* Removed username/password credential handling from newConfigCredential as azidentity.UsernamePasswordCredentialOptions is reported as deprecated. (#9041, @priyansh17)
|
||||
* Remove dependency with VolumeSnapshotClass in DataUpload. (#9040, @blackpiglet)
|
||||
* Fix issue #8961, cancel PVB/PVR on Velero server restart (#9031, @Lyndon-Li)
|
||||
* Fix issue #8962, resume PVB/PVR during node-agent restarts (#9030, @Lyndon-Li)
|
||||
* Bump kopia v0.20.1 (#9027, @Lyndon-Li)
|
||||
* Fix issue #8965, support PVB/PVR's cancel state in the backup/restore (#9026, @Lyndon-Li)
|
||||
* Fix issue #8816: when specifying a LabelSelector on restore, related items such as PVCs and VolumeSnapshots were not included (#9024, @amastbau)
|
||||
* Fix issue #8963, add legacy PVR controller for Restic path (#9022, @Lyndon-Li)
|
||||
* Fix issue #8964, add Windows support for VGDP MS for fs-backup (#9021, @Lyndon-Li)
|
||||
* Accommodate VGS workflows in PVC CSI plugin (#9019, @shubham-pampattiwar)
|
||||
* Fix issue #8958, add VGDP MS PVB controller (#9015, @Lyndon-Li)
|
||||
* Fix issue #8959, add VGDP MS PVR controller (#9014, @Lyndon-Li)
|
||||
* Fix issue #8988, add data path for VGDP ms PVR (#9005, @Lyndon-Li)
|
||||
* Fix issue #8988, add data path for VGDP ms pvb (#8998, @Lyndon-Li)
|
||||
* Skip VS and VSC not created by backup. (#8990, @blackpiglet)
|
||||
* Make ResticIdentifier optional for kopia BackupRepositories (#8987, @kaovilai)
|
||||
* Fix issue #8960, implement PodVolume exposer for PVB/PVR (#8985, @Lyndon-Li)
|
||||
* fix: update mc command in minio-deployment example (#8982, @vishal-chdhry)
|
||||
* Fix issue #8957, add design for VGDP MS for fs-backup (#8979, @Lyndon-Li)
|
||||
* Add BSL status check for backup/restore operations. (#8976, @blackpiglet)
|
||||
* Mark BackupRepository not ready when BSL changed (#8975, @ywk253100)
|
||||
* Add support for [distributed snapshotting](https://github.com/kubernetes-csi/external-snapshotter/tree/4cedb3f45790ac593ebfa3324c490abedf739477?tab=readme-ov-file#distributed-snapshotting) (#8969, @flx5)
|
||||
* Fix issue #8534, refactor dm controllers to tolerate cancel request in more cases, e.g., node restart, node drain (#8952, @Lyndon-Li)
|
||||
* The backup and restore VGDP affinity enhancement implementation. (#8949, @blackpiglet)
|
||||
* Remove CSI VS and VSC metadata from backup. (#8946, @blackpiglet)
|
||||
* Extend PVCAction itemblock plugin to support grouping PVCs under VGS label key (#8944, @shubham-pampattiwar)
|
||||
* Copy security context from origin pod (#8943, @farodin91)
|
||||
* Add support for configuring VGS label key (#8938, @shubham-pampattiwar)
|
||||
* Add VolumeSnapshotContent into the RIA and the mustHave resource list. (#8924, @blackpiglet)
|
||||
* Mounted cloud credentials should not be world-readable (#8919, @sseago)
|
||||
* Warn for not found error in patching managed fields (#8902, @sseago)
|
||||
* Fix issue #8878, relax node OS deduction error checks (#8891, @Lyndon-Li)
|
||||
* Skip namespace in terminating state in backup resource collection. (#8890, @blackpiglet)
|
||||
* Implement PriorityClass Support (#8883, @kaovilai)
|
||||
* Fix Velero adding restore-wait init container when not needed. (#8880, @kaovilai)
|
||||
* Pass the logger in kopia related operations. (#8875, @hu-keyu)
|
||||
* Inherit the dnsPolicy and dnsConfig from the node agent pod. This is done so that the kopia task uses the same configuration. (#8845, @flx5)
|
||||
* Add design for VolumeGroupSnapshot support (#8778, @shubham-pampattiwar)
|
||||
* Inherit k8s default volumeSnapshotClass. (#8719, @hu-keyu)
|
||||
* CLI automatically discovers and uses cacert from BSL for download requests (#8557, @kaovilai)
|
||||
* Add s390x support to the Velero binary. (#7505, @pandurangkhandeparker)
|
||||
@@ -1 +0,0 @@
|
||||
Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs
|
||||
@@ -1 +0,0 @@
|
||||
feat: Enhance BackupStorageLocation with Secret-based CA certificate support
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #7725, add design for backup repo cache configuration
|
||||
@@ -1 +0,0 @@
|
||||
Add VolumePolicy support for PVC Phase conditions to allow skipping Pending PVCs
|
||||
@@ -1 +0,0 @@
|
||||
feat: Permit specifying annotations for the BackupPVC
|
||||
@@ -1 +0,0 @@
|
||||
Remove labels associated with previous backups
|
||||
@@ -1 +0,0 @@
|
||||
Get pod list once per namespace in pvc IBA
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9229, don't attach backupPVC to the source node
|
||||
@@ -1 +0,0 @@
|
||||
Update AzureAD Microsoft Authentication Library to v1.5.0
|
||||
@@ -1 +0,0 @@
|
||||
Protect VolumeSnapshot field from race condition during multi-thread backup
|
||||
@@ -1,10 +0,0 @@
|
||||
Implement wildcard namespace pattern expansion for backup namespace includes/excludes.
|
||||
|
||||
This change adds support for wildcard patterns (*, ?, [abc], {a,b,c}) in namespace includes and excludes during backup operations.
|
||||
When wildcard patterns are detected, they are expanded against the list of active namespaces in the cluster before the backup proceeds.
|
||||
|
||||
Key features:
|
||||
- Wildcard patterns in namespace includes/excludes are automatically detected and expanded
|
||||
- Pattern validation ensures unsupported patterns (regex, consecutive asterisks) are rejected
|
||||
- Empty wildcard results (e.g., "invalid*" matching no namespaces) correctly result in empty backups
|
||||
- Exact namespace names and "*" continue to work as before (no expansion needed); a hedged example follows below
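To illustrate the behavior listed above, here is a hedged Backup sketch using wildcard namespace patterns (all namespace names are made up; patterns are expanded against active namespaces when the backup runs):

```yaml
# Illustrative backup relying on wildcard namespace expansion.
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: team-namespaces
  namespace: velero
spec:
  includedNamespaces:
    - "team-*"            # glob: every active namespace starting with "team-"
    - "{staging,prod}"    # brace alternation, per the supported pattern list above
  excludedNamespaces:
    - "team-sandbox?"     # single-character wildcard
```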
|
||||
@@ -1 +0,0 @@
|
||||
Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment
|
||||
@@ -1 +0,0 @@
|
||||
Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #7904, remove the code and doc for PVC node selection
|
||||
@@ -1 +0,0 @@
|
||||
Implement concurrency control for cache of native VolumeSnapshotter plugin.
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9193, don't connect repo in repo controller
|
||||
@@ -1 +0,0 @@
|
||||
Add option for privileged fs-backup pod
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9267, add events to data mover prepare diagnostic
|
||||
@@ -1 +0,0 @@
|
||||
VerifyJSONConfigs verifies every element in Data.
|
||||
@@ -1 +0,0 @@
|
||||
Concurrent backup processing
|
||||
@@ -1 +0,0 @@
|
||||
Sanitize Azure HTTP responses in BSL status messages
|
||||
@@ -1 +0,0 @@
|
||||
Fix typos in documentation
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9332, add bytesDone for cache files
|
||||
@@ -1 +0,0 @@
|
||||
Add cache configuration to VGDP
|
||||
@@ -1 +0,0 @@
|
||||
Fix the Job build error when the BackupRepository name is longer than 63 characters.
|
||||
@@ -1 +0,0 @@
|
||||
Add cache dir configuration for udmrepo
|
||||
@@ -1 +0,0 @@
|
||||
Add snapshotSize for DataDownload, PodVolumeRestore
|
||||
@@ -1 +0,0 @@
|
||||
Add incrementalSize to DU/PVB for reporting new/changed size
|
||||
@@ -1 +0,0 @@
|
||||
Support cache volume for generic restore exposer and pod volume exposer
|
||||
@@ -1 +0,0 @@
|
||||
Use hookIndex for recording multiple restore exec hooks.
|
||||
@@ -1 +0,0 @@
|
||||
Fix managed fields patch for resources using GenerateName
|
||||
@@ -1 +0,0 @@
|
||||
Track actual resource names for GenerateName in restore status
|
||||
@@ -1 +0,0 @@
|
||||
Add cache volume configuration
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9365, prevent fake completion notifications due to multiple updates of a single PVR
|
||||
@@ -1 +0,0 @@
|
||||
Refactor repo provider interface for static configuration
|
||||
@@ -1 +0,0 @@
|
||||
Don't copy the securityContext from the first container if a ConfigMap is found
|
||||
@@ -1 +0,0 @@
|
||||
Cache volume support for DataDownload
|
||||
@@ -1 +0,0 @@
|
||||
Cache volume for PVR
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9400, connect to the repo for the first time after creation so that init params can be written
|
||||
@@ -1 +0,0 @@
|
||||
Add Prometheus metrics for maintenance jobs
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9276, add doc for cache volume support
|
||||
@@ -1 +0,0 @@
|
||||
Apply volume policies to VolumeGroupSnapshot PVC filtering
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9194, add doc for GOMAXPROCS behavior change
|
||||
@@ -1 +0,0 @@
|
||||
Remove VolumeSnapshotClass from CSI B/R process.
|
||||
@@ -1 +0,0 @@
|
||||
Add PVC-to-Pod cache to improve volume policy performance
|
||||
@@ -1 +0,0 @@
|
||||
Fix plugin init container names exceeding DNS-1123 limit
|
||||
@@ -1 +0,0 @@
|
||||
Add label and annotation settings for maintenance jobs and data mover pods.
|
||||
@@ -1 +0,0 @@
|
||||
Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence.
|
||||
@@ -1 +0,0 @@
|
||||
Fix issue #9478, add diagnostic info when the expose peek fails
|
||||
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright The Velero Contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -66,14 +66,14 @@ func done() bool {
|
||||
doneFile := filepath.Join("/restores", child.Name(), ".velero", os.Args[1])
|
||||
|
||||
if _, err := os.Stat(doneFile); os.IsNotExist(err) {
|
||||
fmt.Printf("The filesystem restore done file %s is not found yet. Retry later.\n", doneFile)
|
||||
fmt.Printf("Not found: %s\n", doneFile)
|
||||
return false
|
||||
} else if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR looking filesystem restore done file %s: %s\n", doneFile, err)
|
||||
fmt.Fprintf(os.Stderr, "ERROR looking for %s: %s\n", doneFile, err)
|
||||
return false
|
||||
}
|
||||
|
||||
fmt.Printf("Found the done file %s\n", doneFile)
|
||||
fmt.Printf("Found %s", doneFile)
|
||||
}
|
||||
|
||||
return true
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.12.0
|
||||
name: backuprepositories.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -26,19 +26,14 @@ spec:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -46,21 +41,13 @@ spec:
|
||||
description: BackupRepositorySpec is the specification for a BackupRepository.
|
||||
properties:
|
||||
backupStorageLocation:
|
||||
description: |-
|
||||
BackupStorageLocation is the name of the BackupStorageLocation
|
||||
description: BackupStorageLocation is the name of the BackupStorageLocation
|
||||
that should contain this repository.
|
||||
type: string
|
||||
maintenanceFrequency:
|
||||
description: MaintenanceFrequency is how often maintenance should
|
||||
be run.
|
||||
type: string
|
||||
repositoryConfig:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: RepositoryConfig is for repository-specific configuration
|
||||
fields.
|
||||
nullable: true
|
||||
type: object
|
||||
repositoryType:
|
||||
description: RepositoryType indicates the type of the backend repository
|
||||
enum:
|
||||
@@ -69,26 +56,25 @@ spec:
|
||||
- ""
|
||||
type: string
|
||||
resticIdentifier:
|
||||
description: |-
|
||||
ResticIdentifier is the full restic-compatible string for identifying
|
||||
this repository. This field is only used when RepositoryType is "restic".
|
||||
description: ResticIdentifier is the full restic-compatible string
|
||||
for identifying this repository.
|
||||
type: string
|
||||
volumeNamespace:
|
||||
description: |-
|
||||
VolumeNamespace is the namespace this backup repository contains
|
||||
pod volume backups for.
|
||||
description: VolumeNamespace is the namespace this backup repository
|
||||
contains pod volume backups for.
|
||||
type: string
|
||||
required:
|
||||
- backupStorageLocation
|
||||
- maintenanceFrequency
|
||||
- resticIdentifier
|
||||
- volumeNamespace
|
||||
type: object
|
||||
status:
|
||||
description: BackupRepositoryStatus is the current status of a BackupRepository.
|
||||
properties:
|
||||
lastMaintenanceTime:
|
||||
description: LastMaintenanceTime is the last time repo maintenance
|
||||
succeeded.
|
||||
description: LastMaintenanceTime is the last time maintenance was
|
||||
run.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
@@ -103,33 +89,6 @@ spec:
|
||||
- Ready
|
||||
- NotReady
|
||||
type: string
|
||||
recentMaintenance:
|
||||
description: RecentMaintenance is status of the recent repo maintenance.
|
||||
items:
|
||||
properties:
|
||||
completeTimestamp:
|
||||
description: CompleteTimestamp is the completion time of the
|
||||
repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the current status of
|
||||
the repo maintenance.
|
||||
type: string
|
||||
result:
|
||||
description: Result is the result of the repo maintenance.
|
||||
enum:
|
||||
- Succeeded
|
||||
- Failed
|
||||
type: string
|
||||
startTimestamp:
|
||||
description: StartTimestamp is the start time of the repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.12.0
|
||||
name: backups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -17,24 +17,18 @@ spec:
|
||||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
Backup is a Velero resource that represents the capture of Kubernetes
|
||||
description: Backup is a Velero resource that represents the capture of Kubernetes
|
||||
cluster state at a point in time (API objects and associated volume state).
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -42,62 +36,55 @@ spec:
|
||||
description: BackupSpec defines the specification for a Velero backup.
|
||||
properties:
|
||||
csiSnapshotTimeout:
|
||||
description: |-
|
||||
CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to
|
||||
ReadyToUse during creation, before returning error as timeout.
|
||||
The default value is 10 minute.
|
||||
description: CSISnapshotTimeout specifies the time used to wait for
|
||||
CSI VolumeSnapshot status turns to ReadyToUse during creation, before
|
||||
returning error as timeout. The default value is 10 minute.
|
||||
type: string
|
||||
datamover:
|
||||
description: |-
|
||||
DataMover specifies the data mover to be used by the backup.
|
||||
If DataMover is "" or "velero", the built-in data mover will be used.
|
||||
description: DataMover specifies the data mover to be used by the
|
||||
backup. If DataMover is "" or "velero", the built-in data mover
|
||||
will be used.
|
||||
type: string
|
||||
defaultVolumesToFsBackup:
|
||||
description: |-
|
||||
DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used
|
||||
for all volumes by default.
|
||||
description: DefaultVolumesToFsBackup specifies whether pod volume
|
||||
file system backup should be used for all volumes by default.
|
||||
nullable: true
|
||||
type: boolean
|
||||
defaultVolumesToRestic:
|
||||
description: |-
|
||||
DefaultVolumesToRestic specifies whether restic should be used to take a
|
||||
backup of all pod volumes by default.
|
||||
|
||||
Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead.
|
||||
description: "DefaultVolumesToRestic specifies whether restic should
|
||||
be used to take a backup of all pod volumes by default. \n Deprecated:
|
||||
this field is no longer used and will be removed entirely in future.
|
||||
Use DefaultVolumesToFsBackup instead."
|
||||
nullable: true
|
||||
type: boolean
|
||||
excludedClusterScopedResources:
|
||||
description: |-
|
||||
ExcludedClusterScopedResources is a slice of cluster-scoped
|
||||
resource type names to exclude from the backup.
|
||||
If set to "*", all cluster-scoped resource types are excluded.
|
||||
The default value is empty.
|
||||
description: ExcludedClusterScopedResources is a slice of cluster-scoped
|
||||
resource type names to exclude from the backup. If set to "*", all
|
||||
cluster-scoped resource types are excluded. The default value is
|
||||
empty.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
excludedNamespaceScopedResources:
|
||||
description: |-
|
||||
ExcludedNamespaceScopedResources is a slice of namespace-scoped
|
||||
resource type names to exclude from the backup.
|
||||
If set to "*", all namespace-scoped resource types are excluded.
|
||||
The default value is empty.
|
||||
description: ExcludedNamespaceScopedResources is a slice of namespace-scoped
|
||||
resource type names to exclude from the backup. If set to "*", all
|
||||
namespace-scoped resource types are excluded. The default value
|
||||
is empty.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
excludedNamespaces:
|
||||
description: |-
|
||||
ExcludedNamespaces contains a list of namespaces that are not
|
||||
included in the backup.
|
||||
description: ExcludedNamespaces contains a list of namespaces that
|
||||
are not included in the backup.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
excludedResources:
|
||||
description: |-
|
||||
ExcludedResources is a slice of resource names that are not
|
||||
included in the backup.
|
||||
description: ExcludedResources is a slice of resource names that are
|
||||
not included in the backup.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
@@ -110,9 +97,9 @@ spec:
|
||||
description: Resources are hooks that should be executed when
|
||||
backing up individual instances of a resource.
|
||||
items:
|
||||
description: |-
|
||||
BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on
|
||||
the rules defined for namespaces, resources, and label selector.
|
||||
description: BackupResourceHookSpec defines one or more BackupResourceHooks
|
||||
that should be executed based on the rules defined for namespaces,
|
||||
resources, and label selector.
|
||||
properties:
|
||||
excludedNamespaces:
|
||||
description: ExcludedNamespaces specifies the namespaces
|
||||
@@ -129,17 +116,17 @@ spec:
|
||||
nullable: true
|
||||
type: array
|
||||
includedNamespaces:
|
||||
description: |-
|
||||
IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies
|
||||
description: IncludedNamespaces specifies the namespaces
|
||||
to which this hook spec applies. If empty, it applies
|
||||
to all namespaces.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
includedResources:
|
||||
description: |-
|
||||
IncludedResources specifies the resources to which this hook spec applies. If empty, it applies
|
||||
to all resources.
|
||||
description: IncludedResources specifies the resources to
|
||||
which this hook spec applies. If empty, it applies to
|
||||
all resources.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
@@ -153,8 +140,8 @@ spec:
|
||||
description: matchExpressions is a list of label selector
|
||||
requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
description: A label selector requirement is a selector
|
||||
that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
@@ -162,33 +149,33 @@ spec:
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
description: operator represents a key's relationship
|
||||
to a set of values. Valid operators are In,
|
||||
NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
description: values is an array of string values.
|
||||
If the operator is In or NotIn, the values array
|
||||
must be non-empty. If the operator is Exists
|
||||
or DoesNotExist, the values array must be empty.
|
||||
This array is replaced during a strategic merge
|
||||
patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
description: matchLabels is a map of {key,value} pairs.
|
||||
A single {key,value} in the matchLabels map is equivalent
|
||||
to an element of matchExpressions, whose key field
|
||||
is "key", the operator is "In", and the values array
|
||||
contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
@@ -196,9 +183,10 @@ spec:
|
||||
description: Name is the name of this hook.
|
||||
type: string
|
||||
post:
|
||||
description: |-
|
||||
PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup.
|
||||
These are executed after all "additional items" from item actions are processed.
|
||||
description: PostHooks is a list of BackupResourceHooks
|
||||
to execute after storing the item in the backup. These
|
||||
are executed after all "additional items" from item actions
|
||||
are processed.
|
||||
items:
|
||||
description: BackupResourceHook defines a hook for a resource.
|
||||
properties:
|
||||
@@ -213,9 +201,10 @@ spec:
|
||||
minItems: 1
|
||||
type: array
|
||||
container:
|
||||
description: |-
|
||||
Container is the container in the pod where the command should be executed. If not specified,
|
||||
the pod's first container is used.
|
||||
description: Container is the container in the
|
||||
pod where the command should be executed. If
|
||||
not specified, the pod's first container is
|
||||
used.
|
||||
type: string
|
||||
onError:
|
||||
description: OnError specifies how Velero should
|
||||
@@ -226,9 +215,9 @@ spec:
|
||||
- Fail
|
||||
type: string
|
||||
timeout:
|
||||
description: |-
|
||||
Timeout defines the maximum amount of time Velero should wait for the hook to complete before
|
||||
considering the execution a failure.
|
||||
description: Timeout defines the maximum amount
|
||||
of time Velero should wait for the hook to complete
|
||||
before considering the execution a failure.
|
||||
type: string
|
||||
required:
|
||||
- command
|
||||
@@ -238,9 +227,10 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
pre:
|
||||
description: |-
|
||||
PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup.
|
||||
These are executed before any "additional items" from item actions are processed.
|
||||
description: PreHooks is a list of BackupResourceHooks to
|
||||
execute prior to storing the item in the backup. These
|
||||
are executed before any "additional items" from item actions
|
||||
are processed.
|
||||
items:
|
||||
description: BackupResourceHook defines a hook for a resource.
|
||||
properties:
|
||||
@@ -255,9 +245,10 @@ spec:
|
||||
minItems: 1
|
||||
type: array
|
||||
container:
|
||||
description: |-
|
||||
Container is the container in the pod where the command should be executed. If not specified,
|
||||
the pod's first container is used.
|
||||
description: Container is the container in the
|
||||
pod where the command should be executed. If
|
||||
not specified, the pod's first container is
|
||||
used.
|
||||
type: string
|
||||
onError:
|
||||
description: OnError specifies how Velero should
|
||||
@@ -268,9 +259,9 @@ spec:
|
||||
- Fail
|
||||
type: string
|
||||
timeout:
|
||||
description: |-
|
||||
Timeout defines the maximum amount of time Velero should wait for the hook to complete before
|
||||
considering the execution a failure.
|
||||
description: Timeout defines the maximum amount
|
||||
of time Velero should wait for the hook to complete
|
||||
before considering the execution a failure.
|
||||
type: string
|
||||
required:
|
||||
- command
|
||||
@@ -286,99 +277,91 @@ spec:
|
||||
type: array
|
||||
type: object
|
||||
includeClusterResources:
|
||||
description: |-
|
||||
IncludeClusterResources specifies whether cluster-scoped resources
|
||||
should be included for consideration in the backup.
|
||||
description: IncludeClusterResources specifies whether cluster-scoped
|
||||
resources should be included for consideration in the backup.
|
||||
nullable: true
|
||||
type: boolean
|
||||
includedClusterScopedResources:
|
||||
description: |-
|
||||
IncludedClusterScopedResources is a slice of cluster-scoped
|
||||
resource type names to include in the backup.
|
||||
If set to "*", all cluster-scoped resource types are included.
|
||||
The default value is empty, which means only related
|
||||
cluster-scoped resources are included.
|
||||
description: IncludedClusterScopedResources is a slice of cluster-scoped
|
||||
resource type names to include in the backup. If set to "*", all
|
||||
cluster-scoped resource types are included. The default value is
|
||||
empty, which means only related cluster-scoped resources are included.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
includedNamespaceScopedResources:
|
||||
description: |-
|
||||
IncludedNamespaceScopedResources is a slice of namespace-scoped
|
||||
resource type names to include in the backup.
|
||||
The default value is "*".
|
||||
description: IncludedNamespaceScopedResources is a slice of namespace-scoped
|
||||
resource type names to include in the backup. The default value
|
||||
is "*".
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
includedNamespaces:
|
||||
description: |-
|
||||
IncludedNamespaces is a slice of namespace names to include objects
|
||||
from. If empty, all namespaces are included.
|
||||
description: IncludedNamespaces is a slice of namespace names to include
|
||||
objects from. If empty, all namespaces are included.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
includedResources:
|
||||
description: |-
|
||||
IncludedResources is a slice of resource names to include
|
||||
description: IncludedResources is a slice of resource names to include
|
||||
in the backup. If empty, all resources are included.
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
itemOperationTimeout:
|
||||
description: |-
|
||||
ItemOperationTimeout specifies the time used to wait for asynchronous BackupItemAction operations
|
||||
The default value is 4 hour.
|
||||
description: ItemOperationTimeout specifies the time used to wait
|
||||
for asynchronous BackupItemAction operations The default value is
|
||||
1 hour.
|
||||
type: string
|
||||
labelSelector:
|
||||
description: |-
|
||||
LabelSelector is a metav1.LabelSelector to filter with
|
||||
when adding individual objects to the backup. If empty
|
||||
or nil, all objects are included. Optional.
|
||||
description: LabelSelector is a metav1.LabelSelector to filter with
|
||||
when adding individual objects to the backup. If empty or nil, all
|
||||
objects are included. Optional.
|
||||
nullable: true
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements.
|
||||
The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
description: A label selector requirement is a selector that
|
||||
contains values, a key, and an operator that relates the key
|
||||
and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies
|
||||
to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
description: operator represents a key's relationship to
|
||||
a set of values. Valid operators are In, NotIn, Exists
|
||||
and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
description: values is an array of string values. If the
|
||||
operator is In or NotIn, the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist, the values
|
||||
array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
description: matchLabels is a map of {key,value} pairs. A single
|
||||
{key,value} in the matchLabels map is equivalent to an element
|
||||
of matchExpressions, whose key field is "key", the operator
|
||||
is "In", and the values array contains only "value". The requirements
|
||||
are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
@@ -390,58 +373,56 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
orLabelSelectors:
|
||||
description: |-
|
||||
OrLabelSelectors is list of metav1.LabelSelector to filter with
|
||||
when adding individual objects to the backup. If multiple provided
|
||||
description: OrLabelSelectors is list of metav1.LabelSelector to filter
|
||||
with when adding individual objects to the backup. If multiple provided
|
||||
they will be joined by the OR operator. LabelSelector as well as
|
||||
OrLabelSelectors cannot co-exist in backup request, only one of them
|
||||
can be used.
|
||||
OrLabelSelectors cannot co-exist in backup request, only one of
|
||||
them can be used.
|
||||
items:
|
||||
description: |-
|
||||
A label selector is a label query over a set of resources. The result of matchLabels and
|
||||
matchExpressions are ANDed. An empty label selector matches all objects. A null
|
||||
label selector matches no objects.
|
||||
description: A label selector is a label query over a set of resources.
|
||||
The result of matchLabels and matchExpressions are ANDed. An empty
|
||||
label selector matches all objects. A null label selector matches
|
||||
no objects.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements.
|
||||
The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
description: A label selector requirement is a selector that
|
||||
contains values, a key, and an operator that relates the
|
||||
key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies
|
||||
to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
description: operator represents a key's relationship
|
||||
to a set of values. Valid operators are In, NotIn, Exists
|
||||
and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
description: values is an array of string values. If the
|
||||
operator is In or NotIn, the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist, the values
|
||||
array must be empty. This array is replaced during a
|
||||
strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
description: matchLabels is a map of {key,value} pairs. A single
|
||||
{key,value} in the matchLabels map is equivalent to an element
|
||||
of matchExpressions, whose key field is "key", the operator
|
||||
is "In", and the values array contains only "value". The requirements
|
||||
are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
@@ -450,10 +431,11 @@ spec:
|
||||
orderedResources:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
OrderedResources specifies the backup order of resources of specific Kind.
|
||||
The map key is the resource name and value is a list of object names separated by commas.
|
||||
Each resource name has format "namespace/objectname". For cluster resources, simply use "objectname".
|
||||
description: OrderedResources specifies the backup order of resources
|
||||
of specific Kind. The map key is the resource name and value is
|
||||
a list of object names separated by commas. Each resource name has
|
||||
format "namespace/objectname". For cluster resources, simply use
|
||||
"objectname".
|
||||
nullable: true
|
||||
type: object
|
||||
resourcePolicy:
|
||||
@@ -461,10 +443,10 @@ spec:
|
||||
that backup should follow
|
||||
properties:
|
||||
apiGroup:
|
||||
description: |-
|
||||
APIGroup is the group for the resource being referenced.
|
||||
If APIGroup is not specified, the specified Kind must be in the core API group.
|
||||
For any other third-party types, APIGroup is required.
|
||||
description: APIGroup is the group for the resource being referenced.
|
||||
If APIGroup is not specified, the specified Kind must be in
|
||||
the core API group. For any other third-party types, APIGroup
|
||||
is required.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
@@ -483,10 +465,8 @@ spec:
|
||||
nullable: true
|
||||
type: boolean
|
||||
snapshotVolumes:
|
||||
description: |-
|
||||
SnapshotVolumes specifies whether to take snapshots
|
||||
of any PV's referenced in the set of objects included
|
||||
in the Backup.
|
||||
description: SnapshotVolumes specifies whether to take snapshots of
|
||||
any PV's referenced in the set of objects included in the Backup.
|
||||
nullable: true
|
||||
type: boolean
|
||||
storageLocation:
|
||||
@@ -494,22 +474,8 @@ spec:
|
||||
BackupStorageLocation where the backup should be stored.
|
||||
type: string
|
||||
ttl:
|
||||
description: |-
|
||||
TTL is a time.Duration-parseable string describing how long
|
||||
the Backup should be retained for.
|
||||
type: string
|
||||
uploaderConfig:
|
||||
description: UploaderConfig specifies the configuration for the uploader.
|
||||
nullable: true
|
||||
properties:
|
||||
parallelFilesUpload:
|
||||
description: ParallelFilesUpload is the number of files parallel
|
||||
uploads to perform when using the uploader.
|
||||
type: integer
|
||||
type: object
|
||||
volumeGroupSnapshotLabelKey:
|
||||
description: VolumeGroupSnapshotLabelKey specifies the label key to
|
||||
group PVCs under a VGS.
|
||||
description: TTL is a time.Duration-parseable string describing how
|
||||
long the Backup should be retained for.
|
||||
type: string
|
||||
volumeSnapshotLocations:
|
||||
description: VolumeSnapshotLocations is a list containing names of
|
||||
@@ -522,44 +488,39 @@ spec:
|
||||
description: BackupStatus captures the current status of a Velero backup.
|
||||
properties:
|
||||
backupItemOperationsAttempted:
|
||||
description: |-
|
||||
BackupItemOperationsAttempted is the total number of attempted
|
||||
async BackupItemAction operations for this backup.
|
||||
description: BackupItemOperationsAttempted is the total number of
|
||||
attempted async BackupItemAction operations for this backup.
|
||||
type: integer
|
||||
backupItemOperationsCompleted:
|
||||
description: |-
|
||||
BackupItemOperationsCompleted is the total number of successfully completed
|
||||
async BackupItemAction operations for this backup.
|
||||
description: BackupItemOperationsCompleted is the total number of
|
||||
successfully completed async BackupItemAction operations for this
|
||||
backup.
|
||||
type: integer
|
||||
backupItemOperationsFailed:
|
||||
description: |-
|
||||
BackupItemOperationsFailed is the total number of async
|
||||
BackupItemAction operations for this backup which ended with an error.
|
||||
description: BackupItemOperationsFailed is the total number of async
|
||||
BackupItemAction operations for this backup which ended with an
|
||||
error.
|
||||
type: integer
|
||||
completionTimestamp:
|
||||
description: |-
|
||||
CompletionTimestamp records the time a backup was completed.
|
||||
Completion time is recorded even on failed backups.
|
||||
Completion time is recorded before uploading the backup object.
|
||||
The server's time is used for CompletionTimestamps
|
||||
description: CompletionTimestamp records the time a backup was completed.
|
||||
Completion time is recorded even on failed backups. Completion time
|
||||
is recorded before uploading the backup object. The server's time
|
||||
is used for CompletionTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
csiVolumeSnapshotsAttempted:
|
||||
description: |-
|
||||
CSIVolumeSnapshotsAttempted is the total number of attempted
|
||||
description: CSIVolumeSnapshotsAttempted is the total number of attempted
|
||||
CSI VolumeSnapshots for this backup.
|
||||
type: integer
|
||||
csiVolumeSnapshotsCompleted:
|
||||
description: |-
|
||||
CSIVolumeSnapshotsCompleted is the total number of successfully
|
||||
description: CSIVolumeSnapshotsCompleted is the total number of successfully
|
||||
completed CSI VolumeSnapshots for this backup.
|
||||
type: integer
|
||||
errors:
|
||||
description: |-
|
||||
Errors is a count of all error messages that were generated during
|
||||
execution of the backup. The actual errors are in the backup's log
|
||||
file in object storage.
|
||||
description: Errors is a count of all error messages that were generated
|
||||
during execution of the backup. The actual errors are in the backup's
|
||||
log file in object storage.
|
||||
type: integer
|
||||
expiration:
|
||||
description: Expiration is when this Backup is eligible for garbage-collection.
|
||||
@@ -574,28 +535,10 @@ spec:
|
||||
description: FormatVersion is the backup format version, including
|
||||
major, minor, and patch version.
|
||||
type: string
|
||||
hookStatus:
|
||||
description: HookStatus contains information about the status of the
|
||||
hooks.
|
||||
nullable: true
|
||||
properties:
|
||||
hooksAttempted:
|
||||
description: |-
|
||||
HooksAttempted is the total number of attempted hooks
|
||||
Specifically, HooksAttempted represents the number of hooks that failed to execute
|
||||
and the number of hooks that executed successfully.
|
||||
type: integer
|
||||
hooksFailed:
|
||||
description: HooksFailed is the total number of hooks which ended
|
||||
with an error
|
||||
type: integer
|
||||
type: object
|
||||
phase:
|
||||
description: Phase is the current state of the Backup.
|
||||
enum:
|
||||
- New
|
||||
- Queued
|
||||
- ReadyToStart
|
||||
- FailedValidation
|
||||
- InProgress
|
||||
- WaitingForPluginOperations
|
||||
@@ -608,67 +551,53 @@ spec:
|
||||
- Deleting
|
||||
type: string
|
||||
progress:
|
||||
description: |-
|
||||
Progress contains information about the backup's execution progress. Note
|
||||
that this information is best-effort only -- if Velero fails to update it
|
||||
during a backup for any reason, it may be inaccurate/stale.
|
||||
description: Progress contains information about the backup's execution
|
||||
progress. Note that this information is best-effort only -- if Velero
|
||||
fails to update it during a backup for any reason, it may be inaccurate/stale.
|
||||
nullable: true
|
||||
properties:
|
||||
itemsBackedUp:
|
||||
description: |-
|
||||
ItemsBackedUp is the number of items that have actually been written to the
|
||||
backup tarball so far.
|
||||
description: ItemsBackedUp is the number of items that have actually
|
||||
been written to the backup tarball so far.
|
||||
type: integer
|
||||
totalItems:
|
||||
description: |-
|
||||
TotalItems is the total number of items to be backed up. This number may change
|
||||
throughout the execution of the backup due to plugins that return additional related
|
||||
items to back up, the velero.io/exclude-from-backup label, and various other
|
||||
description: TotalItems is the total number of items to be backed
|
||||
up. This number may change throughout the execution of the backup
|
||||
due to plugins that return additional related items to back
|
||||
up, the velero.io/exclude-from-backup label, and various other
|
||||
filters that happen as items are processed.
|
||||
type: integer
|
||||
type: object
|
||||
queuePosition:
|
||||
description: |-
|
||||
QueuePosition is the position of the backup in the queue.
|
||||
Only relevant when Phase is "Queued"
|
||||
type: integer
|
||||
startTimestamp:
|
||||
description: |-
|
||||
StartTimestamp records the time a backup was started.
|
||||
Separate from CreationTimestamp, since that value changes
|
||||
on restores.
|
||||
description: StartTimestamp records the time a backup was started.
|
||||
Separate from CreationTimestamp, since that value changes on restores.
|
||||
The server's time is used for StartTimestamps
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
validationErrors:
|
||||
description: |-
|
||||
ValidationErrors is a slice of all validation errors (if
|
||||
applicable).
|
||||
description: ValidationErrors is a slice of all validation errors
|
||||
(if applicable).
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
version:
|
||||
description: |-
|
||||
Version is the backup format major version.
|
||||
Deprecated: Please see FormatVersion
|
||||
description: 'Version is the backup format major version. Deprecated:
|
||||
Please see FormatVersion'
|
||||
type: integer
|
||||
volumeSnapshotsAttempted:
|
||||
description: |-
|
||||
VolumeSnapshotsAttempted is the total number of attempted
|
||||
description: VolumeSnapshotsAttempted is the total number of attempted
|
||||
volume snapshots for this backup.
|
||||
type: integer
|
||||
volumeSnapshotsCompleted:
|
||||
description: |-
|
||||
VolumeSnapshotsCompleted is the total number of successfully
|
||||
description: VolumeSnapshotsCompleted is the total number of successfully
|
||||
completed volume snapshots for this backup.
|
||||
type: integer
|
||||
warnings:
|
||||
description: |-
|
||||
Warnings is a count of all warning messages that were generated during
|
||||
execution of the backup. The actual warnings are in the backup's log
|
||||
file in object storage.
|
||||
description: Warnings is a count of all warning messages that were
|
||||
generated during execution of the backup. The actual warnings are
|
||||
in the backup's log file in object storage.
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.12.0
name: backupstoragelocations.velero.io
spec:
group: velero.io

@@ -40,19 +40,14 @@ spec:
objects
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object

@@ -86,13 +81,8 @@ spec:
valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined

@@ -113,38 +103,10 @@ spec:
description: Bucket is the bucket to use for object storage.
type: string
caCert:
description: |-
CACert defines a CA bundle to use when verifying TLS connections to the provider.
Deprecated: Use CACertRef instead.
description: CACert defines a CA bundle to use when verifying
TLS connections to the provider.
format: byte
type: string
caCertRef:
description: |-
CACertRef is a reference to a Secret containing the CA certificate bundle to use
when verifying TLS connections to the provider. The Secret must be in the same
namespace as the BackupStorageLocation.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
prefix:
description: Prefix is the path inside a bucket to use for Velero
storage. Optional.

@@ -169,34 +131,29 @@ spec:
BackupStorageLocation
properties:
accessMode:
description: |-
AccessMode is an unused field.

Deprecated: there is now an AccessMode field on the Spec and this field
will be removed entirely as of v2.0.
description: "AccessMode is an unused field. \n Deprecated: there
is now an AccessMode field on the Spec and this field will be removed
entirely as of v2.0."
enum:
- ReadOnly
- ReadWrite
type: string
lastSyncedRevision:
description: |-
LastSyncedRevision is the value of the `metadata/revision` file in the backup
storage location the last time the BSL's contents were synced into the cluster.

Deprecated: this field is no longer updated or used for detecting changes to
the location's contents and will be removed entirely in v2.0.
description: "LastSyncedRevision is the value of the `metadata/revision`
file in the backup storage location the last time the BSL's contents
were synced into the cluster. \n Deprecated: this field is no longer
updated or used for detecting changes to the location's contents
and will be removed entirely in v2.0."
type: string
lastSyncedTime:
description: |-
LastSyncedTime is the last time the contents of the location were synced into
the cluster.
description: LastSyncedTime is the last time the contents of the location
were synced into the cluster.
format: date-time
nullable: true
type: string
lastValidationTime:
description: |-
LastValidationTime is the last time the backup store location was validated
the cluster.
description: LastValidationTime is the last time the backup store
location was validated the cluster.
format: date-time
nullable: true
type: string
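As a usage sketch for the objectStorage fields above, the following BackupStorageLocation references a CA bundle through caCertRef rather than the deprecated inline caCert. The provider, bucket, prefix, and Secret names are placeholder assumptions, and caCertRef appears on only one side of this diff, so it assumes a Velero version whose CRD includes that field.

```yaml
# Minimal BackupStorageLocation sketch; assumes the caCertRef field shown in
# the schema above is available in the Velero version in use.
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws                # placeholder provider
  objectStorage:
    bucket: my-velero-bucket   # placeholder bucket name
    prefix: cluster-a          # optional path inside the bucket
    caCertRef:                 # the Secret must be in the same namespace as the BSL
      name: storage-ca         # placeholder Secret name
      key: ca.crt              # key inside the Secret holding the CA bundle
```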
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.12.0
name: deletebackuprequests.velero.io
spec:
group: velero.io

@@ -29,19 +29,14 @@ spec:
description: DeleteBackupRequest is a request to delete one or more backups.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
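The DeleteBackupRequest excerpt above stops at the object metadata, so as a hedged illustration only: such a request typically names the backup to delete via spec.backupName, a field assumed from the upstream Velero API and not shown in this excerpt; the object and backup names below are placeholders.

```yaml
# Hypothetical DeleteBackupRequest; spec.backupName is assumed from the
# upstream Velero API and does not appear in the schema excerpt above.
apiVersion: velero.io/v1
kind: DeleteBackupRequest
metadata:
  name: delete-nightly-backup          # placeholder name
  namespace: velero
spec:
  backupName: nightly-backup-20240101  # placeholder backup name
```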
Some files were not shown because too many files have changed in this diff.