Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-27 15:12:07 +00:00)

Compare commits: remove_cha ... copilot/fi (33 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 540dfebf07 |  |
|  | 7b7b6bc2db |  |
|  | d96434c8c9 |  |
|  | 64e3643006 |  |
|  | 758f6a4847 |  |
|  | f6b3852d2f |  |
|  | 981b29b4cb |  |
|  | 7688579f75 |  |
|  | e63486b677 |  |
|  | bea82a61d6 |  |
|  | 3fc33d3c46 |  |
|  | 99d87aae5b |  |
|  | 960a596e7b |  |
|  | 695a94707d |  |
|  | 8d7957dfae |  |
|  | a3169aeff3 |  |
|  | e4726b2389 |  |
|  | 9dc27555bc |  |
|  | 39892abef2 |  |
|  | c565da2ea6 |  |
|  | 324c2fb448 |  |
|  | c870eb1645 |  |
|  | dc3da29f3e |  |
|  | 2579ef1093 |  |
|  | fa374b6143 |  |
|  | 67cf896eaf |  |
|  | ad11b38468 |  |
|  | b9cf90f11c |  |
|  | f947092f1a |  |
|  | 82367e7ff6 |  |
|  | 49b2851f08 |  |
|  | cbadd9047f |  |
|  | 764975ba29 |  |
.github/workflows/e2e-test-kind.yaml (vendored, 4 changes)

@@ -21,7 +21,7 @@ jobs:
   minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Set up Go version
       uses: actions/setup-go@v6
@@ -112,7 +112,7 @@ jobs:
   fail-fast: false
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Set up Go version
       uses: actions/setup-go@v6
.github/workflows/get-go-version.yaml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
   version: ${{ steps.pick-version.outputs.version }}
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - id: pick-version
       run: |
.github/workflows/nightly-trivy-scan.yml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:

   steps:
     - name: Checkout code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Run Trivy vulnerability scanner
       uses: aquasecurity/trivy-action@master
.github/workflows/pr-changelog-check.yml (vendored, 2 changes)

@@ -12,7 +12,7 @@ jobs:
   steps:

     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Changelog check
       if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}
.github/workflows/pr-ci-check.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
   fail-fast: false
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Set up Go version
       uses: actions/setup-go@v6
.github/workflows/pr-codespell.yml (vendored, 2 changes)

@@ -8,7 +8,7 @@ jobs:
   steps:

     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Codespell
       uses: codespell-project/actions-codespell@master
.github/workflows/pr-containers.yml (vendored, 2 changes)

@@ -13,7 +13,7 @@ jobs:
   name: Build
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v5
+    - uses: actions/checkout@v6
       name: Checkout

     - name: Set up QEMU
.github/workflows/pr-goreleaser.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
   name: Build
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v5
+    - uses: actions/checkout@v6
       name: Checkout

     - name: Verify .goreleaser.yml and try a dryrun release.
.github/workflows/pr-linter-check.yml (vendored, 6 changes)

@@ -18,7 +18,7 @@ jobs:
   needs: get-go-version
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Set up Go version
       uses: actions/setup-go@v6
@@ -26,7 +26,7 @@ jobs:
       go-version: ${{ needs.get-go-version.outputs.version }}

     - name: Linter check
-      uses: golangci/golangci-lint-action@v8
+      uses: golangci/golangci-lint-action@v9
       with:
-        version: v2.1.1
+        version: v2.5.0
         args: --verbose
.github/workflows/push-builder.yml (vendored, 2 changes)

@@ -12,7 +12,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:

-    - uses: actions/checkout@v5
+    - uses: actions/checkout@v6
       with:
         # The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
         # there are at least two commits: the first one is the merge commit and the second one is the real commit
.github/workflows/push.yml (vendored, 4 changes)

@@ -12,7 +12,7 @@ jobs:
   get-go-version:
     uses: ./.github/workflows/get-go-version.yaml
     with:
-      ref: ${{ github.ref }}
+      ref: ${{ github.ref_name }}

   build:
     name: Build
@@ -20,7 +20,7 @@ jobs:
   needs: get-go-version
   steps:
     - name: Check out the code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6

     - name: Set up Go version
       uses: actions/setup-go@v6
.github/workflows/rebase.yml (vendored, 2 changes)

@@ -9,7 +9,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout the latest code
-      uses: actions/checkout@v5
+      uses: actions/checkout@v6
       with:
         fetch-depth: 0
     - name: Automatic Rebase
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
 go clean -modcache -cache

 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder

 ARG GOPROXY
 ARG BIN

@@ -15,7 +15,7 @@
 ARG OS_VERSION=1809

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
Tiltfile (2 changes)

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.24 as tilt-helper
+FROM golang:1.25 as tilt-helper

 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
changelogs/unreleased/9350-blackpiglet (new file, 1 line)
@@ -0,0 +1 @@
+Fix the Job build error when the BackupRepository name is longer than 63 characters.

changelogs/unreleased/9397-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
+Cache volume support for PVR (PodVolumeRestore)

changelogs/unreleased/9407-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
+Fix issue #9400: connect to the repo for the first time after creation so that init params can be written

changelogs/unreleased/9418-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
+Fix issue #9276: add doc for cache volume support

changelogs/unreleased/9419-shubham-pampattiwar (new file, 1 line)
@@ -0,0 +1 @@
+Apply volume policies to VolumeGroupSnapshot PVC filtering

changelogs/unreleased/9420-Lyndon-Li (new file, 1 line)
@@ -0,0 +1 @@
+Fix issue #9194: add doc for the GOMAXPROCS behavior change
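The #9194 entry relates to the Go 1.24 to 1.25 toolchain bump made elsewhere in this compare: starting with Go 1.25 the default GOMAXPROCS also takes a container's cgroup CPU limit into account, so a pod limited to a few CPUs no longer defaults to the node's full core count. As a hedged, standalone illustration of that general runtime behavior (not code from this diff):

```go
// Minimal sketch, not from the Velero codebase: print what the runtime chose.
// On Go 1.25 inside a CPU-limited container, GOMAXPROCS defaults to the limit
// rather than to the host core count reported by NumCPU.
package main

import (
    "fmt"
    "runtime"
)

func main() {
    fmt.Println("NumCPU:    ", runtime.NumCPU())
    fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0)) // 0 queries the value without changing it
}
```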
go.mod (18 changes)

@@ -1,6 +1,6 @@
 module github.com/vmware-tanzu/velero

-go 1.24.0
+go 1.25.0

 require (
   cloud.google.com/go/storage v1.55.0
@@ -41,10 +41,9 @@ require (
   github.com/stretchr/testify v1.10.0
   github.com/vmware-tanzu/crash-diagnostics v0.3.7
   go.uber.org/zap v1.27.0
-  golang.org/x/mod v0.26.0
-  golang.org/x/net v0.42.0
+  golang.org/x/mod v0.29.0
   golang.org/x/oauth2 v0.30.0
-  golang.org/x/text v0.27.0
+  golang.org/x/text v0.31.0
   google.golang.org/api v0.241.0
   google.golang.org/grpc v1.73.0
   google.golang.org/protobuf v1.36.6
@@ -180,13 +179,14 @@ require (
   go.opentelemetry.io/otel/trace v1.37.0 // indirect
   go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
   go.uber.org/multierr v1.11.0 // indirect
-  golang.org/x/crypto v0.40.0 // indirect
+  golang.org/x/crypto v0.45.0 // indirect
   golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-  golang.org/x/sync v0.16.0 // indirect
-  golang.org/x/sys v0.34.0 // indirect
-  golang.org/x/term v0.33.0 // indirect
+  golang.org/x/net v0.47.0 // indirect
+  golang.org/x/sync v0.18.0 // indirect
+  golang.org/x/sys v0.38.0 // indirect
+  golang.org/x/term v0.37.0 // indirect
   golang.org/x/time v0.12.0 // indirect
-  golang.org/x/tools v0.34.0 // indirect
+  golang.org/x/tools v0.38.0 // indirect
   gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
   google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
   google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
go.sum (32 changes)

@@ -794,8 +794,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
-golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -833,8 +833,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -880,8 +880,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
-golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -908,8 +908,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -973,14 +973,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
-golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -990,8 +990,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
-golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1051,8 +1051,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
-golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.25-bookworm

 ARG GOPROXY

@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
 chmod +x /usr/bin/goreleaser

 # get golangci-lint
-RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
+RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0

 # install kubectl
 RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
@@ -621,8 +621,30 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
     return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
   }

+  // Filter PVCs by volume policy
+  filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup)
+  if err != nil {
+    return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
+  }
+
+  // Warn if any PVCs were filtered out
+  if len(filteredPVCs) < len(groupedPVCs) {
+    for _, originalPVC := range groupedPVCs {
+      found := false
+      for _, filteredPVC := range filteredPVCs {
+        if originalPVC.Name == filteredPVC.Name {
+          found = true
+          break
+        }
+      }
+      if !found {
+        p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
+      }
+    }
+  }
+
   // Determine the CSI driver for the grouped PVCs
-  driver, err := p.determineCSIDriver(groupedPVCs)
+  driver, err := p.determineCSIDriver(filteredPVCs)
   if err != nil {
     return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
   }
@@ -643,7 +665,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
   }

   // Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
-  vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
+  vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
   if err != nil {
     return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
   }
@@ -734,6 +756,40 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
   return pvcList.Items, nil
 }

+func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
+  pvcs []corev1api.PersistentVolumeClaim,
+  backup *velerov1api.Backup,
+) ([]corev1api.PersistentVolumeClaim, error) {
+  var filteredPVCs []corev1api.PersistentVolumeClaim
+
+  for _, pvc := range pvcs {
+    // Convert PVC to unstructured for ShouldPerformSnapshotWithBackup
+    pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
+    if err != nil {
+      return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
+    }
+    unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
+
+    // Check if this PVC should be snapshotted according to volume policies
+    shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
+      unstructuredPVC,
+      kuberesource.PersistentVolumeClaims,
+      *backup,
+      p.crClient,
+      p.log,
+    )
+    if err != nil {
+      return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
+    }
+
+    if shouldSnapshot {
+      filteredPVCs = append(filteredPVCs, pvc)
+    }
+  }
+
+  return filteredPVCs, nil
+}
+
 func (p *pvcBackupItemAction) determineCSIDriver(
   pvcs []corev1api.PersistentVolumeClaim,
 ) (string, error) {
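The filter above hands each PVC to the volume-policy helper as an unstructured object. A minimal, self-contained sketch of just that conversion step (illustrative only, not code from this diff; it assumes the k8s.io/api and k8s.io/apimachinery modules are on the module path):

```go
package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    pvc := corev1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
    }

    // Convert the typed object into map[string]interface{} form, the shape
    // that generic helpers such as the volume-policy check consume.
    obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
    if err != nil {
        panic(err)
    }
    u := &unstructured.Unstructured{Object: obj}
    fmt.Println(u.GetNamespace() + "/" + u.GetName())
}
```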
@@ -586,6 +586,280 @@ func TestListGroupedPVCs(t *testing.T) {
   }
 }

+func TestFilterPVCsByVolumePolicy(t *testing.T) {
+  tests := []struct {
+    name            string
+    pvcs            []corev1api.PersistentVolumeClaim
+    pvs             []corev1api.PersistentVolume
+    volumePolicyStr string
+    expectCount     int
+    expectError     bool
+  }{
+    {
+      name: "All PVCs should be included when no volume policy",
+      pvcs: []corev1api.PersistentVolumeClaim{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-1",
+            StorageClassName: pointer.String("sc-1"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-2",
+            StorageClassName: pointer.String("sc-1"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+      },
+      pvs: []corev1api.PersistentVolume{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
+            },
+          },
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
+            },
+          },
+        },
+      },
+      expectCount: 2,
+    },
+    {
+      name: "Filter out NFS PVC by volume policy",
+      pvcs: []corev1api.PersistentVolumeClaim{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-csi",
+            StorageClassName: pointer.String("sc-1"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-nfs",
+            StorageClassName: pointer.String("sc-nfs"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+      },
+      pvs: []corev1api.PersistentVolume{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
+            },
+          },
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              NFS: &corev1api.NFSVolumeSource{
+                Server: "nfs-server",
+                Path:   "/export",
+              },
+            },
+          },
+        },
+      },
+      volumePolicyStr: `
+version: v1
+volumePolicies:
+  - conditions:
+      nfs: {}
+    action:
+      type: skip
+`,
+      expectCount: 1,
+    },
+    {
+      name: "All PVCs filtered out by volume policy",
+      pvcs: []corev1api.PersistentVolumeClaim{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-nfs-1",
+            StorageClassName: pointer.String("sc-nfs"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-nfs-2",
+            StorageClassName: pointer.String("sc-nfs"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+      },
+      pvs: []corev1api.PersistentVolume{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              NFS: &corev1api.NFSVolumeSource{
+                Server: "nfs-server",
+                Path:   "/export/1",
+              },
+            },
+          },
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              NFS: &corev1api.NFSVolumeSource{
+                Server: "nfs-server",
+                Path:   "/export/2",
+              },
+            },
+          },
+        },
+      },
+      volumePolicyStr: `
+version: v1
+volumePolicies:
+  - conditions:
+      nfs: {}
+    action:
+      type: skip
+`,
+      expectCount: 0,
+    },
+    {
+      name: "Filter out non-CSI PVCs from mixed driver group",
+      pvcs: []corev1api.PersistentVolumeClaim{
+        {
+          ObjectMeta: metav1.ObjectMeta{
+            Name:      "pvc-linstor",
+            Namespace: "ns-1",
+            Labels:    map[string]string{"app.kubernetes.io/instance": "myapp"},
+          },
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-linstor",
+            StorageClassName: pointer.String("sc-linstor"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{
+            Name:      "pvc-nfs",
+            Namespace: "ns-1",
+            Labels:    map[string]string{"app.kubernetes.io/instance": "myapp"},
+          },
+          Spec: corev1api.PersistentVolumeClaimSpec{
+            VolumeName:       "pv-nfs",
+            StorageClassName: pointer.String("sc-nfs"),
+          },
+          Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
+        },
+      },
+      pvs: []corev1api.PersistentVolume{
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
+            },
+          },
+        },
+        {
+          ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
+          Spec: corev1api.PersistentVolumeSpec{
+            PersistentVolumeSource: corev1api.PersistentVolumeSource{
+              NFS: &corev1api.NFSVolumeSource{
+                Server: "nfs-server",
+                Path:   "/export",
+              },
+            },
+          },
+        },
+      },
+      volumePolicyStr: `
+version: v1
+volumePolicies:
+  - conditions:
+      nfs: {}
+    action:
+      type: skip
+`,
+      expectCount: 1,
+    },
+  }
+
+  for _, tt := range tests {
+    t.Run(tt.name, func(t *testing.T) {
+      objs := []runtime.Object{}
+      for i := range tt.pvs {
+        objs = append(objs, &tt.pvs[i])
+      }
+
+      client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
+
+      backup := &velerov1api.Backup{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      "test-backup",
+          Namespace: "velero",
+        },
+        Spec: velerov1api.BackupSpec{},
+      }
+
+      // Add volume policy ConfigMap if specified
+      if tt.volumePolicyStr != "" {
+        cm := &corev1api.ConfigMap{
+          ObjectMeta: metav1.ObjectMeta{
+            Name:      "volume-policy",
+            Namespace: "velero",
+          },
+          Data: map[string]string{
+            "volume-policy": tt.volumePolicyStr,
+          },
+        }
+        require.NoError(t, client.Create(t.Context(), cm))
+
+        backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{
+          Kind: "ConfigMap",
+          Name: "volume-policy",
+        }
+      }
+
+      action := &pvcBackupItemAction{
+        log:      velerotest.NewLogger(),
+        crClient: client,
+      }
+
+      result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup)
+      if tt.expectError {
+        require.Error(t, err)
+      } else {
+        require.NoError(t, err)
+        require.Len(t, result, tt.expectCount)
+
+        // For mixed driver scenarios, verify filtered result can determine single CSI driver
+        if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 {
+          driver, err := action.determineCSIDriver(result)
+          require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error")
+          require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS")
+        }
+      }
+    })
+  }
+}
+
 func TestDetermineCSIDriver(t *testing.T) {
   tests := []struct {
     name string
@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)
   if err != nil {

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
   if os.Getenv(cmdtest.CaptureFlag) == "1" {
     return
   }
-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)

@@ -91,7 +91,7 @@ func TestNewDownloadCommand(t *testing.T) {
     assert.NoError(t, e)
     return
   }
-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   _, stderr, err := veleroexec.RunCommand(cmd)

@@ -63,7 +63,7 @@ func TestNewGetCommand(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)
   require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestNewGetCommand(t *testing.T) {
   e = d.Execute()
   require.NoError(t, e)

-  cmd = exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+  cmd = exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err = veleroexec.RunCommand(cmd)
   require.NoError(t, err)

@@ -66,7 +66,7 @@ func TestNewDeleteCommand(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)

@@ -50,7 +50,7 @@ func TestNewGetCommand(t *testing.T) {
   c.Execute()
   return
   }
-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   _, stderr, err := veleroexec.RunCommand(cmd)

@@ -99,7 +99,7 @@ func TestSetCommand_Execute(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   _, stderr, err := veleroexec.RunCommand(cmd)
@@ -18,6 +18,7 @@ package bug

 import (
   "bytes"
+  "context"
   "errors"
   "fmt"
   "net/url"
@@ -147,7 +148,7 @@ func getKubectlVersion() (string, error) {
     return "", errors.New("kubectl not found on PATH")
   }

-  kubectlCmd := exec.Command("kubectl", "version")
+  kubectlCmd := exec.CommandContext(context.Background(), "kubectl", "version")
   var outbuf bytes.Buffer
   kubectlCmd.Stdout = &outbuf
   if err := kubectlCmd.Start(); err != nil {
@@ -207,16 +208,17 @@ func renderToString(bugInfo *VeleroBugInfo) (string, error) {
 // a platform specific binary.
 func showIssueInBrowser(body string) error {
   url := issueURL + "?body=" + url.QueryEscape(body)
+  ctx := context.Background()
   switch runtime.GOOS {
   case "darwin":
-    return exec.Command("open", url).Start()
+    return exec.CommandContext(ctx, "open", url).Start()
   case "linux":
     if cmdExistsOnPath("xdg-open") {
-      return exec.Command("xdg-open", url).Start()
+      return exec.CommandContext(ctx, "xdg-open", url).Start()
     }
     return fmt.Errorf("velero can't open a browser window using the command '%s'", "xdg-open")
   case "windows":
-    return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
+    return exec.CommandContext(ctx, "rundll32", "url.dll,FileProtocolHandler", url).Start()
   default:
     return fmt.Errorf("velero can't open a browser window on platform %s", runtime.GOOS)
   }
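These hunks, like the CLI test changes above, replace exec.Command with exec.CommandContext. A standalone sketch of why that matters (general os/exec behavior, not code from this diff; "kubectl" is just an example binary): with a context attached, cancelling the context or hitting its deadline kills the child process, which plain exec.Command cannot do.

```go
package main

import (
    "context"
    "fmt"
    "os/exec"
    "time"
)

func main() {
    // The child process is terminated automatically if the context is
    // cancelled or its 2-second deadline expires.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    out, err := exec.CommandContext(ctx, "kubectl", "version").CombinedOutput()
    if err != nil {
        fmt.Println("command failed or timed out:", err)
        return
    }
    fmt.Println(string(out))
}
```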
@@ -363,7 +363,7 @@ func (s *nodeAgentServer) run() {
     s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
   }

-  pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
+  pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, s.backupRepoConfigs, cachePVCConfig, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup, s.repoConfigMgr)
   if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
     s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
   }
@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)
   if err != nil {

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
   if os.Getenv(cmdtest.CaptureFlag) == "1" {
     return
   }
-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)

@@ -62,7 +62,7 @@ func TestNewGetCommand(t *testing.T) {
     return
   }

-  cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
+  cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
   cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
   stdout, _, err := veleroexec.RunCommand(cmd)
   require.NoError(t, err)
@@ -275,7 +275,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
     log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
   }

-  if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
+  if err := maintenance.DeleteOldJobs(r.Client, *backupRepo, keepJobs, log); err != nil {
     log.WithError(err).Warn("Failed to delete old maintenance jobs")
   }
 }
@@ -48,15 +48,18 @@ import (
   "github.com/vmware-tanzu/velero/pkg/datapath"
   "github.com/vmware-tanzu/velero/pkg/exposer"
   "github.com/vmware-tanzu/velero/pkg/nodeagent"
+  repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
   "github.com/vmware-tanzu/velero/pkg/restorehelper"
+  velerotypes "github.com/vmware-tanzu/velero/pkg/types"
   "github.com/vmware-tanzu/velero/pkg/uploader"
   "github.com/vmware-tanzu/velero/pkg/util"
   "github.com/vmware-tanzu/velero/pkg/util/kube"
 )

 func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
-  counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
-  logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler {
+  counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, backupRepoConfigs map[string]string,
+  cacheVolumeConfigs *velerotypes.CachePVC, podResources corev1api.ResourceRequirements, logger logrus.FieldLogger, dataMovePriorityClass string,
+  privileged bool, repoConfigMgr repository.ConfigManager) *PodVolumeRestoreReconciler {
   return &PodVolumeRestoreReconciler{
     client: client,
     mgr:    mgr,
@@ -65,6 +68,8 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
     nodeName:     nodeName,
     clock:        &clocks.RealClock{},
     podResources: podResources,
+    backupRepoConfigs:  backupRepoConfigs,
+    cacheVolumeConfigs: cacheVolumeConfigs,
     dataPathMgr:      dataPathMgr,
     vgdpCounter:      counter,
     preparingTimeout: preparingTimeout,
@@ -73,6 +78,7 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
     cancelledPVR:          make(map[string]time.Time),
     dataMovePriorityClass: dataMovePriorityClass,
     privileged:            privileged,
+    repoConfigMgr:         repoConfigMgr,
   }
 }

@@ -84,6 +90,8 @@ type PodVolumeRestoreReconciler struct {
   nodeName     string
   clock        clocks.WithTickerAndDelayedExecution
   podResources corev1api.ResourceRequirements
+  backupRepoConfigs  map[string]string
+  cacheVolumeConfigs *velerotypes.CachePVC
   exposer     exposer.PodVolumeExposer
   dataPathMgr *datapath.Manager
   vgdpCounter *exposer.VgdpCounter
@@ -92,6 +100,7 @@ type PodVolumeRestoreReconciler struct {
   cancelledPVR          map[string]time.Time
   dataMovePriorityClass string
   privileged            bool
+  repoConfigMgr         repository.ConfigManager
 }

 // +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -886,6 +895,19 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
     }
   }

+  var cacheVolume *exposer.CacheConfigs
+  if r.cacheVolumeConfigs != nil {
+    if limit, err := r.repoConfigMgr.ClientSideCacheLimit(velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs); err != nil {
+      log.WithError(err).Warnf("Failed to get client side cache limit for repo type %s from configs %v", velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs)
+    } else {
+      cacheVolume = &exposer.CacheConfigs{
+        Limit:             limit,
+        StorageClass:      r.cacheVolumeConfigs.StorageClass,
+        ResidentThreshold: r.cacheVolumeConfigs.ResidentThreshold,
+      }
+    }
+  }
+
   return exposer.PodVolumeExposeParam{
     Type:            exposer.PodVolumeExposeTypeRestore,
     ClientNamespace: pvr.Spec.Pod.Namespace,
@@ -896,6 +918,8 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
     HostingPodTolerations: hostingPodTolerations,
     OperationTimeout:      r.resourceTimeout,
     Resources:             r.podResources,
+    RestoreSize: pvr.Spec.SnapshotSize,
+    CacheVolume: cacheVolume,
     // Priority class name for the data mover pod, retrieved from node-agent-configmap
     PriorityClassName: r.dataMovePriorityClass,
     Privileged:        r.privileged,

@@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []

   dataPathMgr := datapath.NewManager(1)

-  return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil
+  return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, nil, nil, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false, nil), nil
 }

 func TestPodVolumeRestoreReconcile(t *testing.T) {
@@ -22,7 +22,8 @@ func TestPkgImportNoCloudProvider(t *testing.T) {
   t.Logf("Current test file path: %s", filename)
   t.Logf("Current test directory: %s", filepath.Dir(filename)) // should be this package name
   // go list -f {{.Deps}} ./<path-to-this-package-dir>
-  cmd := exec.Command(
+  cmd := exec.CommandContext(
+    t.Context(),
     "go",
     "list",
     "-f",
@@ -18,6 +18,7 @@ package label

 import (
   "crypto/sha256"
+  "crypto/sha3"
   "encoding/hex"
   "fmt"

@@ -49,6 +50,17 @@ func GetValidName(label string) string {
   return label[:charsFromLabel] + strSha[:6]
 }

+// ReturnNameOrHash returns the original name if it is within the DNS1035LabelMaxLength limit,
+// otherwise it returns the sha3 Sum224 hash (length is 56) of the name.
+func ReturnNameOrHash(name string) string {
+  if len(name) <= validation.DNS1035LabelMaxLength {
+    return name
+  }
+
+  hash := sha3.Sum224([]byte(name))
+  return hex.EncodeToString(hash[:])
+}
+
 // NewSelectorForBackup returns a Selector based on the backup name.
 // This is useful for interacting with Listers that need a Selector.
 func NewSelectorForBackup(name string) labels.Selector {
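Why the SHA3-224 digest fits: its 28 bytes hex-encode to exactly 56 characters, which stays under the 63-character DNS-1035 label limit. A standalone sketch of that arithmetic (not part of the diff; it assumes Go 1.24+, where crypto/sha3 is in the standard library):

```go
package main

import (
    "crypto/sha3" // standard library as of Go 1.24
    "encoding/hex"
    "fmt"
)

func main() {
    name := "this_is_a_very_long_backup_repository_name_that_exceeds_the_label_limit"
    hash := sha3.Sum224([]byte(name)) // 28-byte digest
    encoded := hex.EncodeToString(hash[:])
    fmt.Println(len(encoded), encoded) // 56 characters, under the 63-char limit
}
```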
@@ -48,6 +48,32 @@ func TestGetValidLabelName(t *testing.T) {
   }
 }

+func TestReturnNameOrHash(t *testing.T) {
+  tests := []struct {
+    name          string
+    label         string
+    expectedLabel string
+  }{
+    {
+      name:          "valid label name should not be modified",
+      label:         "short label value",
+      expectedLabel: "short label value",
+    },
+    {
+      name:          "label with more than 63 characters should be modified",
+      label:         "this_is_a_very_long_label_value_that_will_be_rejected_by_Kubernetes",
+      expectedLabel: "1a7399f2d00e268fc12daf431d6667319d1461e2609981070bb7e85c",
+    },
+  }
+
+  for _, test := range tests {
+    t.Run(test.name, func(t *testing.T) {
+      labelVal := ReturnNameOrHash(test.label)
+      assert.Equal(t, test.expectedLabel, labelVal)
+    })
+  }
+}
+
 func TestNewSelectorForBackup(t *testing.T) {
   selector := NewSelectorForBackup("my-backup")
   assert.Equal(t, "velero.io/backup-name=my-backup", selector.String())
@@ -18,6 +18,7 @@ limitations under the License.
 package process

 import (
+  "context"
   "os"
   "os/exec"

@@ -78,7 +79,7 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig {
     string(common.PluginKindItemBlockAction): ibav1.NewItemBlockActionPlugin(common.ClientLogger(b.clientLogger)),
   },
   Logger: b.pluginLogger,
-  Cmd: exec.Command(b.commandName, b.commandArgs...), //nolint:gosec // Internal call. No need to check the command line.
+  Cmd: exec.CommandContext(context.Background(), b.commandName, b.commandArgs...), //nolint:gosec // Internal call. No need to check the command line.
 }
 }
@@ -65,9 +65,11 @@ func TestClientConfig(t *testing.T) {
     string(common.PluginKindItemBlockAction): ibav1.NewItemBlockActionPlugin(common.ClientLogger(logger)),
   },
   Logger: cb.pluginLogger,
-  Cmd: exec.Command(cb.commandName, cb.commandArgs...),
+  Cmd: exec.CommandContext(t.Context(), cb.commandName, cb.commandArgs...),
 }

 cc := cb.clientConfig()
-assert.Equal(t, expected, cc)
+assert.Equal(t, expected.HandshakeConfig, cc.HandshakeConfig)
+assert.Equal(t, expected.AllowedProtocols, cc.AllowedProtocols)
+assert.Equal(t, expected.Plugins, cc.Plugins)
 }
@@ -17,8 +17,9 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

   api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -17,8 +17,9 @@ limitations under the License.
 package v2

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package v2
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime"

@@ -19,8 +19,9 @@ package v2
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/protobuf/types/known/emptypb"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

@@ -17,8 +17,9 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

   api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -22,7 +22,8 @@ func TestPkgImportNoCloudProvider(t *testing.T) {
   t.Logf("Current test file path: %s", filename)
   t.Logf("Current test directory: %s", filepath.Dir(filename)) // should be this package name
   // go list -f {{.Deps}} ./<path-to-this-package-dir>
-  cmd := exec.Command(
+  cmd := exec.CommandContext(
+    t.Context(),
     "go",
     "list",
     "-f",

@@ -17,8 +17,9 @@ limitations under the License.
 package v1

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package v1
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/runtime"
   "k8s.io/apimachinery/pkg/runtime/schema"

@@ -19,8 +19,9 @@ package v1
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

   api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -17,8 +17,9 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -20,8 +20,9 @@ import (
   "io"
   "time"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -20,8 +20,9 @@ import (
   "io"
   "time"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
   proto "github.com/vmware-tanzu/velero/pkg/plugin/generated"

@@ -17,9 +17,10 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -17,8 +17,9 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime/schema"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

   api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -17,8 +17,9 @@ limitations under the License.
 package v2

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package v2
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime/schema"

@@ -19,8 +19,9 @@ package v2
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/protobuf/types/known/durationpb"
   "google.golang.org/protobuf/types/known/emptypb"
   "google.golang.org/protobuf/types/known/timestamppb"

@@ -17,8 +17,9 @@ limitations under the License.
 package framework

 import (
+  "context"
+
   plugin "github.com/hashicorp/go-plugin"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "google.golang.org/grpc"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime"

@@ -19,8 +19,9 @@ package framework
 import (
   "encoding/json"

+  "context"
+
   "github.com/pkg/errors"
-  "golang.org/x/net/context"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

   "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
@@ -32,11 +32,13 @@ import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerolabel "github.com/vmware-tanzu/velero/pkg/label"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
"github.com/vmware-tanzu/velero/pkg/util"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
@@ -68,11 +70,22 @@ func GenerateJobName(repo string) string {
|
||||
}
|
||||
|
||||
// DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
|
||||
func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
|
||||
func DeleteOldJobs(cli client.Client, repo velerov1api.BackupRepository, keep int, logger logrus.FieldLogger) error {
|
||||
logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
|
||||
// Get the maintenance job list by label
|
||||
jobList := &batchv1api.JobList{}
|
||||
err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
|
||||
err := cli.List(
|
||||
context.TODO(),
|
||||
jobList,
|
||||
&client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
map[string]string{
|
||||
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
|
||||
},
|
||||
),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -339,10 +352,17 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string,
|
||||
// and then return the maintenance jobs' status in the range of limit
|
||||
func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) {
|
||||
jobList := &batchv1api.JobList{}
|
||||
err := cli.List(context.TODO(), jobList, &client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
},
|
||||
client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name}),
|
||||
err := cli.List(
|
||||
context.TODO(),
|
||||
jobList,
|
||||
&client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
map[string]string{
|
||||
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
|
||||
},
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -558,7 +578,7 @@ func buildJob(
|
||||
}
|
||||
|
||||
podLabels := map[string]string{
|
||||
RepositoryNameLabel: repo.Name,
|
||||
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
|
||||
}
|
||||
|
||||
for _, k := range util.ThirdPartyLabels {
|
||||
@@ -588,7 +608,7 @@ func buildJob(
|
||||
Name: GenerateJobName(repo.Name),
|
||||
Namespace: repo.Namespace,
|
||||
Labels: map[string]string{
|
||||
RepositoryNameLabel: repo.Name,
|
||||
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
|
||||
},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{
|
||||
|
||||
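The hunks above replace every raw repository name in job labels with velerolabel.ReturnNameOrHash(repo.Name), since Kubernetes label values are capped at 63 characters while BackupRepository names can be longer. As an illustration only (the real helper lives in pkg/label and this diff does not show its body), a helper with that contract could look like the following sketch:

// Hypothetical sketch, not the Velero implementation: keep the name when it
// fits a Kubernetes label value, otherwise fall back to a fixed-length hash.
package label

import (
	"crypto/sha256"
	"encoding/hex"
)

const maxLabelValueLen = 63

func ReturnNameOrHash(name string) string {
	if len(name) <= maxLabelValueLen {
		return name
	}
	sum := sha256.Sum256([]byte(name))
	return hex.EncodeToString(sum[:])[:maxLabelValueLen]
}

The test changes that follow only rely on the helper being deterministic and returning a valid label value for over-long names.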
@@ -40,6 +40,7 @@ import (

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
velerolabel "github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/repository/provider"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
@@ -48,7 +49,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/logging"
)

func TestGenerateJobName1(t *testing.T) {
func TestGenerateJobName(t *testing.T) {
testCases := []struct {
repo string
expectedStart string
@@ -82,59 +83,62 @@ func TestGenerateJobName1(t *testing.T) {
}
func TestDeleteOldJobs(t *testing.T) {
// Set up test repo and keep value
repo := "test-repo"
keep := 2

// Create some maintenance jobs for testing
var objs []client.Object
// Create a newer job
newerJob := &batchv1api.Job{
repo := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Name: "job1",
Namespace: "default",
Labels: map[string]string{RepositoryNameLabel: repo},
Name: "label with more than 63 characters should be modified",
Namespace: velerov1api.DefaultNamespace,
},
}
keep := 1

jobArray := []client.Object{
&batchv1api.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-0",
Namespace: velerov1api.DefaultNamespace,
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
},
Spec: batchv1api.JobSpec{},
},
&batchv1api.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-1",
Namespace: velerov1api.DefaultNamespace,
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
},
Spec: batchv1api.JobSpec{},
},
}

newJob := &batchv1api.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-new",
Namespace: velerov1api.DefaultNamespace,
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
},
Spec: batchv1api.JobSpec{},
}
objs = append(objs, newerJob)
// Create older jobs
for i := 2; i <= 3; i++ {
olderJob := &batchv1api.Job{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("job%d", i),
Namespace: "default",
Labels: map[string]string{RepositoryNameLabel: repo},
CreationTimestamp: metav1.Time{
Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour),
},
},
Spec: batchv1api.JobSpec{},
}
objs = append(objs, olderJob)
}
// Create a fake Kubernetes client

// Create a fake Kubernetes client with 2 jobs.
scheme := runtime.NewScheme()
_ = batchv1api.AddToScheme(scheme)
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(jobArray...).Build()

// Create a new job
require.NoError(t, cli.Create(t.Context(), newJob))

// Call the function
err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
require.NoError(t, err)
require.NoError(t, DeleteOldJobs(cli, *repo, keep, velerotest.NewLogger()))

// Get the remaining jobs
jobList := &batchv1api.JobList{}
err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
require.NoError(t, err)
require.NoError(t, cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name})))

// We expect the number of jobs to be equal to 'keep'
assert.Len(t, jobList.Items, keep)

// We expect that the oldest jobs were deleted
// Job3 should not be present in the remaining list
assert.NotContains(t, jobList.Items, objs[2])

// Job2 should also not be present in the remaining list
assert.NotContains(t, jobList.Items, objs[1])
// Only the new created job should be left.
assert.Equal(t, jobList.Items[0].Name, newJob.Name)
}

func TestWaitForJobComplete(t *testing.T) {
@@ -571,7 +575,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
repo := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: "fake-repo",
Name: "label with more than 63 characters should be modified",
},
Spec: velerov1api.BackupRepositorySpec{
BackupStorageLocation: "default",
@@ -595,7 +599,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "job1",
Namespace: veleroNamespace,
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
CreationTimestamp: metav1.Time{Time: now},
},
}
@@ -604,7 +608,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "job1",
Namespace: veleroNamespace,
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
CreationTimestamp: metav1.Time{Time: now},
},
Status: batchv1api.JobStatus{
@@ -624,7 +628,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "job2",
Namespace: veleroNamespace,
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
},
Status: batchv1api.JobStatus{
@@ -645,7 +649,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "job3",
Namespace: veleroNamespace,
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)},
},
Status: batchv1api.JobStatus{
@@ -665,7 +669,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "job4",
Namespace: veleroNamespace,
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
Labels: map[string]string{RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name)},
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)},
},
Status: batchv1api.JobStatus{
@@ -698,7 +702,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
{
name: "list job error",
runtimeScheme: schemeFail,
expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
expectedError: "error listing maintenance job for repo label with more than 63 characters should be modified: no kind is registered for the type v1.JobList in scheme",
},
{
name: "job not exist",
@@ -943,6 +947,7 @@ func TestBuildJob(t *testing.T) {
expectedSecurityContext *corev1api.SecurityContext
expectedPodSecurityContext *corev1api.PodSecurityContext
expectedImagePullSecrets []corev1api.LocalObjectReference
backupRepository *velerov1api.BackupRepository
}{
{
name: "Valid maintenance job without third party labels",
@@ -1060,6 +1065,64 @@ func TestBuildJob(t *testing.T) {
expectedJobName: "",
expectedError: true,
},
{
name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
m: &velerotypes.JobConfigs{
PodResources: &kube.PodResources{
CPURequest: "100m",
MemoryRequest: "128Mi",
CPULimit: "200m",
MemoryLimit: "256Mi",
},
},
deploy: deploy2,
logLevel: logrus.InfoLevel,
logFormat: logging.NewFormatFlag(),
expectedError: false,
expectedEnv: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
expectedEnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
expectedPodLabel: map[string]string{
RepositoryNameLabel: velerolabel.ReturnNameOrHash("label with more than 63 characters should be modified"),
"azure.workload.identity/use": "fake-label-value",
},
expectedSecurityContext: nil,
expectedPodSecurityContext: nil,
expectedImagePullSecrets: []corev1api.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
backupRepository: &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: "velero",
Name: "label with more than 63 characters should be modified",
},
Spec: velerov1api.BackupRepositorySpec{
VolumeNamespace: "test-123",
RepositoryType: "kopia",
},
},
},
}

param := provider.RepoParam{
@@ -1083,6 +1146,10 @@ func TestBuildJob(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if tc.backupRepository != nil {
param.BackupRepo = tc.backupRepository
}

// Create a fake clientset with resources
objs := []runtime.Object{param.BackupLocation, param.BackupRepo}

@@ -189,7 +189,7 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam
"repo UID": param.BackupRepo.UID,
})

log.Debug("Start to prepare repo")
log.Info("Start to prepare repo")

repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
@@ -211,7 +211,7 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam
if created, err := urp.repoService.IsCreated(ctx, *repoOption); err != nil {
return errors.Wrap(err, "error to check backup repo")
} else if created {
log.Debug("Repo has already been initialized remotely")
log.Info("Repo has already been initialized")
return nil
}

@@ -224,7 +224,7 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam
return errors.Wrap(err, "error to create backup repo")
}

log.Debug("Prepare repo complete")
log.Info("Prepare repo complete")

return nil
}

@@ -18,12 +18,15 @@ package backend

import (
"context"
"errors"

"github.com/sirupsen/logrus"

"github.com/kopia/kopia/repo/blob"
)

var ErrStoreNotExist = errors.New("store does not exist")

// Store defines the methods for Kopia to establish a connection to
// the backend storage
type Store interface {

@@ -92,3 +92,10 @@ func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) r
},
}
}

func RepoOwnerFromRepoOptions(repoOptions udmrepo.RepoOptions) string {
hostname := optionalHaveStringWithDefault(udmrepo.GenOptionOwnerDomain, repoOptions.GeneralOptions, udmrepo.GetRepoDomain())
username := optionalHaveStringWithDefault(udmrepo.GenOptionOwnerName, repoOptions.GeneralOptions, udmrepo.GetRepoUser())

return username + "@" + hostname
}

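RepoOwnerFromRepoOptions above assembles the user@domain owner string from the repo options, falling back to the default repo user and domain when no override is set; writeInitParameters later in this diff compares it with the maintenance owner stored in the repository to decide whether the init parameters were already written. A minimal usage sketch, assuming the defaults resolve to "default"/"default" as the "existing param with identical owner" test case further below suggests:

// Illustration only; the import paths match those used elsewhere in this diff.
package main

import (
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend"
)

func main() {
	opts := udmrepo.RepoOptions{GeneralOptions: map[string]string{}}
	// With no owner overrides in GeneralOptions this is expected to print "default@default".
	fmt.Println(backend.RepoOwnerFromRepoOptions(opts))
}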
@@ -18,6 +18,7 @@ package backend

import (
"context"
"os"
"path/filepath"

"github.com/sirupsen/logrus"
@@ -62,6 +63,13 @@ func (c *FsBackend) Connect(ctx context.Context, isCreate bool, logger logrus.Fi
if !filepath.IsAbs(c.options.Path) {
return nil, errors.Errorf("filesystem repository path is not absolute, path: %s", c.options.Path)
}

if !isCreate {
if _, err := os.Stat(c.options.Path); err != nil {
return nil, ErrStoreNotExist
}
}

ctx = logging.WithLogger(ctx, logger)

return filesystem.New(ctx, &c.options, isCreate)

@@ -98,11 +98,26 @@ func NewKopiaRepoService(logger logrus.FieldLogger) udmrepo.BackupRepoService {
func (ks *kopiaRepoService) Create(ctx context.Context, repoOption udmrepo.RepoOptions) error {
repoCtx := kopia.SetupKopiaLog(ctx, ks.logger)

if err := CreateBackupRepo(repoCtx, repoOption, ks.logger); err != nil {
return err
status, err := GetRepositoryStatus(ctx, repoOption, ks.logger)
if err != nil {
return errors.Wrap(err, "error getting repo status")
}

return writeInitParameters(repoCtx, repoOption, ks.logger)
if status != RepoStatusSystemNotCreated && status != RepoStatusNotInitialized {
return errors.Errorf("unexpected repo status %v", status)
}

if status == RepoStatusSystemNotCreated {
if err := CreateBackupRepo(repoCtx, repoOption, ks.logger); err != nil {
return errors.Wrap(err, "error creating backup repo")
}
}

if err := InitializeBackupRepo(ctx, repoOption, ks.logger); err != nil {
return errors.Wrap(err, "error initializing backup repo")
}

return nil
}

func (ks *kopiaRepoService) Connect(ctx context.Context, repoOption udmrepo.RepoOptions) error {
@@ -114,7 +129,17 @@ func (ks *kopiaRepoService) Connect(ctx context.Context, repoOption udmrepo.Repo
func (ks *kopiaRepoService) IsCreated(ctx context.Context, repoOption udmrepo.RepoOptions) (bool, error) {
repoCtx := kopia.SetupKopiaLog(ctx, ks.logger)

return IsBackupRepoCreated(repoCtx, repoOption, ks.logger)
status, err := GetRepositoryStatus(repoCtx, repoOption, ks.logger)
if err != nil {
return false, err
}

if status != RepoStatusCreated {
ks.logger.Infof("Repo is not fully created, status %v", status)
return false, nil
}

return true, nil
}

func (ks *kopiaRepoService) Open(ctx context.Context, repoOption udmrepo.RepoOptions) (udmrepo.BackupRepo, error) {
@@ -612,73 +637,3 @@ func openKopiaRepo(ctx context.Context, configFile string, password string, _ *o
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func writeInitParameters(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error {
|
||||
r, err := openKopiaRepo(ctx, repoOption.ConfigFilePath, repoOption.RepoPassword, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
c := r.Close(ctx)
|
||||
if c != nil {
|
||||
logger.WithError(c).Error("Failed to close repo")
|
||||
}
|
||||
}()
|
||||
|
||||
err = repo.WriteSession(ctx, r, repo.WriteSessionOptions{
|
||||
Purpose: "set init parameters",
|
||||
}, func(ctx context.Context, w repo.RepositoryWriter) error {
|
||||
p := maintenance.DefaultParams()
|
||||
|
||||
if overwriteFullMaintainInterval != time.Duration(0) {
|
||||
logger.Infof("Full maintenance interval change from %v to %v", p.FullCycle.Interval, overwriteFullMaintainInterval)
|
||||
p.FullCycle.Interval = overwriteFullMaintainInterval
|
||||
}
|
||||
|
||||
if overwriteQuickMaintainInterval != time.Duration(0) {
|
||||
logger.Infof("Quick maintenance interval change from %v to %v", p.QuickCycle.Interval, overwriteQuickMaintainInterval)
|
||||
p.QuickCycle.Interval = overwriteQuickMaintainInterval
|
||||
}
|
||||
// the repoOption.StorageOptions are set via
|
||||
// udmrepo.WithStoreOptions -> udmrepo.GetStoreOptions (interface)
|
||||
// -> pkg/repository/provider.GetStoreOptions(param interface{}) -> pkg/repository/provider.getStorageVariables(..., backupRepoConfig)
|
||||
// where backupRepoConfig comes from param.(RepoParam).BackupRepo.Spec.RepositoryConfig map[string]string
|
||||
// where RepositoryConfig comes from pkg/controller/getBackupRepositoryConfig(...)
|
||||
// where it gets a configMap name from pkg/cmd/server/config/Config.BackupRepoConfig
|
||||
// which gets set via velero server flag `backup-repository-configmap` "The name of ConfigMap containing backup repository configurations."
|
||||
// and data stored as json under ConfigMap.Data[repoType] where repoType is BackupRepository.Spec.RepositoryType: either kopia or restic
|
||||
// repoOption.StorageOptions[udmrepo.StoreOptionKeyFullMaintenanceInterval] would for example look like
|
||||
// configMapName.data.kopia: {"fullMaintenanceInterval": "eagerGC"}
|
||||
fullMaintIntervalOption := udmrepo.FullMaintenanceIntervalOptions(repoOption.StorageOptions[udmrepo.StoreOptionKeyFullMaintenanceInterval])
|
||||
priorMaintInterval := p.FullCycle.Interval
|
||||
switch fullMaintIntervalOption {
|
||||
case udmrepo.FastGC:
|
||||
p.FullCycle.Interval = udmrepo.FastGCInterval
|
||||
case udmrepo.EagerGC:
|
||||
p.FullCycle.Interval = udmrepo.EagerGCInterval
|
||||
case udmrepo.NormalGC:
|
||||
p.FullCycle.Interval = udmrepo.NormalGCInterval
|
||||
case "": // do nothing
|
||||
default:
|
||||
return errors.Errorf("invalid full maintenance interval option %s", fullMaintIntervalOption)
|
||||
}
|
||||
if priorMaintInterval != p.FullCycle.Interval {
|
||||
logger.Infof("Full maintenance interval change from %v to %v", priorMaintInterval, p.FullCycle.Interval)
|
||||
}
|
||||
|
||||
p.Owner = r.ClientOptions().UsernameAtHost()
|
||||
|
||||
if err := maintenance.SetParams(ctx, w, &p); err != nil {
|
||||
return errors.Wrap(err, "error to set maintenance params")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error to init write repo parameters")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/maintenance"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/kopia/kopia/repo/object"
|
||||
"github.com/pkg/errors"
|
||||
@@ -264,177 +263,6 @@ func TestMaintain(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteInitParameters(t *testing.T) {
|
||||
var directRpo *repomocks.DirectRepository
|
||||
assertFullMaintIntervalEqual := func(expected, actual *maintenance.Params) bool {
|
||||
return assert.Equal(t, expected.FullCycle.Interval, actual.FullCycle.Interval)
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
repoOptions udmrepo.RepoOptions
|
||||
returnRepo *repomocks.DirectRepository
|
||||
returnRepoWriter *repomocks.DirectRepositoryWriter
|
||||
repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error)
|
||||
newRepoWriterError error
|
||||
replaceManifestError error
|
||||
// expected replacemanifest params to be received by maintenance.SetParams, and therefore writeInitParameters
|
||||
expectedReplaceManifestsParams *maintenance.Params
|
||||
// allows for asserting only certain fields are set as expected
|
||||
assertReplaceManifestsParams func(*maintenance.Params, *maintenance.Params) bool
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "repo open fail, repo not exist",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return nil, os.ErrNotExist
|
||||
},
|
||||
expectedErr: "error to open repo, repo doesn't exist: file does not exist",
|
||||
},
|
||||
{
|
||||
name: "repo open fail, other error",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return nil, errors.New("fake-repo-open-error")
|
||||
},
|
||||
expectedErr: "error to open repo: fake-repo-open-error",
|
||||
},
|
||||
{
|
||||
name: "write session fail",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
newRepoWriterError: errors.New("fake-new-writer-error"),
|
||||
expectedErr: "error to init write repo parameters: unable to create writer: fake-new-writer-error",
|
||||
},
|
||||
{
|
||||
name: "set repo param fail",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
replaceManifestError: errors.New("fake-replace-manifest-error"),
|
||||
expectedErr: "error to init write repo parameters: error to set maintenance params: put manifest: fake-replace-manifest-error",
|
||||
},
|
||||
{
|
||||
name: "repo with maintenance interval has expected params",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string(udmrepo.FastGC),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedReplaceManifestsParams: &maintenance.Params{
|
||||
FullCycle: maintenance.CycleParams{
|
||||
Interval: udmrepo.FastGCInterval,
|
||||
},
|
||||
},
|
||||
assertReplaceManifestsParams: assertFullMaintIntervalEqual,
|
||||
},
|
||||
{
|
||||
name: "repo with empty maintenance interval has expected params",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string(""),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedReplaceManifestsParams: &maintenance.Params{
|
||||
FullCycle: maintenance.CycleParams{
|
||||
Interval: udmrepo.NormalGCInterval,
|
||||
},
|
||||
},
|
||||
assertReplaceManifestsParams: assertFullMaintIntervalEqual,
|
||||
},
|
||||
{
|
||||
name: "repo with invalid maintenance interval has expected errors",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string("foo"),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedErr: "error to init write repo parameters: invalid full maintenance interval option foo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
logger := velerotest.NewLogger()
|
||||
ctx := t.Context()
|
||||
|
||||
if tc.repoOpen != nil {
|
||||
kopiaRepoOpen = tc.repoOpen
|
||||
}
|
||||
|
||||
if tc.returnRepo != nil {
|
||||
directRpo = tc.returnRepo
|
||||
}
|
||||
|
||||
if tc.returnRepo != nil {
|
||||
tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError)
|
||||
tc.returnRepo.On("ClientOptions").Return(repo.ClientOptions{})
|
||||
tc.returnRepo.On("Close", mock.Anything).Return(nil)
|
||||
}
|
||||
|
||||
if tc.returnRepoWriter != nil {
|
||||
tc.returnRepoWriter.On("Close", mock.Anything).Return(nil)
|
||||
if tc.replaceManifestError != nil {
|
||||
tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.Anything, mock.Anything).Return(manifest.ID(""), tc.replaceManifestError)
|
||||
}
|
||||
if tc.expectedReplaceManifestsParams != nil {
|
||||
tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.AnythingOfType("map[string]string"), mock.AnythingOfType("*maintenance.Params")).Return(manifest.ID(""), nil)
|
||||
tc.returnRepoWriter.On("Flush", mock.Anything).Return(nil)
|
||||
}
|
||||
}
|
||||
|
||||
err := writeInitParameters(ctx, tc.repoOptions, logger)
|
||||
|
||||
if tc.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.EqualError(t, err, tc.expectedErr)
|
||||
}
|
||||
if tc.expectedReplaceManifestsParams != nil {
|
||||
actualReplaceManifestsParams, converted := tc.returnRepoWriter.Calls[0].Arguments.Get(2).(*maintenance.Params)
|
||||
assert.True(t, converted)
|
||||
tc.assertReplaceManifestsParams(tc.expectedReplaceManifestsParams, actualReplaceManifestsParams)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldLog(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -18,13 +18,18 @@ package kopialib

import (
"context"
"encoding/json"
"io"
"slices"
"strings"
"time"

"github.com/sirupsen/logrus"

"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/maintenance"
"github.com/pkg/errors"

"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
@@ -45,6 +50,22 @@ var backendStores = []kopiaBackendStore{
{udmrepo.StorageTypeS3, "an S3 bucket", &backend.S3Backend{}},
}

const udmRepoBlobID = "udmrepo.Repository"

type udmRepoMetadata struct {
UniqueID []byte `json:"uniqueID"`
}

type RepoStatus int

const (
RepoStatusUnknown = 0
RepoStatusCorrupted = 1
RepoStatusSystemNotCreated = 2
RepoStatusNotInitialized = 3
RepoStatusCreated = 4
)

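The RepoStatus values above split repository readiness into storage-level and Velero-level stages, and the reworked Create method earlier in this diff acts on them. The sketch below condenses that decision; it is an illustration that assumes it sits in the kopialib package next to the definitions above (github.com/pkg/errors is already imported there), not additional Velero code:

// Illustrative helper mirroring the Create flow shown in this diff:
// decide which steps are still needed for a given repository status.
func createSteps(status RepoStatus) (createRepo, initialize bool, err error) {
	switch status {
	case RepoStatusSystemNotCreated:
		// No Kopia repository in the storage yet: create it, then initialize.
		return true, true, nil
	case RepoStatusNotInitialized:
		// Kopia repository exists but the udmrepo metadata blob is missing: only initialize.
		return false, true, nil
	default:
		// Created, Corrupted, and Unknown are not valid states for Create.
		return false, false, errors.Errorf("unexpected repo status %v", status)
	}
}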
// CreateBackupRepo creates a Kopia repository and then connect to it.
// The storage must be empty, otherwise, it will fail
func CreateBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error {
@@ -73,14 +94,9 @@ func ConnectBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions, logg
return errors.New("invalid config file path")
}

backendStore, err := setupBackendStore(ctx, repoOption.StorageType, repoOption.StorageOptions, logger)
st, err := connectStore(ctx, repoOption, logger)
if err != nil {
return errors.Wrap(err, "error to setup backend storage")
}

st, err := backendStore.store.Connect(ctx, false, logger)
if err != nil {
return errors.Wrap(err, "error to connect to storage")
return err
}

err = connectWithStorage(ctx, st, repoOption)
@@ -91,32 +107,119 @@ func ConnectBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions, logg
return nil
}

func IsBackupRepoCreated(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) (bool, error) {
backendStore, err := setupBackendStore(ctx, repoOption.StorageType, repoOption.StorageOptions, logger)
if err != nil {
return false, errors.Wrap(err, "error to setup backend storage")
}

st, err := backendStore.store.Connect(ctx, false, logger)
if err != nil {
return false, errors.Wrap(err, "error to connect to storage")
func GetRepositoryStatus(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) (RepoStatus, error) {
st, err := connectStore(ctx, repoOption, logger)
if errors.Is(err, backend.ErrStoreNotExist) {
return RepoStatusSystemNotCreated, nil
} else if err != nil {
return RepoStatusUnknown, err
}

var formatBytes byteBuffer
if err := st.GetBlob(ctx, format.KopiaRepositoryBlobID, 0, -1, &formatBytes); err != nil {
if errors.Is(err, blob.ErrBlobNotFound) {
return false, nil
logger.Debug("Kopia repository blob is not found")
return RepoStatusSystemNotCreated, nil
}

return false, errors.Wrap(err, "error to read format blob")
return RepoStatusUnknown, errors.Wrap(err, "error reading format blob")
}

_, err = format.ParseKopiaRepositoryJSON(formatBytes.buffer)
repoFmt, err := format.ParseKopiaRepositoryJSON(formatBytes.buffer)
if err != nil {
return false, err
return RepoStatusCorrupted, err
}

return true, nil
var initInfoBytes byteBuffer
if err := st.GetBlob(ctx, udmRepoBlobID, 0, -1, &initInfoBytes); err != nil {
if errors.Is(err, blob.ErrBlobNotFound) {
logger.Debug("Udm repo metadata blob is not found")
return RepoStatusNotInitialized, nil
}

return RepoStatusUnknown, errors.Wrap(err, "error reading udm repo blob")
}

udmpRepo := &udmRepoMetadata{}
if err := json.Unmarshal(initInfoBytes.buffer, udmpRepo); err != nil {
return RepoStatusCorrupted, errors.Wrap(err, "invalid udm repo blob")
}

if !slices.Equal(udmpRepo.UniqueID, repoFmt.UniqueID) {
return RepoStatusCorrupted, errors.Errorf("unique ID doesn't match: %v(%v)", udmpRepo.UniqueID, repoFmt.UniqueID)
}

return RepoStatusCreated, nil
}

func InitializeBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error {
if repoOption.ConfigFilePath == "" {
return errors.New("invalid config file path")
}

st, err := connectStore(ctx, repoOption, logger)
if err != nil {
return err
}

err = connectWithStorage(ctx, st, repoOption)
if err != nil {
return errors.Wrap(err, "error connecting repo with storage")
}

err = writeInitParameters(ctx, repoOption, logger)
if err != nil {
return errors.Wrap(err, "error writing init parameters")
}

err = writeUdmRepoMetadata(ctx, st)
if err != nil {
return errors.Wrap(err, "error writing udm repo metadata")
}

return nil
}

func writeUdmRepoMetadata(ctx context.Context, st blob.Storage) error {
var formatBytes byteBuffer
if err := st.GetBlob(ctx, format.KopiaRepositoryBlobID, 0, -1, &formatBytes); err != nil {
return errors.Wrap(err, "error reading format blob")
}

repoFmt, err := format.ParseKopiaRepositoryJSON(formatBytes.buffer)
if err != nil {
return err
}

udmpRepo := &udmRepoMetadata{
UniqueID: repoFmt.UniqueID,
}

bytes, err := json.Marshal(udmpRepo)
if err != nil {
return errors.Wrap(err, "error marshaling udm repo metadata")
}

err = st.PutBlob(ctx, udmRepoBlobID, &byteBuffer{bytes}, blob.PutOptions{})
if err != nil {
return errors.Wrap(err, "error writing udm repo metadata")
}

return nil
}

func connectStore(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) (blob.Storage, error) {
backendStore, err := setupBackendStore(ctx, repoOption.StorageType, repoOption.StorageOptions, logger)
if err != nil {
return nil, errors.Wrap(err, "error to setup backend storage")
}

st, err := backendStore.store.Connect(ctx, false, logger)
if err != nil {
return nil, errors.Wrap(err, "error to connect to storage")
}

return st, nil
}

func findBackendStore(storage string) *kopiaBackendStore {
@@ -185,11 +288,21 @@ type byteBuffer struct {
buffer []byte
}

func (b *byteBuffer) Write(p []byte) (n int, err error) {
type byteBufferReader struct {
buffer []byte
pos int
}

func (b *byteBuffer) Write(p []byte) (int, error) {
b.buffer = append(b.buffer, p...)
return len(p), nil
}

func (b *byteBuffer) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(b.buffer)
return int64(n), err
}

func (b *byteBuffer) Reset() {
b.buffer = nil
}
@@ -197,3 +310,129 @@ func (b *byteBuffer) Reset() {
func (b *byteBuffer) Length() int {
return len(b.buffer)
}

func (b *byteBuffer) Reader() io.ReadSeekCloser {
return &byteBufferReader{buffer: b.buffer}
}

func (b *byteBufferReader) Close() error {
return nil
}

func (b *byteBufferReader) Read(out []byte) (int, error) {
if b.pos == len(b.buffer) {
return 0, io.EOF
}

copied := copy(out, b.buffer[b.pos:])
b.pos += copied

return copied, nil
}

func (b *byteBufferReader) Seek(offset int64, whence int) (int64, error) {
newOffset := b.pos

switch whence {
case io.SeekStart:
newOffset = int(offset)
case io.SeekCurrent:
newOffset += int(offset)
case io.SeekEnd:
newOffset = len(b.buffer) + int(offset)
}

if newOffset < 0 || newOffset > len(b.buffer) {
return -1, errors.New("invalid seek")
}

b.pos = newOffset

return int64(newOffset), nil
}

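byteBuffer implements the in-memory output buffer handed to GetBlob (Write, WriteTo, Reset, Length), and byteBufferReader supplies the seekable reader that PutBlob consumes via Reader(). A small round trip, written as if it lived in the kopialib package next to the types above (illustration only):

// Fill the buffer the way GetBlob does, then read it back through the
// seekable reader used when the buffer is handed to PutBlob.
func exampleByteBufferRoundTrip() (string, error) {
	var b byteBuffer
	if _, err := b.Write([]byte("hello")); err != nil {
		return "", err
	}

	r := b.Reader()
	if _, err := r.Seek(1, io.SeekStart); err != nil {
		return "", err
	}

	out := make([]byte, b.Length())
	n, err := r.Read(out)
	if err != nil && err != io.EOF {
		return "", err
	}
	return string(out[:n]), nil // "ello"
}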
var funcGetParam = maintenance.GetParams
|
||||
|
||||
func writeInitParameters(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error {
|
||||
r, err := openKopiaRepo(ctx, repoOption.ConfigFilePath, repoOption.RepoPassword, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
c := r.Close(ctx)
|
||||
if c != nil {
|
||||
logger.WithError(c).Error("Failed to close repo")
|
||||
}
|
||||
}()
|
||||
|
||||
params, err := funcGetParam(ctx, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting existing maintenance params")
|
||||
}
|
||||
|
||||
if params.Owner == backend.RepoOwnerFromRepoOptions(repoOption) {
|
||||
logger.Warn("Init parameters already exists, skip")
|
||||
return nil
|
||||
}
|
||||
|
||||
if params.Owner != "" {
|
||||
logger.Warnf("Overwriting existing init params %v", params)
|
||||
}
|
||||
|
||||
err = repo.WriteSession(ctx, r, repo.WriteSessionOptions{
|
||||
Purpose: "set init parameters",
|
||||
}, func(ctx context.Context, w repo.RepositoryWriter) error {
|
||||
p := maintenance.DefaultParams()
|
||||
|
||||
if overwriteFullMaintainInterval != time.Duration(0) {
|
||||
logger.Infof("Full maintenance interval change from %v to %v", p.FullCycle.Interval, overwriteFullMaintainInterval)
|
||||
p.FullCycle.Interval = overwriteFullMaintainInterval
|
||||
}
|
||||
|
||||
if overwriteQuickMaintainInterval != time.Duration(0) {
|
||||
logger.Infof("Quick maintenance interval change from %v to %v", p.QuickCycle.Interval, overwriteQuickMaintainInterval)
|
||||
p.QuickCycle.Interval = overwriteQuickMaintainInterval
|
||||
}
|
||||
// the repoOption.StorageOptions are set via
|
||||
// udmrepo.WithStoreOptions -> udmrepo.GetStoreOptions (interface)
|
||||
// -> pkg/repository/provider.GetStoreOptions(param interface{}) -> pkg/repository/provider.getStorageVariables(..., backupRepoConfig)
|
||||
// where backupRepoConfig comes from param.(RepoParam).BackupRepo.Spec.RepositoryConfig map[string]string
|
||||
// where RepositoryConfig comes from pkg/controller/getBackupRepositoryConfig(...)
|
||||
// where it gets a configMap name from pkg/cmd/server/config/Config.BackupRepoConfig
|
||||
// which gets set via velero server flag `backup-repository-configmap` "The name of ConfigMap containing backup repository configurations."
|
||||
// and data stored as json under ConfigMap.Data[repoType] where repoType is BackupRepository.Spec.RepositoryType: either kopia or restic
|
||||
// repoOption.StorageOptions[udmrepo.StoreOptionKeyFullMaintenanceInterval] would for example look like
|
||||
// configMapName.data.kopia: {"fullMaintenanceInterval": "eagerGC"}
|
||||
fullMaintIntervalOption := udmrepo.FullMaintenanceIntervalOptions(repoOption.StorageOptions[udmrepo.StoreOptionKeyFullMaintenanceInterval])
|
||||
priorMaintInterval := p.FullCycle.Interval
|
||||
switch fullMaintIntervalOption {
|
||||
case udmrepo.FastGC:
|
||||
p.FullCycle.Interval = udmrepo.FastGCInterval
|
||||
case udmrepo.EagerGC:
|
||||
p.FullCycle.Interval = udmrepo.EagerGCInterval
|
||||
case udmrepo.NormalGC:
|
||||
p.FullCycle.Interval = udmrepo.NormalGCInterval
|
||||
case "": // do nothing
|
||||
default:
|
||||
return errors.Errorf("invalid full maintenance interval option %s", fullMaintIntervalOption)
|
||||
}
|
||||
if priorMaintInterval != p.FullCycle.Interval {
|
||||
logger.Infof("Full maintenance interval change from %v to %v", priorMaintInterval, p.FullCycle.Interval)
|
||||
}
|
||||
|
||||
p.Owner = r.ClientOptions().UsernameAtHost()
|
||||
|
||||
if err := maintenance.SetParams(ctx, w, &p); err != nil {
|
||||
return errors.Wrap(err, "error to set maintenance params")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error to init write repo parameters")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,9 +18,14 @@ package kopialib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/repo/maintenance"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
|
||||
@@ -29,6 +34,8 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
|
||||
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend"
|
||||
repomocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks"
|
||||
storagemocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -239,7 +246,7 @@ func TestConnectBackupRepo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsBackupRepoCreated(t *testing.T) {
|
||||
func TestGetRepositoryStatus(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
backendStore *storagemocks.Store
|
||||
@@ -248,7 +255,7 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
setupError error
|
||||
returnStore *storagemocks.Storage
|
||||
retFuncGetBlob func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error
|
||||
expected bool
|
||||
expected RepoStatus
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
@@ -256,6 +263,7 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
},
|
||||
expected: RepoStatusUnknown,
|
||||
expectedErr: "error to setup backend storage: error to find storage type",
|
||||
},
|
||||
{
|
||||
@@ -266,6 +274,7 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
setupError: errors.New("fake-setup-error"),
|
||||
expected: RepoStatusUnknown,
|
||||
expectedErr: "error to setup backend storage: error to setup storage: fake-setup-error",
|
||||
},
|
||||
{
|
||||
@@ -276,10 +285,21 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
connectErr: errors.New("fake-connect-error"),
|
||||
expected: RepoStatusUnknown,
|
||||
expectedErr: "error to connect to storage: fake-connect-error",
|
||||
},
|
||||
{
|
||||
name: "get blob error",
|
||||
name: "storage not exist",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
connectErr: backend.ErrStoreNotExist,
|
||||
expected: RepoStatusSystemNotCreated,
|
||||
},
|
||||
{
|
||||
name: "get repo blob error",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
@@ -289,10 +309,24 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
retFuncGetBlob: func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error {
|
||||
return errors.New("fake-get-blob-error")
|
||||
},
|
||||
expectedErr: "error to read format blob: fake-get-blob-error",
|
||||
expected: RepoStatusUnknown,
|
||||
expectedErr: "error reading format blob: fake-get-blob-error",
|
||||
},
|
||||
{
|
||||
name: "wrong format",
|
||||
name: "no repo blob",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error {
|
||||
return blob.ErrBlobNotFound
|
||||
},
|
||||
expected: RepoStatusSystemNotCreated,
|
||||
},
|
||||
{
|
||||
name: "wrong repo format",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
@@ -303,8 +337,105 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
output.Write([]byte("fake-buffer"))
|
||||
return nil
|
||||
},
|
||||
expected: RepoStatusCorrupted,
|
||||
expectedErr: "invalid format blob: invalid character 'k' in literal false (expecting 'l')",
|
||||
},
|
||||
{
|
||||
name: "get udm repo blob error",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
if blobID == udmRepoBlobID {
|
||||
return errors.New("fake-get-blob-error")
|
||||
} else {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[],"keyAlgo":"","encryption":""}`))
|
||||
return nil
|
||||
}
|
||||
},
|
||||
expected: RepoStatusUnknown,
|
||||
expectedErr: "error reading udm repo blob: fake-get-blob-error",
|
||||
},
|
||||
{
|
||||
name: "no udm repo blob",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
if blobID == udmRepoBlobID {
|
||||
return blob.ErrBlobNotFound
|
||||
} else {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[],"keyAlgo":"","encryption":""}`))
|
||||
return nil
|
||||
}
|
||||
},
|
||||
expected: RepoStatusNotInitialized,
|
||||
},
|
||||
{
|
||||
name: "wrong udm repo metadata",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
if blobID == udmRepoBlobID {
|
||||
output.Write([]byte("fake-buffer"))
|
||||
} else {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[],"keyAlgo":"","encryption":""}`))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
expected: RepoStatusCorrupted,
|
||||
expectedErr: "invalid udm repo blob: invalid character 'k' in literal false (expecting 'l')",
|
||||
},
|
||||
{
|
||||
name: "wrong unique id",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
if blobID == udmRepoBlobID {
|
||||
output.Write([]byte(`{"uniqueID":[4,5,6]}`))
|
||||
} else {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[1,2,3],"keyAlgo":"","encryption":""}`))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
expected: RepoStatusCorrupted,
|
||||
expectedErr: "unique ID doesn't match: [4 5 6]([1 2 3])",
|
||||
},
|
||||
{
|
||||
name: "succeed",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "fake-file",
|
||||
StorageType: udmrepo.StorageTypeAzure,
|
||||
},
|
||||
backendStore: new(storagemocks.Store),
|
||||
returnStore: new(storagemocks.Storage),
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
if blobID == udmRepoBlobID {
|
||||
output.Write([]byte(`{"uniqueID":[1,2,3]}`))
|
||||
} else {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[1,2,3],"keyAlgo":"","encryption":""}`))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
expected: RepoStatusCreated,
|
||||
},
|
||||
}
|
||||
|
||||
logger := velerotest.NewLogger()
|
||||
@@ -326,7 +457,7 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
tc.returnStore.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.retFuncGetBlob)
|
||||
}
|
||||
|
||||
created, err := IsBackupRepoCreated(t.Context(), tc.repoOptions, logger)
|
||||
status, err := GetRepositoryStatus(t.Context(), tc.repoOptions, logger)
|
||||
|
||||
if tc.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
@@ -334,7 +465,390 @@ func TestIsBackupRepoCreated(t *testing.T) {
|
||||
require.EqualError(t, err, tc.expectedErr)
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expected, created)
|
||||
assert.Equal(t, tc.expected, status)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteInitParameters(t *testing.T) {
|
||||
var directRpo *repomocks.DirectRepository
|
||||
assertFullMaintIntervalEqual := func(expected, actual *maintenance.Params) bool {
|
||||
return assert.Equal(t, expected.FullCycle.Interval, actual.FullCycle.Interval)
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
repoOptions udmrepo.RepoOptions
|
||||
returnRepo *repomocks.DirectRepository
|
||||
returnRepoWriter *repomocks.DirectRepositoryWriter
|
||||
repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error)
|
||||
newRepoWriterError error
|
||||
replaceManifestError error
|
||||
getParam func(context.Context, repo.Repository) (*maintenance.Params, error)
|
||||
// expected replacemanifest params to be received by maintenance.SetParams, and therefore writeInitParameters
|
||||
expectedReplaceManifestsParams *maintenance.Params
|
||||
// allows for asserting only certain fields are set as expected
|
||||
assertReplaceManifestsParams func(*maintenance.Params, *maintenance.Params) bool
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "repo open fail, repo not exist",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return nil, os.ErrNotExist
|
||||
},
|
||||
expectedErr: "error to open repo, repo doesn't exist: file does not exist",
|
||||
},
|
||||
{
|
||||
name: "repo open fail, other error",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return nil, errors.New("fake-repo-open-error")
|
||||
},
|
||||
expectedErr: "error to open repo: fake-repo-open-error",
|
||||
},
|
||||
{
|
||||
name: "get params error",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
getParam: func(context.Context, repo.Repository) (*maintenance.Params, error) {
|
||||
return nil, errors.New("fake-get-param-error")
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
expectedErr: "error getting existing maintenance params: fake-get-param-error",
|
||||
},
|
||||
{
|
||||
name: "existing param with identical owner",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
getParam: func(context.Context, repo.Repository) (*maintenance.Params, error) {
|
||||
return &maintenance.Params{
|
||||
Owner: "default@default",
|
||||
}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing param with different owner",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
getParam: func(context.Context, repo.Repository) (*maintenance.Params, error) {
|
||||
return &maintenance.Params{
|
||||
Owner: "fake-owner",
|
||||
}, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedReplaceManifestsParams: &maintenance.Params{
|
||||
FullCycle: maintenance.CycleParams{
|
||||
Interval: udmrepo.NormalGCInterval,
|
||||
},
|
||||
},
|
||||
assertReplaceManifestsParams: assertFullMaintIntervalEqual,
|
||||
},
|
||||
{
|
||||
name: "write session fail",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
newRepoWriterError: errors.New("fake-new-writer-error"),
|
||||
expectedErr: "error to init write repo parameters: unable to create writer: fake-new-writer-error",
|
||||
},
|
||||
{
|
||||
name: "set repo param fail",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
GeneralOptions: map[string]string{},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
replaceManifestError: errors.New("fake-replace-manifest-error"),
|
||||
expectedErr: "error to init write repo parameters: error to set maintenance params: put manifest: fake-replace-manifest-error",
|
||||
},
|
||||
{
|
||||
name: "repo with maintenance interval has expected params",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string(udmrepo.FastGC),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedReplaceManifestsParams: &maintenance.Params{
|
||||
FullCycle: maintenance.CycleParams{
|
||||
Interval: udmrepo.FastGCInterval,
|
||||
},
|
||||
},
|
||||
assertReplaceManifestsParams: assertFullMaintIntervalEqual,
|
||||
},
|
||||
{
|
||||
name: "repo with empty maintenance interval has expected params",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string(""),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedReplaceManifestsParams: &maintenance.Params{
|
||||
FullCycle: maintenance.CycleParams{
|
||||
Interval: udmrepo.NormalGCInterval,
|
||||
},
|
||||
},
|
||||
assertReplaceManifestsParams: assertFullMaintIntervalEqual,
|
||||
},
|
||||
{
|
||||
name: "repo with invalid maintenance interval has expected errors",
|
||||
repoOptions: udmrepo.RepoOptions{
|
||||
ConfigFilePath: "/tmp",
|
||||
StorageOptions: map[string]string{
|
||||
udmrepo.StoreOptionKeyFullMaintenanceInterval: string("foo"),
|
||||
},
|
||||
},
|
||||
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
|
||||
return directRpo, nil
|
||||
},
|
||||
returnRepo: new(repomocks.DirectRepository),
|
||||
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
|
||||
expectedErr: "error to init write repo parameters: invalid full maintenance interval option foo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
logger := velerotest.NewLogger()
|
||||
ctx := t.Context()
|
||||
|
||||
if tc.repoOpen != nil {
|
||||
kopiaRepoOpen = tc.repoOpen
|
||||
}
|
||||
|
||||
if tc.returnRepo != nil {
|
||||
directRpo = tc.returnRepo
|
||||
}
|
||||
|
||||
if tc.returnRepo != nil {
|
||||
tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError)
|
||||
tc.returnRepo.On("ClientOptions").Return(repo.ClientOptions{})
|
||||
tc.returnRepo.On("Close", mock.Anything).Return(nil)
|
||||
}
|
||||
|
||||
if tc.returnRepoWriter != nil {
|
||||
tc.returnRepoWriter.On("Close", mock.Anything).Return(nil)
|
||||
if tc.replaceManifestError != nil {
|
||||
tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.Anything, mock.Anything).Return(manifest.ID(""), tc.replaceManifestError)
|
||||
}
|
||||
if tc.expectedReplaceManifestsParams != nil {
|
||||
tc.returnRepoWriter.On("ReplaceManifests", mock.Anything, mock.AnythingOfType("map[string]string"), mock.AnythingOfType("*maintenance.Params")).Return(manifest.ID(""), nil)
|
||||
tc.returnRepoWriter.On("Flush", mock.Anything).Return(nil)
|
||||
}
|
||||
}
|
||||
|
||||
if tc.getParam != nil {
|
||||
funcGetParam = tc.getParam
|
||||
} else {
|
||||
funcGetParam = func(ctx context.Context, rep repo.Repository) (*maintenance.Params, error) {
|
||||
return &maintenance.Params{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
err := writeInitParameters(ctx, tc.repoOptions, logger)
|
||||
|
||||
if tc.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.EqualError(t, err, tc.expectedErr)
|
||||
}
|
||||
if tc.expectedReplaceManifestsParams != nil {
|
||||
actualReplaceManifestsParams, converted := tc.returnRepoWriter.Calls[0].Arguments.Get(2).(*maintenance.Params)
|
||||
assert.True(t, converted)
|
||||
tc.assertReplaceManifestsParams(tc.expectedReplaceManifestsParams, actualReplaceManifestsParams)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteUdmRepoMetadata(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
retFuncGetBlob func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error
|
||||
retFuncPutBlob func(context.Context, blob.ID, blob.Bytes, blob.PutOptions) error
|
||||
replaceMetadata *udmRepoMetadata
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "get repo blob error",
|
||||
retFuncGetBlob: func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error {
|
||||
return errors.New("fake-get-blob-error")
|
||||
},
|
||||
expectedErr: "error reading format blob: fake-get-blob-error",
|
||||
},
|
||||
{
|
||||
name: "wrong repo format",
|
||||
retFuncGetBlob: func(ctx context.Context, id blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
output.Write([]byte("fake-buffer"))
|
||||
return nil
|
||||
},
|
||||
expectedErr: "invalid format blob: invalid character 'k' in literal false (expecting 'l')",
|
||||
},
|
||||
{
|
||||
name: "put udm repo metadata blob error",
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[],"keyAlgo":"","encryption":""}`))
|
||||
return nil
|
||||
},
|
||||
retFuncPutBlob: func(context.Context, blob.ID, blob.Bytes, blob.PutOptions) error {
|
||||
return errors.New("fake-put-blob-error")
|
||||
},
|
||||
expectedErr: "error writing udm repo metadata: fake-put-blob-error",
|
||||
},
|
||||
{
|
||||
name: "succeed",
|
||||
retFuncGetBlob: func(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error {
|
||||
output.Write([]byte(`{"tool":"","buildVersion":"","buildInfo":"","uniqueID":[],"keyAlgo":"","encryption":""}`))
|
||||
return nil
|
||||
},
|
||||
retFuncPutBlob: func(context.Context, blob.ID, blob.Bytes, blob.PutOptions) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
storage := new(storagemocks.Storage)
|
||||
if tc.retFuncGetBlob != nil {
|
||||
storage.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.retFuncGetBlob)
|
||||
}
|
||||
|
||||
if tc.retFuncPutBlob != nil {
|
||||
storage.On("PutBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.retFuncPutBlob)
|
||||
}
|
||||
|
||||
err := writeUdmRepoMetadata(t.Context(), storage)
|
||||
|
||||
if tc.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.EqualError(t, err, tc.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type testRecv struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (r *testRecv) Write(p []byte) (n int, err error) {
|
||||
r.buffer = append(r.buffer, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func TestByteBuffer(t *testing.T) {
|
||||
buffer := &byteBuffer{}
|
||||
written, err := buffer.Write([]byte("12345"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, written)
|
||||
|
||||
written, err = buffer.Write([]byte("67890"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, written)
|
||||
require.Equal(t, 10, buffer.Length())
|
||||
|
||||
recv := &testRecv{}
|
||||
copied, err := buffer.WriteTo(recv)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10), copied)
|
||||
require.Equal(t, []byte("1234567890"), recv.buffer)
|
||||
|
||||
buffer.Reset()
|
||||
require.Zero(t, buffer.Length())
|
||||
}
|
||||
|
||||
func TestByteBufferReader(t *testing.T) {
|
||||
buffer := &byteBufferReader{buffer: []byte("123456789012345678901234567890")}
|
||||
off, err := buffer.Seek(100, io.SeekStart)
|
||||
require.Equal(t, int64(-1), off)
|
||||
require.EqualError(t, err, "invalid seek")
|
||||
require.Zero(t, buffer.pos)
|
||||
|
||||
off, err = buffer.Seek(-100, io.SeekEnd)
|
||||
require.Equal(t, int64(-1), off)
|
||||
require.EqualError(t, err, "invalid seek")
|
||||
require.Zero(t, buffer.pos)
|
||||
|
||||
off, err = buffer.Seek(3, io.SeekCurrent)
|
||||
require.Equal(t, int64(3), off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, buffer.pos)
|
||||
|
||||
output := make([]byte, 6)
|
||||
read, err := buffer.Read(output)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, read)
|
||||
require.Equal(t, 9, buffer.pos)
|
||||
require.Equal(t, []byte("456789"), output)
|
||||
|
||||
off, err = buffer.Seek(21, io.SeekStart)
|
||||
require.Equal(t, int64(21), off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 21, buffer.pos)
|
||||
|
||||
output = make([]byte, 6)
|
||||
read, err = buffer.Read(output)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, read)
|
||||
require.Equal(t, 27, buffer.pos)
|
||||
require.Equal(t, []byte("234567"), output)
|
||||
|
||||
output = make([]byte, 6)
|
||||
read, err = buffer.Read(output)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, read)
|
||||
require.Equal(t, 30, buffer.pos)
|
||||
require.Equal(t, []byte{'8', '9', '0', 0, 0, 0}, output)
|
||||
|
||||
output = make([]byte, 6)
|
||||
read, err = buffer.Read(output)
|
||||
require.Zero(t, read)
|
||||
require.Equal(t, io.EOF, err)
|
||||
|
||||
err = buffer.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func (c *Command) String() string {
|
||||
// Cmd returns an exec.Cmd for the command.
|
||||
func (c *Command) Cmd() *exec.Cmd {
|
||||
parts := c.StringSlice()
|
||||
cmd := exec.Command(parts[0], parts[1:]...) //nolint:gosec // Internal call. No need to check the parameter.
|
||||
cmd := exec.Command(parts[0], parts[1:]...) //nolint:gosec,noctx // Internal call. No need to check the parameter. No need to add context for the deprecated Restic.
|
||||
cmd.Dir = c.Dir
|
||||
|
||||
if len(c.Env) > 0 {
|
||||
|
||||
@@ -20,9 +20,10 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"context"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
@@ -313,6 +313,12 @@ kubectl -n velero get datauploads -l velero.io/backup-name=YOUR_BACKUP_NAME -w
|
||||
kubectl -n velero get datadownloads -l velero.io/restore-name=YOUR_RESTORE_NAME -w
|
||||
```
|
||||
|
||||
For each volume, the parallelism works as follows:
- If it is a file system mode volume, files in the volume are processed in parallel. You can use the `--parallel-files-upload` backup flag or the `--parallel-files-download` restore flag to control how many files are processed in parallel. If they are not set, Velero by default uses the number of CPU cores in the node (where the backup/restore is running) as the parallelism. That is to say, the parallelism is not affected by the CPU request/limit set on the data mover pods.
- If it is a block mode volume, there is no parallelism; the block data is processed sequentially.

Notice that Golang 1.25 and later respects the CPU limit set on the pods when deciding the physical threads provisioned to the pod processes (see [Container-aware GOMAXPROCS][22] for more details). So for Velero 1.18 (which is built with Golang 1.25) and later, if you set a CPU limit on the data mover pods, you may not get the expected performance (e.g., backup/restore throughput) with the default parallelism. The impact may or may not be noticeable depending on your volume data. If required, you can customize `--parallel-files-upload` or `--parallel-files-download` according to the CPU limit set on the data mover pods.
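As an illustration only (the namespace, backup name and parallelism values are assumptions to be tuned for your environment), the parallelism could be aligned with, say, a 4-CPU limit on the data mover pods:
```bash
# Backup: move snapshot data and cap file-level upload parallelism at 4
velero backup create my-backup --include-namespaces my-ns --snapshot-move-data --parallel-files-upload 4

# Restore: cap file-level download parallelism at 4
velero restore create --from-backup my-backup --parallel-files-download 4
```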
|
||||
|
||||
### Restart and resume
|
||||
When the Velero server is restarted, if the resource backup/restore has completed, i.e., the backup/restore has exceeded the `InProgress` status and is waiting for the completion of the data movements, Velero will recapture the status of the running data movements and resume the execution.
|
||||
When node-agent is restarted, Velero tries to recapture the status of the running data movements and resume the execution; if the resume fails, the data movements are canceled.
|
||||
@@ -376,7 +382,10 @@ For Velero built-in data mover, Velero uses [BestEffort as the QoS][13] for data
|
||||
If you want to constrain the CPU/memory usage, you need to [Customize Data Mover Pod Resource Limits][11]. The CPU/memory consumption is always related to the scale of data to be backed up/restored; refer to [Performance Guidance][12] for more details. It is highly recommended that you perform your own testing to find the best resource limits for your data.
|
||||
|
||||
During the restore, the repository may also cache data/metadata so as to reduce the network footprint and speed up the restore. The repository uses its own policy to store and clean up the cache.
|
||||
For Kopia repository, the cache is stored in the data mover pod's root file system. Velero allows you to configure a limit of the cache size so that the data mover pod won't be evicted due to running out of the ephemeral storage. For more details, check [Backup Repository Configuration][17].
|
||||
For the Kopia repository, by default, the cache is stored in the data mover pod's root file system. If your root file system space is limited, the data mover pods may be evicted due to running out of ephemeral storage, which causes the restore to fail. To cope with this problem, Velero allows you to:
- configure a limit on the cache size per backup repository; for more details, check [Backup Repository Configuration][17].
- configure a dedicated volume for cache data; for more details, check [Data Movement Cache Volume][21].
|
||||
|
||||
|
||||
### Node Selection
|
||||
|
||||
@@ -416,4 +425,7 @@ Sometimes, `RestorePVC` needs to be configured to increase the performance of re
|
||||
[18]: https://github.com/vmware-tanzu/velero/pull/7576
|
||||
[19]: data-movement-restore-pvc-configuration.md
|
||||
[20]: node-agent-prepare-queue-length.md
|
||||
[21]: data-movement-cache-volume.md
|
||||
[22]: https://tip.golang.org/doc/go1.25#container-aware-gomaxprocs:~:text=Runtime%C2%B6-,Container%2Daware%20GOMAXPROCS,-%C2%B6
|
||||
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ title: "BackupPVC Configuration for Data Movement Backup"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
`BackupPVC` is an intermediate PVC to access data from during the data movement backup operation.
|
||||
|
||||
In some scenarios users may need to configure some advanced options of the backupPVC so that the data movement backup
|
||||
@@ -75,3 +77,13 @@ timeout (data movement prepare timeout value is 30m by default).
|
||||
if the volume is not readOnly.
|
||||
- If any of the above problems occur, the DataUpload CR is `canceled` after the timeout, the backupPod and backupPVC are deleted, and the backup
is marked as `PartiallyFailed`.
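If you need to confirm this behavior, you can watch the DataUpload CRs during the backup (the same command used elsewhere in the data movement docs):
```bash
kubectl -n velero get datauploads -l velero.io/backup-name=YOUR_BACKUP_NAME -w
```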
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
55
site/content/docs/main/data-movement-cache-volume.md
Normal file
@@ -0,0 +1,55 @@
|
||||
---
|
||||
title: "Cache PVC Configuration for Data Movement Restore"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
Velero data movement restore (i.e., for CSI snapshot data movement and fs-backup) may request the backup repository to cache data locally so as to reduce the data requests to the remote backup storage.
The cache behavior is decided by the specific backup repository, and Velero allows you to configure a cache limit for the backup repositories that support it (i.e., the Kopia repository). For more details, see [Backup Repository Configuration][1].
The size of the cache may significantly impact performance. Specifically, if the cache size is too small, the restore throughput will be severely reduced and much more data will be downloaded from the backup storage.
By default, the cache data is located in the data mover pods' root disk. In some environments, the pods' root disk size is very limited, so a large cache size would cause the data mover pods to be evicted because they run out of ephemeral disk.
|
||||
|
||||
To cope with these problems and guarantee that the data mover pods always run with a fine-tuned local cache, Velero supports dedicated cache PVCs for data movement restore, for both CSI snapshot data movement and fs-backup.

By default, Velero data mover pods run without cache PVCs. To enable cache PVCs, you need to fill in the cache PVC configuration in the node-agent ConfigMap.
|
||||
|
||||
A sample of cache PVC configuration as part of the ConfigMap would look like:
|
||||
```json
|
||||
{
|
||||
"cachePVC": {
|
||||
"thresholdInGB": 1,
|
||||
"storageClass": "sc-wffc"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To create the ConfigMap, save something like the above sample to a file and then run the command below:
|
||||
```shell
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
A must-have field in the configuration is `storageClass`, which tells Velero which storage class is used to provision the cache PVC. Velero relies on the Kubernetes dynamic provisioning process to provision the PVC; static provisioning is not supported.
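For reference, a dynamically provisioning storage class that could back the `sc-wffc` name used in the sample above might look like the following sketch (the provisioner shown is a hypothetical example; use the CSI provisioner available in your cluster):
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-wffc
provisioner: ebs.csi.aws.com   # hypothetical; replace with your cluster's CSI provisioner
volumeBindingMode: WaitForFirstConsumer
```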
|
||||
|
||||
The cache PVC behavior can be further fine-tuned through `thresholdInGB`. Its value is compared to the size of the backup; if the backup is smaller than this value, no cache PVC is created when restoring from that backup. This ensures that cache PVCs are not created in vain when the backup is small enough to be accommodated in the data mover pods' root disk.
|
||||
|
||||
This configuration decides whether and how to provision cache PVCs, but it doesn't decide their size. Instead, the size is decided by the specific backup repository. Specifically, Velero asks the backup repository for a cache limit and uses this limit to calculate the cache PVC size.
The cache limit is decided by the backup repository itself; for the Kopia repository, if `cacheLimitMB` is specified in the backup repository configuration, its value is used, otherwise a default limit (5 GB) is used.
Velero then inflates the limit by 20% to account for non-payload overheads and the delayed cache cleanup behavior, which vary across backup repositories.
|
||||
|
||||
Take the Kopia repository and the above cache PVC configuration as an example:
- When `cacheLimitMB` is not available for the repository, a 6GB cache PVC is created for backups larger than 1GB; otherwise, no cache volume is created
- When `cacheLimitMB` is specified as `10240` for the repository, a 12GB cache PVC is created for backups larger than 1GB; otherwise, no cache volume is created
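The sizes in the example follow directly from the 20% inflation described above:
```
cache PVC size = cache limit * 1.2
               = 5 GB  * 1.2 = 6 GB     (default limit, no cacheLimitMB)
               = 10 GB * 1.2 = 12 GB    (cacheLimitMB: 10240)
```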
|
||||
|
||||
To enable both the node-agent ConfigMap and the backup repository ConfigMap, specify the flags during Velero installation by CLI:
|
||||
`velero install --node-agent-configmap=<ConfigMap-Name> --backup-repository-configmap=<ConfigMap-Name>`
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
[1]: backup-repository-configuration.md
|
||||
@@ -3,6 +3,8 @@ title: "Node Selection for Data Movement"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
Velero node-agent is a DaemonSet hosting the data movement modules to complete the concrete work of backups/restores.
|
||||
Depending on the data size, data complexity, and resource availability, the data movement may take a long time and consume considerable resources (CPU, memory, network bandwidth, etc.) during the backup and restore.
|
||||
|
||||
@@ -258,3 +260,13 @@ volumeBindingMode: Immediate
|
||||
Because the StorageClass volumeBindingMode is `Immediate`, the restorePVC will not be created according to the target Pod, even though `ignoreDelayBinding` is set to `false`.

The restorePod will be assigned to nodes whose instance type is `Standard_B4ms`.
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
@@ -3,6 +3,8 @@ title: "Data Movement Pod Resource Configuration"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
During [CSI Snapshot Data Movement][1], Velero built-in data mover launches data mover pods to run the data transfer.
|
||||
During [fs-backup][2], Velero also launches data mover pods to run the data transfer.
|
||||
The data transfer is a time and resource consuming activity.
|
||||
@@ -123,6 +125,16 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
|
||||
|
||||
**Note**: If the specified priority class doesn't exist in the cluster when data mover pods are created, the pods will fail to schedule. Velero validates the priority class at startup and logs a warning if it doesn't exist, but the pods will still attempt to use it.
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
|
||||
|
||||
@@ -3,6 +3,8 @@ title: "RestorePVC Configuration for Data Movement Restore"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
`RestorePVC` is an intermediate PVC to write data during the data movement restore operation.
|
||||
|
||||
In some scenarios users may need to configure some advanced options of the `restorePVC` so that the data movement restore operation could perform better. Specifically:
|
||||
@@ -28,3 +30,13 @@ A sample of `restorePVC` config as part of the ConfigMap would look like:
|
||||
**Note:**
|
||||
- If `ignoreDelayBinding` is set, the restored volume is provisioned in the storage areas associated with an arbitrary node. If the restored pod cannot be scheduled to that node, e.g., because of topology constraints, the data mover restore still completes, but the workload is not usable since the restored pod cannot mount the restored volume
- At present, node selection is not supported for data mover restore, so the restored volume may be attached to any node in the cluster; once node selection is supported and enabled, the restored volume will be attached to one of the selected nodes only. In this way, node selection and `ignoreDelayBinding` can work together even in an environment with topology constraints
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
@@ -631,6 +631,10 @@ However, if you run a backup which aborts halfway(some internal snapshots are th
|
||||
By default, one `PodVolumeBackup`/`PodVolumeRestore` request is handled in a node at a time. You can configure more parallelism per node by [node-agent Concurrency Configuration][19].
|
||||
In the meantime, one data mover pod is created for each volume to be backed up/restored; if there is no available concurrency quota, the data mover pod has to wait. To control the number of pending data mover pods, you can configure the [node-agent Prepare Queue Length][20].
|
||||
|
||||
For each volume, files in the volume are processed in parallel. You can use the `--parallel-files-upload` backup flag or the `--parallel-files-download` restore flag to control how many files are processed in parallel. If they are not set, Velero by default uses the number of CPU cores in the node (where the backup/restore is running) as the parallelism. That is to say, the parallelism is not affected by the CPU request/limit set on the data mover pods.

Notice that Golang 1.25 and later respects the CPU limit set on the pods when deciding the physical threads provisioned to the pod processes (see [Container-aware GOMAXPROCS][23] for more details). So for Velero 1.18 (which is built with Golang 1.25) and later, if you set a CPU limit on the data mover pods, you may not get the expected performance (e.g., backup/restore throughput) with the default parallelism. The impact may or may not be noticeable depending on your volume data. If required, you can customize `--parallel-files-upload` or `--parallel-files-download` according to the CPU limit set on the data mover pods.
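As an illustration only (the namespace, backup name and parallelism value are assumptions to be tuned for your environment), the fs-backup parallelism could be pinned to match, say, a 4-CPU limit on the data mover pods:
```bash
# fs-backup with file-level upload parallelism capped at 4
velero backup create my-fs-backup --include-namespaces my-ns --default-volumes-to-fs-backup --parallel-files-upload 4
```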
|
||||
|
||||
### Restart and resume
|
||||
When Velero server is restarted, the running backups/restores will be marked as `Failed`. The corresponding `PodVolumeBackup`/`PodVolumeRestore` will be canceled.
|
||||
When node-agent is restarted, the controller will try to recapture and resume the `PodVolumeBackup`/`PodVolumeRestore`. If the resume fails, the `PodVolumeBackup`/`PodVolumeRestore` will be canceled.
|
||||
@@ -693,7 +697,7 @@ spec:
|
||||
|
||||
## Priority Class Configuration
|
||||
|
||||
For Velero built-in data mover, data mover pods launched during file system backup will use the priority class name configured in the node-agent configmap. The node-agent daemonset itself gets its priority class from the `--node-agent-priority-class-name` flag during Velero installation. This can help ensure proper scheduling behavior in resource-constrained environments. For more details on configuring data mover pod resources, see [Data Movement Pod Resource Configuration][data-movement-config].
|
||||
For Velero built-in data mover, data mover pods launched during file system backup will use the priority class name configured in the node-agent configmap. The node-agent daemonset itself gets its priority class from the `--node-agent-priority-class-name` flag during Velero installation. This can help ensure proper scheduling behavior in resource-constrained environments. For more details on configuring data mover pod resources, see [Data Movement Pod Resource Configuration][21].
|
||||
|
||||
## Resource Consumption
|
||||
|
||||
@@ -701,18 +705,10 @@ Both the uploader and repository consume remarkable CPU/memory during the backup
|
||||
Velero node-agent uses [BestEffort as the QoS][14] for node-agent pods (so no CPU/memory request/limit is set), so that backups/restores wouldn't fail due to resource throttling in any cases.
|
||||
If you want to constrain the CPU/memory usage, you need to [customize the resource limits][15]. The CPU/memory consumption is always related to the scale of data to be backed up/restored; refer to [Performance Guidance][16] for more details. It is highly recommended that you perform your own testing to find the best resource limits for your data.
|
||||
|
||||
Some memory is preserved by the node-agent to avoid frequent memory allocations; therefore, after you run a file-system backup/restore, you won't see the node-agent release all the memory until it restarts. There is a limit on the memory preservation, so the memory won't increase indefinitely. The limit varies with the number of CPU cores in the cluster nodes, as calculated below:
|
||||
```
|
||||
preservedMemoryInOneNode = 128M + 24M * numOfCPUCores
|
||||
```
|
||||
The memory preservation only happens on the nodes where backups/restores ever occur. Assuming file-system backups/restores occur on every worker node and each node has the same number of CPU cores, the maximum possible preserved memory in your cluster is:
|
||||
```
|
||||
totalPreservedMemory = (128M + 24M * numOfCPUCores) * numOfWorkerNodes
|
||||
```
|
||||
However, whether and when this limit is reached is related to the data you are backing up/restoring.
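As a concrete example of the formulas above, on a cluster of 10 worker nodes with 8 CPU cores each:
```
preservedMemoryInOneNode = 128M + 24M * 8  = 320M
totalPreservedMemory     = 320M * 10       = 3200M (~3.2G)
```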
|
||||
|
||||
During the restore, the repository may also cache data/metadata so as to reduce the network footprint and speed up the restore. The repository uses its own policy to store and clean up the cache.
|
||||
For Kopia repository, the cache is stored in the node-agent pod's root file system. Velero allows you to configure a limit of the cache size so that the node-agent pod won't be evicted due to running out of the ephemeral storage. For more details, check [Backup Repository Configuration][18].
|
||||
For the Kopia repository, by default, the cache is stored in the data mover pod's root file system. If your root file system space is limited, the data mover pods may be evicted due to running out of ephemeral storage, which causes the restore to fail. To cope with this problem, Velero allows you to:
- configure a limit on the cache size per backup repository; for more details, check [Backup Repository Configuration][18].
- configure a dedicated volume for cache data; for more details, check [Data Movement Cache Volume][22].
|
||||
|
||||
## Restic Deprecation
|
||||
|
||||
@@ -766,4 +762,6 @@ Velero still effectively manage restic repository, though you cannot write any n
|
||||
[18]: backup-repository-configuration.md
|
||||
[19]: node-agent-concurrency.md
|
||||
[20]: node-agent-prepare-queue-length.md
|
||||
[data-movement-config]: data-movement-pod-resource-configuration.md
|
||||
[21]: data-movement-pod-resource-configuration.md
|
||||
[22]: data-movement-cache-volume.md
|
||||
[23]: https://tip.golang.org/doc/go1.25#container-aware-gomaxprocs:~:text=Runtime%C2%B6-,Container%2Daware%20GOMAXPROCS,-%C2%B6
|
||||
|
||||
@@ -3,6 +3,8 @@ title: "Node-agent Concurrency"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
Velero node-agent is a daemonset hosting modules to complete the concrete tasks of backups/restores, i.e., file system backup/restore, CSI snapshot data movement.
|
||||
Depending on the data size, data complexity, and resource availability, the tasks may take a long time and consume considerable resources (CPU, memory, network bandwidth, etc.). These tasks make up the load of the node-agent.
|
||||
|
||||
@@ -23,7 +25,7 @@ You can specify different concurrent number per node, for example, you can set 3
|
||||
The range of the per-node concurrent number is the same as that of the global concurrent number. The per-node concurrent number takes precedence over the global concurrent number, so it overwrites the global concurrent number for that node.
|
||||
|
||||
Per-node concurrent number is implemented through ```perNodeConfig``` field in ```loadConcurrency```.
|
||||
```perNodeConfig``` is a list of ```RuledConfigs``` each item of which matches one or more nodes by label selectors and specify the concurrent number for the matched nodes.
|
||||
`perNodeConfig` is a list of `RuledConfigs` each item of which matches one or more nodes by label selectors and specify the concurrent number for the matched nodes.
|
||||
Here is an example of the `perNodeConfig`:
|
||||
```
|
||||
"nodeSelector: kubernetes.io/hostname=node1; number: 3"
|
||||
@@ -79,3 +81,13 @@ spec:
|
||||
- args:
|
||||
- --node-agent-configmap=<ConfigMap name>
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
409
site/content/docs/main/node-agent-config.md
Normal file
@@ -0,0 +1,409 @@
|
||||
---
|
||||
title: "Node-agent Configuration"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
The Velero node-agent is a DaemonSet that hosts modules for completing backup and restore operations, including file system backup/restore and CSI snapshot data movement. This document provides comprehensive configuration options for the node-agent through a ConfigMap.
|
||||
|
||||
## Overview
|
||||
|
||||
Node-agent configuration is provided through a ConfigMap that contains JSON configuration for various aspects of data movement operations. The ConfigMap should be created in the same namespace where Velero is installed, and its name is specified using the `--node-agent-configmap` parameter.
|
||||
|
||||
### Creating and Managing the ConfigMap
|
||||
|
||||
The ConfigMap name can be specified during Velero installation:
|
||||
```bash
|
||||
velero install --node-agent-configmap=<ConfigMap-Name>
|
||||
```
|
||||
|
||||
To create the ConfigMap:
|
||||
1. Save your configuration to a JSON file
|
||||
2. Create the ConfigMap:
|
||||
```bash
|
||||
kubectl create cm <ConfigMap-Name> -n velero --from-file=<json-file-name>
|
||||
```
|
||||
|
||||
To apply the ConfigMap to the node-agent DaemonSet:
|
||||
```bash
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
|
||||
Add the ConfigMap reference to the container arguments:
|
||||
```yaml
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-configmap=<ConfigMap-Name>
|
||||
```
|
||||
|
||||
**Important**: The node-agent server checks configurations at startup time. After editing the ConfigMap, restart the node-agent DaemonSet for changes to take effect.
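For example, you can trigger the restart with:
```bash
kubectl rollout restart -n velero daemonset/node-agent
```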
|
||||
|
||||
## Configuration Sections
|
||||
|
||||
### Load Concurrency (`loadConcurrency`)
|
||||
|
||||
Controls the concurrent number of data movement operations per node to optimize resource usage and performance.
|
||||
|
||||
#### Global Configuration
|
||||
Sets a default concurrent number applied to all nodes:
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Per-node Configuration
|
||||
Specify different concurrent numbers for specific nodes using label selectors:
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2,
|
||||
"perNodeConfig": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"kubernetes.io/hostname": "node1"
|
||||
}
|
||||
},
|
||||
"number": 3
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"beta.kubernetes.io/instance-type": "Standard_B4ms"
|
||||
}
|
||||
},
|
||||
"number": 5
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **Range**: Starts from 1 (no concurrency), no upper limit
|
||||
- **Priority**: Per-node configuration overrides global configuration
|
||||
- **Conflicts**: If a node matches multiple rules, the smallest number is used
|
||||
- **Default**: 1 if not specified
|
||||
|
||||
**Use Cases:**
|
||||
- Increase concurrency on nodes with more resources
|
||||
- Reduce concurrency on nodes with limited resources or critical workloads
|
||||
- Prevent OOM kills and resource contention
|
||||
|
||||
For detailed information, see [Node-agent Concurrency](node-agent-concurrency.md).
|
||||
|
||||
### Node Selection (`loadAffinity`)
|
||||
|
||||
Constrains which nodes can run data movement operations using affinity and anti-affinity rules.
|
||||
|
||||
```json
|
||||
{
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"beta.kubernetes.io/instance-type": "Standard_B4ms"
|
||||
},
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "kubernetes.io/hostname",
|
||||
"values": ["node-1", "node-2", "node-3"],
|
||||
"operator": "In"
|
||||
},
|
||||
{
|
||||
"key": "critical-workload",
|
||||
"operator": "DoesNotExist"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Storage Class Specific Selection
|
||||
Configure different node selection rules for specific storage classes:
|
||||
```json
|
||||
{
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "production"
|
||||
}
|
||||
},
|
||||
"storageClass": "fast-ssd"
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "backup"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Important Limitations:**
|
||||
- Only the first element in the `loadAffinity` array is used for general node selection
|
||||
- Additional elements are only considered if they have a `storageClass` field
|
||||
- To combine multiple conditions, use both `matchLabels` and `matchExpressions` in a single element
|
||||
|
||||
**Use Cases:**
|
||||
- Prevent data movement on nodes with critical workloads
|
||||
- Run data movement only on nodes with sufficient resources
|
||||
- Ensure data movement runs only on nodes where storage is accessible
|
||||
- Comply with topology constraints
|
||||
|
||||
For detailed information, see [Node Selection for Data Movement](data-movement-node-selection.md).
|
||||
|
||||
### Pod Resources (`podResources`)
|
||||
|
||||
Configure CPU and memory resources for data mover pods to optimize performance and prevent resource conflicts.
|
||||
|
||||
```json
|
||||
{
|
||||
"podResources": {
|
||||
"cpuRequest": "1000m",
|
||||
"cpuLimit": "2000m",
|
||||
"memoryRequest": "1Gi",
|
||||
"memoryLimit": "4Gi"
|
||||
},
|
||||
"priorityClassName": "backup-priority"
|
||||
}
|
||||
```
|
||||
|
||||
#### Resource Configuration
|
||||
- **Values**: Must be valid Kubernetes Quantity expressions
|
||||
- **Validation**: Request values must not exceed limit values
|
||||
- **Default**: BestEffort QoS if not specified
|
||||
- **Failure Handling**: Invalid values cause the entire `podResources` section to be ignored
|
||||
|
||||
#### Priority Class Configuration
|
||||
Configure pod priority to control scheduling behavior:
|
||||
|
||||
**High Priority** (e.g., `system-cluster-critical`):
|
||||
- ✅ Faster scheduling and less likely to be preempted
|
||||
- ❌ May impact production workload performance
|
||||
|
||||
**Low Priority** (e.g., `low-priority`):
|
||||
- ✅ Protects production workloads from resource competition
|
||||
- ❌ May delay backup operations or cause preemption
|
||||
|
||||
**Use Cases:**
|
||||
- Limit resource consumption in resource-constrained clusters
|
||||
- Guarantee resources for time-critical backup/restore operations
|
||||
- Prevent OOM kills during large data transfers
|
||||
- Control scheduling priority relative to production workloads
|
||||
|
||||
For detailed information, see [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md).
|
||||
|
||||
### Backup PVC Configuration (`backupPVC`)
|
||||
|
||||
Configure intermediate PVCs used during data movement backup operations for optimal performance.
|
||||
|
||||
```json
|
||||
{
|
||||
"backupPVC": {
|
||||
"source-storage-class": {
|
||||
"storageClass": "backup-optimized-class",
|
||||
"readOnly": true,
|
||||
"spcNoRelabeling": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Configuration Options
|
||||
- **`storageClass`**: Alternative storage class for backup PVCs (defaults to source PVC's storage class)
|
||||
- **`readOnly`**: Use `ReadOnlyMany` access mode for faster volume creation from snapshots
|
||||
- **`spcNoRelabeling`**: Required in SELinux clusters when using `readOnly` mode
|
||||
|
||||
#### Storage Class Mapping
|
||||
Configure different backup PVC settings per source storage class:
|
||||
```json
|
||||
{
|
||||
"backupPVC": {
|
||||
"fast-storage": {
|
||||
"storageClass": "backup-storage",
|
||||
"readOnly": true
|
||||
},
|
||||
"slow-storage": {
|
||||
"storageClass": "backup-storage"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- Use read-only volumes for faster snapshot-to-volume conversion
|
||||
- Use dedicated storage classes optimized for backup operations
|
||||
- Reduce replica count for intermediate backup volumes
|
||||
- Comply with SELinux requirements in secured environments
|
||||
|
||||
**Important Notes:**
|
||||
- Ensure specified storage classes exist and support required access modes
|
||||
- In SELinux environments, always set `spcNoRelabeling: true` when using `readOnly: true`
|
||||
- Failures result in DataUpload CR staying in `Accepted` phase until timeout (30m default)
|
||||
|
||||
For detailed information, see [BackupPVC Configuration for Data Movement Backup](data-movement-backup-pvc-configuration.md).
|
||||
|
||||
### Restore PVC Configuration (`restorePVC`)
|
||||
|
||||
Configure intermediate PVCs used during data movement restore operations.
|
||||
|
||||
```json
|
||||
{
|
||||
"restorePVC": {
|
||||
"ignoreDelayBinding": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Configuration Options
|
||||
- **`ignoreDelayBinding`**: Ignore `WaitForFirstConsumer` binding mode constraints
|
||||
|
||||
**Use Cases:**
|
||||
- Improve restore parallelism by not waiting for pod scheduling
|
||||
- Enable volume restore without requiring a pod to be mounted
|
||||
- Work around topology constraints when you know the environment setup
|
||||
|
||||
**Important Notes:**
|
||||
- Use only when you understand your cluster's topology constraints
|
||||
- May result in volumes provisioned on nodes where workload pods cannot be scheduled
|
||||
- Works best with node selection to ensure proper node targeting
|
||||
|
||||
For detailed information, see [RestorePVC Configuration for Data Movement Restore](data-movement-restore-pvc-configuration.md).
|
||||
|
||||
### Prepare Queue Length (`prepareQueueLength`)
|
||||
|
||||
Control the maximum number of backup/restore operations that can be in preparation phases simultaneously.
|
||||
|
||||
```json
|
||||
{
|
||||
"prepareQueueLength": 10
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- Limit resource consumption from intermediate objects (PVCs, VolumeSnapshots, etc.)
|
||||
- Prevent resource exhaustion when backup/restore concurrency is limited
|
||||
- Balance between parallelism and resource usage
|
||||
|
||||
**Affected CR Phases:**
|
||||
- DataUpload/DataDownload CRs in `Accepted` or `Prepared` phases
|
||||
- PodVolumeBackup/PodVolumeRestore CRs in preparation phases
|
||||
|
||||
For detailed information, see [Node-agent Prepare Queue Length](node-agent-prepare-queue-length.md).
|
||||
|
||||
## Complete Configuration Example
|
||||
|
||||
Here's a comprehensive example showing how all configuration sections work together:
|
||||
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2,
|
||||
"perNodeConfig": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"node-type": "backup"
|
||||
}
|
||||
},
|
||||
"number": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"node-type": "backup"
|
||||
},
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "critical-workload",
|
||||
"operator": "DoesNotExist"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"storage-tier": "fast"
|
||||
}
|
||||
},
|
||||
"storageClass": "fast-ssd"
|
||||
}
|
||||
],
|
||||
"podResources": {
|
||||
"cpuRequest": "500m",
|
||||
"cpuLimit": "1000m",
|
||||
"memoryRequest": "1Gi",
|
||||
"memoryLimit": "2Gi"
|
||||
},
|
||||
"priorityClassName": "backup-priority",
|
||||
"backupPVC": {
|
||||
"fast-ssd": {
|
||||
"storageClass": "backup-optimized",
|
||||
"readOnly": true
|
||||
},
|
||||
"standard": {
|
||||
"storageClass": "backup-standard"
|
||||
}
|
||||
},
|
||||
"restorePVC": {
|
||||
"ignoreDelayBinding": true
|
||||
},
|
||||
"prepareQueueLength": 15
|
||||
}
|
||||
```
|
||||
|
||||
This configuration:
|
||||
- Allows 2 concurrent operations globally, 4 on backup nodes
|
||||
- Runs data movement only on backup nodes without critical workloads
|
||||
- Uses fast storage nodes for fast-ssd storage class operations
|
||||
- Limits pod resources to prevent cluster overload
|
||||
- Uses high priority for backup operations
|
||||
- Optimizes backup PVCs with read-only access and dedicated storage classes
|
||||
- Ignores delay binding for faster restores
|
||||
- Allows up to 15 operations in preparation phases
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **ConfigMap not taking effect**: Restart node-agent DaemonSet after changes
|
||||
2. **Invalid resource values**: Check logs for validation errors; entire section ignored on failure
|
||||
3. **Storage class not found**: Ensure specified storage classes exist in the cluster
|
||||
4. **SELinux issues**: Set `spcNoRelabeling: true` when using `readOnly: true`
|
||||
5. **Node selection not working**: Verify node labels and check only first loadAffinity element is used
|
||||
|
||||
### Validation
|
||||
|
||||
To verify your configuration is loaded correctly:
|
||||
```bash
|
||||
kubectl logs -n velero -l app=node-agent | grep -i config
|
||||
```
|
||||
|
||||
To check current node-agent configuration:
|
||||
```bash
|
||||
kubectl get cm <ConfigMap-Name> -n velero -o yaml
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
For detailed information on specific configuration sections:
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md)
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md)
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md)
|
||||
- [BackupPVC Configuration for Data Movement Backup](data-movement-backup-pvc-configuration.md)
|
||||
- [RestorePVC Configuration for Data Movement Restore](data-movement-restore-pvc-configuration.md)
|
||||
- [Node-agent Prepare Queue Length](node-agent-prepare-queue-length.md)
|
||||
@@ -3,6 +3,8 @@ title: "Node-agent Prepare Queue Length"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
> **📖 For a comprehensive guide** covering all node-agent configuration options, see [Node-agent Configuration](node-agent-config.md).
|
||||
|
||||
During [CSI Snapshot Data Movement][1], Velero built-in data mover launches data mover pods to run the data transfer.
|
||||
During [fs-backup][2], Velero also launches data mover pods to run the data transfer.
|
||||
Other intermediate resources may also be created along with the data mover pods, i.e., PVCs, VolumeSnapshots, VolumeSnapshotContents, etc.
|
||||
@@ -42,6 +44,16 @@ spec:
|
||||
- --node-agent-configmap=<configMap name>
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options
|
||||
- [Node-agent Concurrency](node-agent-concurrency.md) - Configure concurrent operations per node
|
||||
- [Node Selection for Data Movement](data-movement-node-selection.md) - Configure which nodes run data movement
|
||||
- [Data Movement Pod Resource Configuration](data-movement-pod-resource-configuration.md) - Configure pod resources
|
||||
- [BackupPVC Configuration](data-movement-backup-pvc-configuration.md) - Configure backup storage
|
||||
- [RestorePVC Configuration](data-movement-restore-pvc-configuration.md) - Configure restore storage
|
||||
- [Cache PVC Configuration](data-movement-cache-volume.md) - Configure restore data mover storage
|
||||
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: node-agent-concurrency.md
|
||||
|
||||
10
site/content/docs/main/supported-configmaps/_index.md
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
layout: docs
|
||||
title: Supported ConfigMaps
|
||||
---
|
||||
|
||||
Here's a list of ConfigMaps that Velero supports, but whose life cycle control is out of Velero's scope.
|
||||
|
||||
* [node-agent ConfigMap][1]
|
||||
|
||||
[1]: node-agent-configmap.md
|
||||
@@ -0,0 +1,494 @@
|
||||
---
|
||||
title: "Node-agent Configuration"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
The Velero node-agent is a DaemonSet that hosts modules for completing backup and restore operations, including file system backup/restore and CSI snapshot data movement. This document provides comprehensive configuration options for the ConfigMap referenced by the node-agent's `--node-agent-configmap` parameter.
|
||||
|
||||
## Overview
|
||||
|
||||
Node-agent puts advanced configurations of data movement and PodVolume operations into a ConfigMap that contains JSON configuration. The ConfigMap should be created in the same namespace where Velero is installed, and its name is specified using the `--node-agent-configmap` parameter.
|
||||
|
||||
### Creating and Managing the ConfigMap
|
||||
For detailed information on the individual configuration sections, see the cross-references under each section below.
|
||||
|
||||
**Notice**: The ConfigMap's life cycle control is out of the scope of Velero.
Users need to create and maintain the ConfigMap themselves.
|
||||
|
||||
The ConfigMap name can be specified during Velero installation:
|
||||
```bash
|
||||
velero install --node-agent-configmap=<ConfigMap-Name>
|
||||
```
|
||||
|
||||
To create the ConfigMap:
|
||||
1. Save your configuration to a JSON file
|
||||
2. Create the ConfigMap:
|
||||
```bash
|
||||
kubectl create cm <ConfigMap-Name> -n velero --from-file=<json-file-name>
|
||||
```
|
||||
|
||||
To apply the ConfigMap to the node-agent DaemonSet:
|
||||
```bash
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
|
||||
Add the ConfigMap reference to the container arguments:
|
||||
```yaml
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-configmap=<ConfigMap-Name>
|
||||
```
|
||||
**Important**: The node-agent server checks configurations at startup time. After editing the ConfigMap, restart the node-agent DaemonSet for changes to take effect.
|
||||
`kubectl rollout restart -n <velero-namespace> daemonset/node-agent`
|
||||
|
||||
## Configuration Sections
|
||||
### Load Concurrency (`loadConcurrency`)
|
||||
|
||||
Controls the concurrent number of data movement operations per node to optimize resource usage and performance.
|
||||
|
||||
For detailed information, see [Node-agent Concurrency](../node-agent-concurrency.md) and [Node-agent Prepare Queue Length](../node-agent-prepare-queue-length.md).
|
||||
|
||||
#### Global Configuration
|
||||
Sets a default concurrent number applied to all nodes:
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Per-node Configuration
|
||||
Specify different concurrent numbers for specific nodes using label selectors:
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2,
|
||||
"perNodeConfig": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"kubernetes.io/hostname": "node1"
|
||||
}
|
||||
},
|
||||
"number": 3
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"beta.kubernetes.io/instance-type": "Standard_B4ms"
|
||||
}
|
||||
},
|
||||
"number": 5
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **Range**: Starts from 1 (no concurrency per node), no upper limit
|
||||
- **Priority**: Per-node configuration overrides global configuration
|
||||
- **Conflicts**: If a node matches multiple rules, the smallest number is used
|
||||
- **Default**: 1 if not specified
|
||||
|
||||
**Use Cases:**
|
||||
- Increase concurrency on nodes with more resources
|
||||
- Reduce concurrency on nodes with limited resources or critical workloads
|
||||
- Prevent OOM kills and resource contention
|
||||
|
||||
#### PrepareQueueLength
|
||||
Control the maximum number of backup/restore operations that can be in preparation phases simultaneously.
|
||||
The concurrency number controls how many backup/restore operations can run at the same time.
The prepare queue length controls how many backup/restore operations may have their workload pods created and pending to start.
If there are thousands of volume backup/restore operations and this control is not in place, thousands of backup/restore pods would be created at the same time, causing a big burden on the Kubernetes API server.
|
||||
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"prepareQueueLength": 10
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **Range**: Starts from 1 (for all node-agent pods), no upper limit
- **Scope**: This parameter controls the number of pending PodVolumeBackup, PodVolumeRestore, DataUpload, and DataDownload pods. It applies to all node-agent pods.
- **Default**: No limitation if not specified
|
||||
|
||||
**Use Cases:**
|
||||
- Prevent the creation of too many workload pods that cannot start
|
||||
- Limit resource consumption from intermediate objects (PVCs, VolumeSnapshots, etc.)
|
||||
- Prevent resource exhaustion when backup/restore concurrency is limited
|
||||
- Balance between parallelism and resource usage
|
||||
|
||||
**Affected CR Phases:**
|
||||
- DataUpload/DataDownload CRs in `Accepted` or `Prepared` phases
|
||||
- PodVolumeBackup/PodVolumeRestore CRs in preparation phases
|
||||
|
||||
### Node Selection (`loadAffinity`)
|
||||
Constrains which nodes can run data movement operations using affinity and anti-affinity rules.
|
||||
|
||||
For detailed information, see [Node Selection for Data Movement](../data-movement-node-selection.md).
|
||||
|
||||
```json
|
||||
{
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"beta.kubernetes.io/instance-type": "Standard_B4ms"
|
||||
},
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "kubernetes.io/hostname",
|
||||
"values": ["node-1", "node-2", "node-3"],
|
||||
"operator": "In"
|
||||
},
|
||||
{
|
||||
"key": "critical-workload",
|
||||
"operator": "DoesNotExist"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
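To preview which nodes satisfy the affinity above, the same conditions can be expressed as a kubectl label selector. This is a sketch; adjust the labels to your cluster:

```bash
# List nodes of the given instance type, within the hostname set, and without the critical-workload label
kubectl get nodes -l 'beta.kubernetes.io/instance-type=Standard_B4ms,kubernetes.io/hostname in (node-1,node-2,node-3),!critical-workload'
```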
|
||||
|
||||
#### Storage Class Specific Selection
|
||||
Configure different node selection rules for specific storage classes:
|
||||
* For StorageClass `fast-ssd`, the first matching element is used, so nodes with the label `"environment": "production"` are chosen.
|
||||
* For StorageClass `hdd`, the nodes with label `"environment": "backup"` are chosen.
|
||||
|
||||
```json
|
||||
{
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "production"
|
||||
}
|
||||
},
|
||||
"storageClass": "fast-ssd"
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "staging"
|
||||
}
|
||||
},
|
||||
"storageClass": "fast-ssd"
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "backup"
|
||||
}
|
||||
},
|
||||
"storageClass": "hdd"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Important Limitations:**
|
||||
- Only the first element in the `loadAffinity` array is used for general node selection
|
||||
- Additional elements are only considered if they have a `storageClass` field
|
||||
- To combine multiple conditions, use both `matchLabels` and `matchExpressions` in a single element
|
||||
|
||||
**Use Cases:**
|
||||
- Prevent data movement on nodes with critical workloads
|
||||
- Run data movement only on nodes with sufficient resources
|
||||
- Ensure data movement runs only on nodes where storage is accessible
|
||||
- Comply with topology constraints
|
||||
|
||||
### Pod Resources (`podResources`)
|
||||
Configure CPU and memory resources for data mover pods to optimize performance and prevent resource conflicts.
|
||||
The configuration applies to PodVolumeBackup, PodVolumeRestore, DataUpload, and DataDownload pods.
|
||||
|
||||
```json
|
||||
{
|
||||
"podResources": {
|
||||
"cpuRequest": "1000m",
|
||||
"cpuLimit": "2000m",
|
||||
"memoryRequest": "1Gi",
|
||||
"memoryLimit": "4Gi"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- Limit resource consumption in resource-constrained clusters
|
||||
- Guarantee resources for time-critical backup/restore operations
|
||||
- Prevent OOM kills during large data transfers
|
||||
- Control scheduling priority relative to production workloads
|
||||
|
||||
- **Values**: Must be valid Kubernetes Quantity expressions
|
||||
- **Validation**: Request values must not exceed limit values
|
||||
- **Default**: BestEffort QoS if not specified
|
||||
- **Failure Handling**: Invalid values cause the entire `podResources` section to be ignored
|
||||
|
||||
For detailed information, see [Data Movement Pod Resource Configuration](../data-movement-pod-resource-configuration.md).
|
||||
|
||||
|
||||
### Priority Class (`priorityClassName`)
|
||||
Configure the PriorityClass for pods created by the node-agent.
|
||||
The configuration applies to PodVolumeBackup, PodVolumeRestore, DataUpload, and DataDownload pods.
|
||||
|
||||
Configure pod priority to control scheduling behavior:
|
||||
|
||||
**High Priority** (e.g., `system-cluster-critical`):
|
||||
- ✅ Faster scheduling and less likely to be preempted
|
||||
- ❌ May impact production workload performance
|
||||
|
||||
**Low Priority** (e.g., `low-priority`):
|
||||
- ✅ Protects production workloads from resource competition
|
||||
- ❌ May delay backup operations or cause preemption
|
||||
|
||||
Example:
|
||||
|
||||
```json
|
||||
{
|
||||
"priorityClassName": "low-priority"
|
||||
}
|
||||
```
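The priority class referenced above must already exist in the cluster. Here is a minimal sketch of such a PriorityClass, assuming the `low-priority` name used in the example and an illustrative priority value:

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: low-priority          # must match priorityClassName in the node-agent ConfigMap
value: 1000                   # illustrative; keep it below your production workloads' priority
globalDefault: false
description: "Priority for Velero data mover pods"
```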
|
||||
|
||||
### Backup PVC Configuration (`backupPVC`)
|
||||
|
||||
Configure intermediate PVCs used during data movement backup operations for optimal performance.
|
||||
|
||||
For detailed information, see [BackupPVC Configuration for Data Movement Backup](../data-movement-backup-pvc-configuration.md).
|
||||
|
||||
#### Configuration Options
|
||||
- **`storageClass`**: Alternative storage class for backup PVCs (defaults to source PVC's storage class)
|
||||
- **`readOnly`**: Boolean. If set to `true`, `ReadOnlyMany` is the only access mode set on the backup PVC; otherwise `ReadWriteOnce` is used.
|
||||
- **`spcNoRelabeling`**: Boolean. If set to `true`, `pod.Spec.SecurityContext.SELinuxOptions.Type` is set to `spc_t`. From the SELinux point of view, the pod is treated as a Super Privileged Container, so SELinux enforcement is disabled and volume relabeling does not occur. This field is ignored if `readOnly` is `false`.
|
||||
|
||||
**Use Cases:**
|
||||
- Use read-only volumes for faster snapshot-to-volume conversion
|
||||
- Use dedicated storage classes optimized for backup operations
|
||||
- Reduce replica count for intermediate backup volumes
|
||||
- Comply with SELinux requirements in secured environments
|
||||
|
||||
**Important Notes:**
|
||||
- Ensure specified storage classes exist and support required access modes
|
||||
- In SELinux environments, always set `spcNoRelabeling: true` when using `readOnly: true`
|
||||
- Failures result in the DataUpload CR staying in the `Accepted` phase until it times out (30 minutes by default)
|
||||
|
||||
#### Storage Class Mapping
|
||||
Configure different backup PVC settings per source storage class:
|
||||
```json
|
||||
{
|
||||
"backupPVC": {
|
||||
"fast-storage": {
|
||||
"storageClass": "backup-storage-1"
|
||||
},
|
||||
"slow-storage": {
|
||||
"storageClass": "backup-storage-2"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### ReadOnly and SPC Configuration
|
||||
Create the backup PVC in ReadOnly mode, which can avoid a full data clone during the backup process with some storage providers, such as Ceph RBD.
|
||||
|
||||
In an SELinux-enabled cluster, any time users set `readOnly: true` they must also set `spcNoRelabeling: true`.
|
||||
|
||||
```json
|
||||
{
|
||||
"backupPVC": {
|
||||
"source-storage-class": {
|
||||
"storageClass": "backup-optimized-class",
|
||||
"readOnly": true,
|
||||
"spcNoRelabeling": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Restore PVC Configuration (`restorePVC`)
|
||||
|
||||
Configure intermediate PVCs used during data movement restore operations.
|
||||
|
||||
```json
|
||||
{
|
||||
"restorePVC": {
|
||||
"ignoreDelayBinding": true
|
||||
}
|
||||
}
|
||||
```
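Before enabling `ignoreDelayBinding`, you may want to check whether the storage class in question actually uses delayed binding (the storage class name is illustrative):

```bash
# Prints WaitForFirstConsumer or Immediate
kubectl get storageclass fast-ssd -o jsonpath='{.volumeBindingMode}'
```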
|
||||
|
||||
#### Configuration Options
|
||||
- **`ignoreDelayBinding`**: Ignore `WaitForFirstConsumer` binding mode constraints
|
||||
|
||||
**Use Cases:**
|
||||
- Improve restore parallelism by not waiting for pod scheduling
|
||||
- Enable volume restore without requiring a pod to mount the volume first
|
||||
- Work around topology constraints when you know the environment setup
|
||||
|
||||
**Important Notes:**
|
||||
- Use only when you understand your cluster's topology constraints
|
||||
- May result in volumes provisioned on nodes where workload pods cannot be scheduled
|
||||
- Works best with node selection to ensure proper node targeting
|
||||
|
||||
For detailed information, see [RestorePVC Configuration for Data Movement Restore](../data-movement-restore-pvc-configuration.md).
|
||||
|
||||
### Privileged FS Backup and Restore (`privilegedFsBackup`)
|
||||
Adds the `privileged` permission to the `SecurityContext` of pods created for PodVolumeBackup and PodVolumeRestore, because in some Kubernetes environments mounting HostPath volumes requires privileged permission.
|
||||
|
||||
In v1.17, PodVolumeBackup and PodVolumeRestore are micro-serviced into independent pods, but they still mount the target volumes through HostPath. As a result, the `privileged` permission is needed.
|
||||
|
||||
```json
|
||||
{
|
||||
"privilegedFsBackup": true
|
||||
}
|
||||
```
|
||||
|
||||
For detailed information, see the [Enable file system backup](../customize-installation.md#enable-file-system-backup) documentation.
|
||||
|
||||
### Cache PVC Configuration (`cachePVCConfig`)
|
||||
|
||||
Configure intermediate PVCs used for data movement restore operations to cache the downloaded data.
|
||||
|
||||
For detailed information, see [Cache PVC Configuration for Data Movement Restore](../data-movement-cache-volume.md).
|
||||
|
||||
#### Configuration Options
|
||||
- **`thresholdInGB`**: Minimum backup data size (in GB) to trigger cache PVC creation during restore
|
||||
- **`storageClass`**: Storage class used to create cache PVCs.
|
||||
|
||||
**Use Cases:**
|
||||
- Improve restore performance by caching downloaded data locally
|
||||
- Reduce repeated data downloads from object storage
|
||||
- Optimize restore operations for large volumes
|
||||
|
||||
**Important Notes:**
|
||||
- Cache PVC is only created when restored data size exceeds the threshold
|
||||
- Ensure specified storage class exists and has sufficient capacity
|
||||
- Cache PVCs are temporary and cleaned up after restore completion
|
||||
|
||||
```json
|
||||
{
|
||||
"cachePVCConfig": {
|
||||
"thresholdInGB": 1,
|
||||
"storageClass": "cache-optimized-storage"
|
||||
}
|
||||
}
|
||||
```
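To observe cache PVCs being created and cleaned up during a restore, you can watch PVCs in the Velero namespace (this assumes the data mover pods and their cache PVCs live in the `velero` namespace):

```bash
kubectl get pvc -n velero --watch
```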
|
||||
|
||||
## Complete Configuration Example
|
||||
Here's a comprehensive example showing how all configuration sections work together:
|
||||
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
"globalConfig": 2,
|
||||
"prepareQueueLength": 15,
|
||||
"perNodeConfig": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"kubernetes.io/hostname": "node1"
|
||||
}
|
||||
},
|
||||
"number": 3
|
||||
}
|
||||
]
|
||||
},
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"node-type": "backup"
|
||||
},
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "critical-workload",
|
||||
"operator": "DoesNotExist"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchLabels": {
|
||||
"environment": "staging"
|
||||
}
|
||||
},
|
||||
"storageClass": "fast-ssd"
|
||||
}
|
||||
],
|
||||
"podResources": {
|
||||
"cpuRequest": "500m",
|
||||
"cpuLimit": "1000m",
|
||||
"memoryRequest": "1Gi",
|
||||
"memoryLimit": "2Gi"
|
||||
},
|
||||
"priorityClassName": "backup-priority",
|
||||
"backupPVC": {
|
||||
"fast-storage": {
|
||||
"storageClass": "backup-optimized-class",
|
||||
"readOnly": true,
|
||||
"spcNoRelabeling": true
|
||||
},
|
||||
"slow-storage": {
|
||||
"storageClass": "backup-storage-2"
|
||||
}
|
||||
},
|
||||
"restorePVC": {
|
||||
"ignoreDelayBinding": true
|
||||
},
|
||||
"privilegedFsBackup": true,
|
||||
"cachePVC": {
|
||||
"thresholdInGB": 1,
|
||||
"storageClass": "cache-optimized-storage"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration:
|
||||
- Allows 2 concurrent operations globally, 3 on worker `node1`
|
||||
- Allows up to 15 operations in preparation phases
|
||||
- Runs data movement only on backup nodes without critical workloads
|
||||
- Uses staging-environment nodes for `fast-ssd` storage class operations
|
||||
- Limits pod resources to prevent cluster overload
|
||||
- Uses a dedicated `backup-priority` priority class for data mover pods
|
||||
- Optimizes backup PVCs with read-only access and dedicated storage classes
|
||||
- Ignores delay binding for faster restores
|
||||
- Enables the privileged permission for PodVolumeBackup/PodVolumeRestore pods
|
||||
- Enables cache PVCs for file system restores
|
||||
- Sets the cache threshold to 1 GB and uses a dedicated StorageClass for cache PVCs
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **ConfigMap not taking effect**: Restart node-agent DaemonSet after changes
|
||||
2. **Invalid resource values**: Check logs for validation errors; entire section ignored on failure
|
||||
3. **Storage class not found**: Ensure specified storage classes exist in the cluster
|
||||
4. **SELinux issues**: Set `spcNoRelabeling: true` when using `readOnly: true`
|
||||
5. **Node selection not working**: Verify node labels and remember that only the first `loadAffinity` element is used for general node selection
|
||||
|
||||
### Validation
|
||||
|
||||
To verify your configuration is loaded correctly:
|
||||
```bash
|
||||
kubectl logs -n velero -l app=node-agent | grep -i config
|
||||
```
|
||||
|
||||
To check the current node-agent configuration:
|
||||
```bash
|
||||
kubectl get cm <ConfigMap-Name> -n velero -o yaml
|
||||
```
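To confirm the node-agent DaemonSet actually references the ConfigMap (assuming the default `velero` namespace):

```bash
# The output should include --node-agent-configmap=<ConfigMap-Name>
kubectl get daemonset node-agent -n velero \
  -o jsonpath='{.spec.template.spec.containers[0].args}'
```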
|
||||
|
||||
## Related Documentation
|
||||
For detailed information on specific configuration sections:
|
||||
- [Node-agent Concurrency](../node-agent-concurrency.md)
|
||||
- [Node Selection for Data Movement](../data-movement-node-selection.md)
|
||||
- [Data Movement Pod Resource Configuration](../data-movement-pod-resource-configuration.md)
|
||||
- [BackupPVC Configuration for Data Movement Backup](../data-movement-backup-pvc-configuration.md)
|
||||
- [RestorePVC Configuration for Data Movement Restore](../data-movement-restore-pvc-configuration.md)
|
||||
- [Node-agent Prepare Queue Length](../node-agent-prepare-queue-length.md)
|
||||
- [Cache PVC Configuration for Data Movement Restore](../data-movement-cache-volume.md)
|
||||
@@ -298,6 +298,96 @@ You can customize the label key that Velero uses to identify VGS groups. This is
|
||||
|
||||
3. **Default Value (Lowest Priority):** If you don't provide any custom configuration, Velero defaults to using `velero.io/volume-group`.
|
||||
|
||||
## Volume Policies and VolumeGroupSnapshots
|
||||
|
||||
Volume policies control which volumes should be backed up and how (snapshot vs filesystem backup). When using VolumeGroupSnapshots, volume policies are applied **before** grouping PVCs.
|
||||
|
||||
### How Volume Policies Affect VGS
|
||||
|
||||
When Velero processes PVCs for a VolumeGroupSnapshot:
|
||||
|
||||
1. **Label Matching:** All PVCs with the matching VGS label are identified
|
||||
2. **Policy Filtering:** Volume policies are evaluated for each PVC
|
||||
3. **Group Creation:** Only PVCs that should be snapshotted (not excluded by policy) are included in the VGS
|
||||
4. **Warning Logging:** If any PVCs are excluded from the group by volume policy, a warning is logged
|
||||
|
||||
This behavior ensures that volume policies take precedence over VGS labels. The VGS label indicates "group these volumes **if they're being backed up**", while the volume policy determines "which volumes to back up".
|
||||
|
||||
### Example Scenario
|
||||
|
||||
Consider an application with mixed storage types where some volumes should be excluded:
|
||||
|
||||
```yaml
|
||||
# Database PVC using CSI driver (should be backed up)
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: db-data
|
||||
namespace: my-app
|
||||
labels:
|
||||
app.kubernetes.io/instance: myapp # VGS label
|
||||
spec:
|
||||
storageClassName: csi-storage
|
||||
# ...
|
||||
|
||||
---
|
||||
# Config PVC using NFS (should be excluded)
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: config-data
|
||||
namespace: my-app
|
||||
labels:
|
||||
app.kubernetes.io/instance: myapp # Same VGS label
|
||||
spec:
|
||||
storageClassName: nfs-storage
|
||||
# ...
|
||||
```
|
||||
|
||||
**Volume Policy Configuration:**
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: velero-volume-policies
|
||||
namespace: velero
|
||||
data:
|
||||
volume-policy: |
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
```
|
||||
|
||||
**Backup Configuration:**
|
||||
```yaml
|
||||
apiVersion: velero.io/v1
|
||||
kind: Backup
|
||||
metadata:
|
||||
name: myapp-backup
|
||||
spec:
|
||||
includedNamespaces:
|
||||
- my-app
|
||||
volumeGroupSnapshotLabelKey: app.kubernetes.io/instance
|
||||
resourcePolicy:
|
||||
kind: ConfigMap
|
||||
name: velero-volume-policies
|
||||
```
|
||||
|
||||
**Result:**
|
||||
- The NFS PVC (`config-data`) is filtered out by the volume policy
|
||||
- Only the CSI PVC (`db-data`) is included in the VolumeGroupSnapshot
|
||||
- A warning is logged: `PVC my-app/config-data has VolumeGroupSnapshot label app.kubernetes.io/instance=myapp but is excluded by volume policy`
|
||||
- The backup succeeds with a single-volume VGS instead of failing with "multiple CSI drivers" error
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Use Specific Labels:** When possible, use VGS labels that only target volumes you want to group, rather than relying on volume policies for filtering (see the sketch after this list)
|
||||
2. **Monitor Warnings:** Review backup logs for volume policy exclusion warnings to ensure intended PVCs are being backed up
|
||||
3. **Test Configurations:** Verify that your volume policy and VGS label combinations produce the expected grouping in a test environment
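As a sketch of the first practice, a dedicated grouping label can be applied only to the PVCs that should share a group, and the backup can then use that key. The label key and PVC name below are illustrative:

```bash
# Label only the PVCs that should be snapshotted together
kubectl label pvc db-data -n my-app backup.example.io/volume-group=myapp-db
```

Then set `volumeGroupSnapshotLabelKey: backup.example.io/volume-group` in the Backup spec so unrelated PVCs are never pulled into the group.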
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues and Solutions
|
||||
@@ -334,6 +424,36 @@ kubectl logs -n kube-system -l app=ebs-csi-controller --tail=100
|
||||
- Check VolumeGroupSnapshotClass configuration
|
||||
- Ensure storage backend supports group snapshots
|
||||
|
||||
#### Multiple CSI Drivers Error
|
||||
|
||||
**Symptoms:** Backup fails with an error about multiple CSI drivers being found
|
||||
```
|
||||
Error backing up item: failed to determine CSI driver for PVCs in VolumeGroupSnapshot group:
|
||||
found multiple CSI drivers: linstor.csi.linbit.com and nfs.csi.k8s.io
|
||||
```
|
||||
|
||||
**Cause:** PVCs with the same VGS label use different CSI drivers or include non-CSI volumes
|
||||
|
||||
**Solutions:**
|
||||
1. Use more specific labels that only match PVCs using the same CSI driver
|
||||
2. Use volume policies to exclude PVCs that shouldn't be snapshotted:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: velero-volume-policies
|
||||
namespace: velero
|
||||
data:
|
||||
volume-policy: |
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
nfs: {}
|
||||
action:
|
||||
type: skip
|
||||
```
|
||||
3. Check backup logs for volume policy warnings to verify filtering is working
|
||||
|
||||
#### VolumeGroupSnapshot Setup: Default VolumeSnapshotClass Required
|
||||
|
||||
**Issue**
|
||||
|
||||
@@ -44,9 +44,7 @@ toc:
|
||||
- page: Restore Resource Modifiers
|
||||
url: /restore-resource-modifiers
|
||||
- page: Run in any namespace
|
||||
url: /namespace
|
||||
- page: File system backup
|
||||
url: /file-system-backup
|
||||
url: /namespace
|
||||
- page: CSI Support
|
||||
url: /csi
|
||||
- page: Volume Group Snapshots
|
||||
@@ -67,6 +65,8 @@ toc:
|
||||
subfolderitems:
|
||||
- page: CSI Snapshot Data Mover
|
||||
url: /csi-snapshot-data-movement
|
||||
- page: File system backup
|
||||
url: /file-system-backup
|
||||
- page: Data Movement Backup PVC Configuration
|
||||
url: /data-movement-backup-pvc-configuration
|
||||
- page: Data Movement Restore PVC Configuration
|
||||
@@ -75,8 +75,14 @@ toc:
|
||||
url: /data-movement-pod-resource-configuration
|
||||
- page: Data Movement Node Selection Configuration
|
||||
url: /data-movement-node-selection
|
||||
- page: Data Movement Cache PVC Configuration
|
||||
url: /data-movement-cache-volume.md
|
||||
- page: Node-agent Concurrency
|
||||
url: /node-agent-concurrency
|
||||
- page: Node-agent Prepare Queue Length
|
||||
url: /node-agent-prepare-queue-length
|
||||
- page: Data Movement Cache Volume
|
||||
url: /data-movement-cache-volume
|
||||
- title: Plugins
|
||||
subfolderitems:
|
||||
- page: Overview
|
||||
@@ -117,6 +123,8 @@ toc:
|
||||
url: /output-file-format
|
||||
- page: API types
|
||||
url: /api-types
|
||||
- page: Supported ConfigMap
|
||||
url: /supported-configmaps
|
||||
- page: Support process
|
||||
url: /support-process
|
||||
- page: For maintainers
|
||||
|
||||
@@ -142,6 +142,7 @@ func (m *migrationE2E) Backup() error {
|
||||
"Fail to set images for the migrate-from Velero installation.")
|
||||
|
||||
m.veleroCLI2Version.VeleroCLI, err = veleroutil.InstallVeleroCLI(
|
||||
m.Ctx,
|
||||
m.veleroCLI2Version.VeleroVersion)
|
||||
Expect(err).To(Succeed())
|
||||
}
|
||||
|
||||
@@ -115,7 +115,10 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
|
||||
//Download velero CLI if it's empty according to velero CLI version
|
||||
By(fmt.Sprintf("Install the expected old version Velero CLI (%s) for installing Velero",
|
||||
veleroCLI2Version.VeleroVersion), func() {
|
||||
veleroCLI2Version.VeleroCLI, err = InstallVeleroCLI(veleroCLI2Version.VeleroVersion)
|
||||
veleroCLI2Version.VeleroCLI, err = InstallVeleroCLI(
|
||||
oneHourTimeout,
|
||||
veleroCLI2Version.VeleroVersion,
|
||||
)
|
||||
Expect(err).To(Succeed())
|
||||
})
|
||||
}
|
||||
|
||||