mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-03-27 03:55:04 +00:00
Compare commits
2 Commits
v1.16.2-rc
...
dependabot
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f5c9da5b6a | ||
|
|
5d4a55ce1a |
58
.github/workflows/e2e-test-kind.yaml
vendored
58
.github/workflows/e2e-test-kind.yaml
vendored
@@ -42,42 +42,28 @@ jobs:
|
||||
- name: Build Velero Image
|
||||
if: steps.image-cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
|
||||
docker save velero:pr-test-linux-amd64 -o ./velero.tar
|
||||
# Create json of k8s versions to test
|
||||
# from guide: https://stackoverflow.com/a/65094398/4590470
|
||||
setup-test-matrix:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Set k8s versions
|
||||
id: set-matrix
|
||||
# everything excluding older tags. limits needs to be high enough to cover all latest versions
|
||||
# and test labels
|
||||
# grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" filters for v1.25 to v9.99
|
||||
# and removes older patches of the same minor version
|
||||
# awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}'
|
||||
run: |
|
||||
echo "matrix={\
|
||||
\"k8s\":$(wget -q -O - "https://hub.docker.com/v2/namespaces/kindest/repositories/node/tags?page_size=50" | grep -o '"name": *"[^"]*' | grep -o '[^"]*$' | grep -v -E "alpha|beta" | grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" | awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}' | sort -r | sed s/v//g | jq -R -c -s 'split("\n")[:-1]'),\
|
||||
\"labels\":[\
|
||||
\"Basic && (ClusterResource || NodePort || StorageClass)\", \
|
||||
\"ResourceFiltering && !Restic\", \
|
||||
\"ResourceModifier || (Backups && BackupsSync) || PrivilegesMgmt || OrderedResources\", \
|
||||
\"(NamespaceMapping && Single && Restic) || (NamespaceMapping && Multiple && Restic)\"\
|
||||
]}" >> $GITHUB_OUTPUT
|
||||
|
||||
IMAGE=velero VERSION=pr-test make container
|
||||
docker save velero:pr-test -o ./velero.tar
|
||||
# Run E2E test against all Kubernetes versions on kind
|
||||
run-e2e-test:
|
||||
needs:
|
||||
- build
|
||||
- setup-test-matrix
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
|
||||
matrix:
|
||||
k8s:
|
||||
- 1.23.17
|
||||
- 1.24.17
|
||||
- 1.25.16
|
||||
- 1.26.13
|
||||
- 1.27.10
|
||||
- 1.28.6
|
||||
- 1.29.1
|
||||
labels:
|
||||
# labels are used to filter running E2E cases
|
||||
- Basic && (ClusterResource || NodePort || StorageClass)
|
||||
- ResourceFiltering && !Restic
|
||||
- ResourceModifier || (Backups && BackupsSync) || PrivilegesMgmt || OrderedResources
|
||||
- (NamespaceMapping && Single && Restic) || (NamespaceMapping && Multiple && Restic)
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Check out the code
|
||||
@@ -92,7 +78,7 @@ jobs:
|
||||
- uses: engineerd/setup-kind@v0.6.2
|
||||
with:
|
||||
skipClusterLogsExport: true
|
||||
version: "v0.27.0"
|
||||
version: "v0.21.0"
|
||||
image: "kindest/node:v${{ matrix.k8s }}"
|
||||
- name: Fetch built CLI
|
||||
id: cli-cache
|
||||
@@ -121,8 +107,6 @@ jobs:
|
||||
curl -LO https://dl.k8s.io/release/v${{ matrix.k8s }}/bin/linux/amd64/kubectl
|
||||
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||
|
||||
git clone https://github.com/vmware-tanzu-experiments/distributed-data-generator.git -b main /tmp/kibishii
|
||||
|
||||
GOPATH=~/go \
|
||||
CLOUD_PROVIDER=kind \
|
||||
OBJECT_STORE_PROVIDER=aws \
|
||||
@@ -133,10 +117,8 @@ jobs:
|
||||
ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
|
||||
ADDITIONAL_CREDS_FILE=/tmp/credential \
|
||||
ADDITIONAL_BSL_BUCKET=additional-bucket \
|
||||
VELERO_IMAGE=velero:pr-test-linux-amd64 \
|
||||
PLUGINS=velero/velero-plugin-for-aws:latest \
|
||||
VELERO_IMAGE=velero:pr-test \
|
||||
GINKGO_LABELS="${{ matrix.labels }}" \
|
||||
KIBISHII_DIRECTORY=/tmp/kibishii/kubernetes/yaml/ \
|
||||
make -C test/ run-e2e
|
||||
timeout-minutes: 30
|
||||
- name: Upload debug bundle
|
||||
|
||||
2
.github/workflows/pr-ci-check.yml
vendored
2
.github/workflows/pr-ci-check.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Make ci
|
||||
run: make ci
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
|
||||
2
.github/workflows/pr-codespell.yml
vendored
2
.github/workflows/pr-codespell.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
- name: Codespell
|
||||
uses: codespell-project/actions-codespell@master
|
||||
with:
|
||||
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
|
||||
# ignore the config/.../crd.go file as it's generated binary data that is edited elswhere.
|
||||
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
|
||||
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme,optin
|
||||
check_filenames: true
|
||||
|
||||
2
.github/workflows/pr-linter-check.yml
vendored
2
.github/workflows/pr-linter-check.yml
vendored
@@ -20,5 +20,5 @@ jobs:
|
||||
- name: Linter check
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
version: v1.64.5
|
||||
version: v1.57.2
|
||||
args: --verbose
|
||||
|
||||
39
.github/workflows/push.yml
vendored
39
.github/workflows/push.yml
vendored
@@ -20,6 +20,15 @@ jobs:
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- id: 'auth'
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
credentials_json: '${{ secrets.GCS_SA_KEY }}'
|
||||
- name: 'set up GCloud SDK'
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
- name: 'use gcloud CLI'
|
||||
run: |
|
||||
gcloud info
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
@@ -38,11 +47,17 @@ jobs:
|
||||
- name: Test
|
||||
run: make test
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
verbose: true
|
||||
# Use the JSON key in secret to login gcr.io
|
||||
- uses: 'docker/login-action@v3'
|
||||
with:
|
||||
registry: 'gcr.io' # or REGION.docker.pkg.dev
|
||||
username: '_json_key'
|
||||
password: '${{ secrets.GCR_SA_KEY }}'
|
||||
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
|
||||
- name: Publish container image
|
||||
if: github.repository == 'vmware-tanzu/velero'
|
||||
@@ -53,4 +68,24 @@ jobs:
|
||||
|
||||
# Build and push Velero image to docker registry
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
./hack/docker-push.sh
|
||||
VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)
|
||||
|
||||
# Upload Velero image package to GCS
|
||||
source hack/ci/build_util.sh
|
||||
BIN=velero
|
||||
RESTORE_HELPER_BIN=velero-restore-helper
|
||||
GCS_BUCKET=velero-builds
|
||||
VELERO_IMAGE=${BIN}-${VERSION}
|
||||
VELERO_RESTORE_HELPER_IMAGE=${RESTORE_HELPER_BIN}-${VERSION}
|
||||
VELERO_IMAGE_FILE=${VELERO_IMAGE}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_FILE=${VELERO_RESTORE_HELPER_IMAGE}.tar.gz
|
||||
VELERO_IMAGE_BACKUP_FILE=${VELERO_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE=${VELERO_RESTORE_HELPER_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
|
||||
cp ${VELERO_IMAGE_FILE} ${VELERO_IMAGE_BACKUP_FILE}
|
||||
cp ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE}
|
||||
|
||||
uploader ${VELERO_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
|
||||
2
.github/workflows/stale-issues.yml
vendored
2
.github/workflows/stale-issues.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v9.1.0
|
||||
- uses: actions/stale@v9.0.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -53,8 +53,4 @@ tilt-resources/cloud
|
||||
# test generated files
|
||||
test/e2e/report.xml
|
||||
coverage.out
|
||||
__debug_bin*
|
||||
debug.test*
|
||||
|
||||
# make lint cache
|
||||
.cache/
|
||||
__debug_bin*
|
||||
176
.golangci.yaml
176
.golangci.yaml
@@ -12,6 +12,11 @@ run:
|
||||
# exit code when at least one issue was found, default is 1
|
||||
issues-exit-code: 1
|
||||
|
||||
|
||||
# default is true. Enables skipping of directories:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs-use-default: true
|
||||
|
||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
||||
@@ -27,6 +32,7 @@ run:
|
||||
# If false (default) - golangci-lint acquires file lock on start.
|
||||
allow-parallel-runners: false
|
||||
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
@@ -40,25 +46,18 @@ output:
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
# specify an error message to output when a denylisted package is used
|
||||
- pkg: github.com/sirupsen/logrus
|
||||
desc: "logging is allowed only by logutils.Log"
|
||||
|
||||
dogsled:
|
||||
# checks assignments with too many blank identifiers; default is 2
|
||||
max-blank-identifiers: 2
|
||||
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
|
||||
errcheck:
|
||||
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
@@ -76,31 +75,25 @@ linters-settings:
|
||||
# path to a file containing a list of functions to exclude from checking
|
||||
# see https://github.com/kisielk/errcheck#excluding-functions for details
|
||||
# exclude: /path/to/file.txt
|
||||
|
||||
exhaustive:
|
||||
# indicates that switch statements are to be considered exhaustive if a
|
||||
# 'default' case is present, even if all enum members aren't listed in the
|
||||
# switch
|
||||
default-signifies-exhaustive: false
|
||||
|
||||
funlen:
|
||||
lines: 60
|
||||
statements: 40
|
||||
|
||||
gocognit:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
nestif:
|
||||
# minimal complexity of if statements to report, 5 by default
|
||||
min-complexity: 4
|
||||
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 5
|
||||
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
@@ -125,15 +118,12 @@ linters-settings:
|
||||
paramsOnly: true
|
||||
# rangeValCopy:
|
||||
# sizeThreshold: 32
|
||||
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
godot:
|
||||
# check all top-level comments, not only declarations
|
||||
check-all: false
|
||||
|
||||
godox:
|
||||
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
|
||||
# might be left in the code accidentally and should be resolved before merging
|
||||
@@ -141,20 +131,35 @@ linters-settings:
|
||||
- NOTE
|
||||
- OPTIMIZE # marks code that should be optimized before merging
|
||||
- HACK # marks hack-arounds that should be removed before merging
|
||||
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
local-prefixes: github.com/org/project
|
||||
|
||||
gosec:
|
||||
excludes:
|
||||
- G115
|
||||
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
gomnd:
|
||||
# the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description.
|
||||
checks: argument,case,condition,operation,return,assign
|
||||
gomodguard:
|
||||
allowed:
|
||||
modules: # List of allowed modules
|
||||
# - gopkg.in/yaml.v2
|
||||
domains: # List of allowed module domains
|
||||
# - golang.org
|
||||
blocked:
|
||||
modules: # List of blocked modules
|
||||
# - github.com/uudashr/go-module: # Blocked module
|
||||
# recommendations: # Recommended modules that should be used instead (Optional)
|
||||
# - golang.org/x/mod
|
||||
# reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional)
|
||||
versions: # List of blocked module version constraints
|
||||
# - github.com/mitchellh/go-homedir: # Blocked module with version constraint
|
||||
# version: "< 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons
|
||||
# reason: "testing if blocked version constraint works." # Reason why the version constraint exists. (Optional)
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
# check-shadowing: true
|
||||
@@ -175,14 +180,23 @@ linters-settings:
|
||||
disable:
|
||||
- shadow
|
||||
disable-all: false
|
||||
|
||||
depguard:
|
||||
list-type: blacklist # Velero.io word list : ignore
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/sirupsen/logrus
|
||||
packages-with-error-message:
|
||||
# specify an error message to output when a denylisted package is used
|
||||
- github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
@@ -190,11 +204,9 @@ linters-settings:
|
||||
locale: US
|
||||
ignore-words:
|
||||
- someword
|
||||
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
@@ -204,74 +216,25 @@ linters-settings:
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
|
||||
nolintlint:
|
||||
# Enable to ensure that nolint directives are all used. Default is true.
|
||||
allow-unused: false
|
||||
# Disable to ensure that nolint directives don't have a leading space. Default is true.
|
||||
allow-leading-space: true
|
||||
# Exclude following linters from requiring an explanation. Default is [].
|
||||
allow-no-explanation: []
|
||||
# Enable to require an explanation of nonzero length after each nolint directive. Default is false.
|
||||
require-explanation: true
|
||||
# Enable to require nolint directives to mention the specific linter being suppressed. Default is false.
|
||||
require-specific: true
|
||||
|
||||
perfsprint:
|
||||
strconcat: false
|
||||
sprintf1: false
|
||||
errorf: false
|
||||
int-conversion: true
|
||||
|
||||
revive:
|
||||
rules:
|
||||
- name: blank-imports
|
||||
disabled: true
|
||||
- name: context-as-argument
|
||||
disabled: true
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
disabled: true
|
||||
- name: early-return
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: empty-block
|
||||
disabled: true
|
||||
- name: error-naming
|
||||
disabled: true
|
||||
- name: error-return
|
||||
disabled: true
|
||||
- name: error-strings
|
||||
disabled: true
|
||||
- name: errorf
|
||||
disabled: true
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
disabled: true
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
disabled: true
|
||||
- name: redefines-builtin-id
|
||||
disabled: true
|
||||
- name: superfluous-else
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
disabled: true
|
||||
- name: unnecessary-stmt
|
||||
- name: unreachable-code
|
||||
- name: unused-parameter
|
||||
disabled: true
|
||||
- name: use-any
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
disabled: true
|
||||
|
||||
|
||||
rowserrcheck:
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
|
||||
testifylint:
|
||||
# TODO: enable them all
|
||||
disable:
|
||||
@@ -279,7 +242,6 @@ linters-settings:
|
||||
- float-compare
|
||||
- require-error
|
||||
enable-all: true
|
||||
|
||||
testpackage:
|
||||
# regexp pattern to skip files
|
||||
skip-regexp: (export|internal)_test\.go
|
||||
@@ -289,11 +251,15 @@ linters-settings:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
whitespace:
|
||||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement
|
||||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature
|
||||
|
||||
wsl:
|
||||
# If true append is only allowed to be cuddled if appending value is
|
||||
# matching variables, fields or types on line above. Default is true.
|
||||
@@ -311,7 +277,7 @@ linters-settings:
|
||||
force-case-trailing-whitespace: 0
|
||||
# Force cuddling of err checks with err var assignment
|
||||
force-err-cuddling: false
|
||||
# Allow leading comments to be separated with empty lines
|
||||
# Allow leading comments to be separated with empty liens
|
||||
allow-separated-leading-comment: false
|
||||
|
||||
linters:
|
||||
@@ -321,11 +287,11 @@ linters:
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- copyloopvar
|
||||
- dogsled
|
||||
- durationcheck
|
||||
- dupword
|
||||
- errcheck
|
||||
- exportloopref
|
||||
- errchkjson
|
||||
- goconst
|
||||
- gofmt
|
||||
@@ -344,7 +310,6 @@ linters:
|
||||
- nilerr
|
||||
- noctx
|
||||
- nolintlint
|
||||
- perfsprint
|
||||
- revive
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
@@ -358,22 +323,15 @@ linters:
|
||||
- whitespace
|
||||
fast: false
|
||||
|
||||
issues:
|
||||
# which dirs to skip: issues from them won't be reported;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but default dirs are skipped independently
|
||||
# from this option's value (see skip-dirs-use-default).
|
||||
# "/" will be replaced by current OS file path separator to properly work
|
||||
# on Windows.
|
||||
exclude-dirs:
|
||||
- pkg/plugin/generated/*
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "DefaultVolumesToRestic" # No need to report deprecate for DefaultVolumesToRestic.
|
||||
- path: ".*_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
@@ -384,6 +342,7 @@ issues:
|
||||
- unused
|
||||
- path: test/
|
||||
linters:
|
||||
- dupword
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
@@ -392,14 +351,6 @@ issues:
|
||||
- stylecheck
|
||||
- unparam
|
||||
- unused
|
||||
- path: ".*data_upload_controller_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "type"
|
||||
- path: ".*config_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "bucket"
|
||||
|
||||
# The list of ids of default excludes to include or disable. By default it's empty.
|
||||
include:
|
||||
@@ -411,8 +362,17 @@ issues:
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
# Show only new issues created after git revision `REV`
|
||||
# new-from-rev: origin/main
|
||||
|
||||
# which dirs to skip: issues from them won't be reported;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but default dirs are skipped independently
|
||||
# from this option's value (see skip-dirs-use-default).
|
||||
# "/" will be replaced by current OS file path separator to properly work
|
||||
# on Windows.
|
||||
exclude-dirs:
|
||||
- pkg/plugin/generated/*
|
||||
|
||||
severity:
|
||||
# Default value is empty string.
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23.11-bookworm AS velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
@@ -42,14 +42,12 @@ RUN mkdir -p /output/usr/bin && \
|
||||
export GOARM=$( echo "${GOARM}" | cut -c2-) && \
|
||||
go build -o /output/${BIN} \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
|
||||
go build -o /output/velero-restore-helper \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-restore-helper && \
|
||||
go build -o /output/velero-helper \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Restic binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23.11-bookworm AS restic-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm AS restic-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
@@ -73,7 +71,7 @@ RUN mkdir -p /output/usr/bin && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Velero image packing section
|
||||
FROM paketobuildpacks/run-jammy-tiny:0.2.73
|
||||
FROM paketobuildpacks/run-jammy-tiny:latest
|
||||
|
||||
LABEL maintainer="Xun Jiang <jxun@vmware.com>"
|
||||
|
||||
@@ -82,3 +80,4 @@ COPY --from=velero-builder /output /
|
||||
COPY --from=restic-builder /output /
|
||||
|
||||
USER cnb:cnb
|
||||
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
# Copyright the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ARG OS_VERSION=1809
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23.10-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
ARG PKG
|
||||
ARG VERSION
|
||||
ARG REGISTRY
|
||||
ARG GIT_SHA
|
||||
ARG GIT_TREE_STATE
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
|
||||
ENV CGO_ENABLED=0 \
|
||||
GO111MODULE=on \
|
||||
GOPROXY=${GOPROXY} \
|
||||
GOOS=${TARGETOS} \
|
||||
GOARCH=${TARGETARCH} \
|
||||
GOARM=${TARGETVARIANT} \
|
||||
LDFLAGS="-X ${PKG}/pkg/buildinfo.Version=${VERSION} -X ${PKG}/pkg/buildinfo.GitSHA=${GIT_SHA} -X ${PKG}/pkg/buildinfo.GitTreeState=${GIT_TREE_STATE} -X ${PKG}/pkg/buildinfo.ImageRegistry=${REGISTRY}"
|
||||
|
||||
WORKDIR /go/src/github.com/vmware-tanzu/velero
|
||||
|
||||
COPY . /go/src/github.com/vmware-tanzu/velero
|
||||
|
||||
RUN mkdir -p /output/usr/bin && \
|
||||
export GOARM=$( echo "${GOARM}" | cut -c2-) && \
|
||||
go build -o /output/${BIN}.exe \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
|
||||
go build -o /output/velero-helper.exe \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Velero image packing section
|
||||
FROM mcr.microsoft.com/windows/nanoserver:${OS_VERSION}
|
||||
COPY --from=velero-builder /output /
|
||||
|
||||
USER ContainerUser
|
||||
@@ -10,6 +10,7 @@
|
||||
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | [VMware](https://www.github.com/vmware/) |
|
||||
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | [VMware](https://www.github.com/vmware/) |
|
||||
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
|
||||
| Ming Qiu | [qiuming-best](https://github.com/qiuming-best) | [VMware](https://www.github.com/vmware/) |
|
||||
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift) |
|
||||
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) |
|
||||
| Anshul Ahuja | [anshulahuja98](https://github.com/anshulahuja98) | [Microsoft Azure](https://www.github.com/azure/) |
|
||||
@@ -26,8 +27,7 @@
|
||||
* Bridget McErlean ([zubron](https://github.com/zubron))
|
||||
* JenTing Hsiao ([jenting](https://github.com/jenting))
|
||||
* Dave Smith-Uchida ([dsu-igeek](https://github.com/dsu-igeek))
|
||||
* Ming Qiu ([qiuming-best](https://github.com/qiuming-best))
|
||||
|
||||
|
||||
## Velero Contributors & Stakeholders
|
||||
|
||||
| Feature Area | Lead |
|
||||
|
||||
138
Makefile
138
Makefile
@@ -22,26 +22,15 @@ PKG := github.com/vmware-tanzu/velero
|
||||
|
||||
# Where to push the docker image.
|
||||
REGISTRY ?= velero
|
||||
# In order to push images to an insecure registry, follow the two steps:
|
||||
# 1. Set "INSECURE_REGISTRY=true"
|
||||
# 2. Provide your own buildx builder instance by setting "BUILDX_INSTANCE=your-own-builder-instance"
|
||||
# The builder can be created with the following command:
|
||||
# cat << EOF > buildkitd.toml
|
||||
# [registry."insecure-registry-ip:port"]
|
||||
# http = true
|
||||
# insecure = true
|
||||
# EOF
|
||||
# docker buildx create --name=velero-builder --driver=docker-container --bootstrap --use --config ./buildkitd.toml
|
||||
# Refer to https://github.com/docker/buildx/issues/1370#issuecomment-1288516840 for more details
|
||||
INSECURE_REGISTRY ?= false
|
||||
GCR_REGISTRY ?= gcr.io/velero-gcp
|
||||
|
||||
# Image name
|
||||
IMAGE ?= $(REGISTRY)/$(BIN)
|
||||
GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)
|
||||
|
||||
# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
|
||||
# that pull base images from different registries.
|
||||
VELERO_DOCKERFILE ?= Dockerfile
|
||||
VELERO_DOCKERFILE_WINDOWS ?= Dockerfile-Windows
|
||||
BUILDER_IMAGE_DOCKERFILE ?= hack/build-image/Dockerfile
|
||||
|
||||
# Calculate the realpath of the build-image Dockerfile as we `cd` into the hack/build
|
||||
@@ -79,8 +68,10 @@ TAG_LATEST ?= false
|
||||
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
|
||||
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
|
||||
else
|
||||
IMAGE_TAGS ?= $(IMAGE):$(VERSION)
|
||||
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
|
||||
endif
|
||||
|
||||
# check buildx is enabled only if docker is in path
|
||||
@@ -103,32 +94,13 @@ define BUILDX_ERROR
|
||||
buildx not enabled, refusing to run this recipe
|
||||
see: https://velero.io/docs/main/build-from-source/#making-images-and-updating-velero for more info
|
||||
endef
|
||||
# comma cannot be escaped and can only be used in Make function arguments by putting into variable
|
||||
comma=,
|
||||
|
||||
# The version of restic binary to be downloaded
|
||||
RESTIC_VERSION ?= 0.15.0
|
||||
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le
|
||||
BUILD_OUTPUT_TYPE ?= docker
|
||||
BUILD_OS ?= linux
|
||||
BUILD_ARCH ?= amd64
|
||||
BUILD_WINDOWS_VERSION ?= ltsc2022
|
||||
|
||||
ifeq ($(BUILD_OUTPUT_TYPE), docker)
|
||||
ALL_OS = linux
|
||||
ALL_ARCH.linux = $(word 2, $(subst -, ,$(shell go env GOOS)-$(shell go env GOARCH)))
|
||||
else
|
||||
ALL_OS = $(subst $(comma), ,$(BUILD_OS))
|
||||
ALL_ARCH.linux = $(subst $(comma), ,$(BUILD_ARCH))
|
||||
endif
|
||||
|
||||
ALL_ARCH.windows = $(if $(filter windows,$(ALL_OS)),amd64,)
|
||||
ALL_OSVERSIONS.windows = $(if $(filter windows,$(ALL_OS)),$(BUILD_WINDOWS_VERSION),)
|
||||
ALL_OS_ARCH.linux = $(foreach os, $(filter linux,$(ALL_OS)), $(foreach arch, ${ALL_ARCH.linux}, ${os}-$(arch)))
|
||||
ALL_OS_ARCH.windows = $(foreach os, $(filter windows,$(ALL_OS)), $(foreach arch, $(ALL_ARCH.windows), $(foreach osversion, ${ALL_OSVERSIONS.windows}, ${os}-${osversion}-${arch})))
|
||||
ALL_OS_ARCH = $(ALL_OS_ARCH.linux)$(ALL_OS_ARCH.windows)
|
||||
|
||||
ALL_IMAGE_TAGS = $(IMAGE_TAGS)
|
||||
BUILDX_PLATFORMS ?= $(subst -,/,$(ARCH))
|
||||
BUILDX_OUTPUT_TYPE ?= docker
|
||||
|
||||
# set git sha and tree state
|
||||
GIT_SHA = $(shell git rev-parse HEAD)
|
||||
@@ -152,14 +124,17 @@ GOBIN=$$(pwd)/.go/bin
|
||||
# If you want to build all containers, see the 'all-containers' rule.
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@$(MAKE) build BIN=velero-restore-helper
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restore-helper
|
||||
|
||||
all-build: $(addprefix build-, $(CLI_PLATFORMS))
|
||||
|
||||
all-containers:
|
||||
@$(MAKE) --no-print-directory container
|
||||
@$(MAKE) --no-print-directory container BIN=velero-restore-helper
|
||||
|
||||
local: build-dirs
|
||||
# Add DEBUG=1 to enable debug locally
|
||||
@@ -221,38 +196,11 @@ container:
|
||||
ifneq ($(BUILDX_ENABLED), true)
|
||||
$(error $(BUILDX_ERROR))
|
||||
endif
|
||||
|
||||
ifeq ($(BUILDX_INSTANCE),)
|
||||
@echo creating a buildx instance
|
||||
-docker buildx rm velero-builder || true
|
||||
@docker buildx create --use --name=velero-builder
|
||||
else
|
||||
@echo using a specified buildx instance $(BUILDX_INSTANCE)
|
||||
@docker buildx use $(BUILDX_INSTANCE)
|
||||
endif
|
||||
|
||||
@mkdir -p _output
|
||||
|
||||
@for osarch in $(ALL_OS_ARCH); do \
|
||||
$(MAKE) container-$${osarch}; \
|
||||
done
|
||||
|
||||
ifeq ($(BUILD_OUTPUT_TYPE), registry)
|
||||
@for tag in $(ALL_IMAGE_TAGS); do \
|
||||
IMAGE_TAG=$${tag} $(MAKE) push-manifest; \
|
||||
done
|
||||
endif
|
||||
|
||||
container-linux-%:
|
||||
@BUILDX_ARCH=$* $(MAKE) container-linux
|
||||
|
||||
container-linux:
|
||||
@echo "building container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"
|
||||
|
||||
@docker buildx build --pull \
|
||||
--output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-linux-$(BUILDX_ARCH).tar,)" \
|
||||
--platform="linux/$(BUILDX_ARCH)" \
|
||||
$(addprefix -t , $(addsuffix "-linux-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
|
||||
--output=type=$(BUILDX_OUTPUT_TYPE) \
|
||||
--platform $(BUILDX_PLATFORMS) \
|
||||
$(addprefix -t , $(IMAGE_TAGS)) \
|
||||
$(addprefix -t , $(GCR_IMAGE_TAGS)) \
|
||||
--build-arg=GOPROXY=$(GOPROXY) \
|
||||
--build-arg=PKG=$(PKG) \
|
||||
--build-arg=BIN=$(BIN) \
|
||||
@@ -261,54 +209,14 @@ container-linux:
|
||||
--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
|
||||
--build-arg=REGISTRY=$(REGISTRY) \
|
||||
--build-arg=RESTIC_VERSION=$(RESTIC_VERSION) \
|
||||
--provenance=false \
|
||||
--sbom=false \
|
||||
-f $(VELERO_DOCKERFILE) .
|
||||
|
||||
@echo "built container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"
|
||||
|
||||
container-windows-%:
|
||||
@BUILDX_OSVERSION=$(firstword $(subst -, ,$*)) BUILDX_ARCH=$(lastword $(subst -, ,$*)) $(MAKE) container-windows
|
||||
|
||||
container-windows:
|
||||
@echo "building container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"
|
||||
|
||||
@docker buildx build --pull \
|
||||
--output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH).tar,)" \
|
||||
--platform="windows/$(BUILDX_ARCH)" \
|
||||
$(addprefix -t , $(addsuffix "-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
|
||||
--build-arg=GOPROXY=$(GOPROXY) \
|
||||
--build-arg=PKG=$(PKG) \
|
||||
--build-arg=BIN=$(BIN) \
|
||||
--build-arg=VERSION=$(VERSION) \
|
||||
--build-arg=OS_VERSION=$(BUILDX_OSVERSION) \
|
||||
--build-arg=GIT_SHA=$(GIT_SHA) \
|
||||
--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
|
||||
--build-arg=REGISTRY=$(REGISTRY) \
|
||||
--provenance=false \
|
||||
--sbom=false \
|
||||
-f $(VELERO_DOCKERFILE_WINDOWS) .
|
||||
|
||||
@echo "built container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"
|
||||
|
||||
push-manifest:
|
||||
@echo "building manifest: $(IMAGE_TAG) for $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})"
|
||||
@docker manifest create --amend --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG) $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})
|
||||
|
||||
@set -x; \
|
||||
for arch in $(ALL_ARCH.windows); do \
|
||||
for osversion in $(ALL_OSVERSIONS.windows); do \
|
||||
BASEIMAGE=mcr.microsoft.com/windows/nanoserver:$${osversion}; \
|
||||
full_version=`docker manifest inspect --insecure=$(INSECURE_REGISTRY) $${BASEIMAGE} | jq -r '.manifests[0].platform["os.version"]'`; \
|
||||
docker manifest annotate --os windows --arch $${arch} --os-version $${full_version} $(IMAGE_TAG) $(IMAGE_TAG)-windows-$${osversion}-$${arch}; \
|
||||
done; \
|
||||
done
|
||||
|
||||
@echo "pushing manifest $(IMAGE_TAG)"
|
||||
@docker manifest push --purge --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)
|
||||
|
||||
@echo "pushed manifest $(IMAGE_TAG):"
|
||||
@docker manifest inspect --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)
|
||||
@echo "container: $(IMAGE):$(VERSION)"
|
||||
ifeq ($(BUILDX_OUTPUT_TYPE)_$(REGISTRY), registry_velero)
|
||||
docker pull $(IMAGE):$(VERSION)
|
||||
rm -f $(BIN)-$(VERSION).tar
|
||||
docker save $(IMAGE):$(VERSION) -o $(BIN)-$(VERSION).tar
|
||||
gzip -f $(BIN)-$(VERSION).tar
|
||||
endif
|
||||
|
||||
SKIP_TESTS ?=
|
||||
test: build-dirs
|
||||
@@ -479,7 +387,7 @@ go-generate:
|
||||
# make new-changelog CHANGELOG_BODY="Changes you have made"
|
||||
new-changelog: GH_LOGIN ?= $(shell gh pr view --json author --jq .author.login 2> /dev/null)
|
||||
new-changelog: GH_PR_NUMBER ?= $(shell gh pr view --json number --jq .number 2> /dev/null)
|
||||
new-changelog: CHANGELOG_BODY ?= '$(shell gh pr view --json title --jq .title)'
|
||||
new-changelog: CHANGELOG_BODY ?= "$(shell gh pr view --json title --jq .title)"
|
||||
new-changelog:
|
||||
@if [ "$(GH_LOGIN)" = "" ]; then \
|
||||
echo "branch does not have PR or cli not logged in, try 'gh auth login' or 'gh pr create'"; \
|
||||
@@ -487,4 +395,4 @@ new-changelog:
|
||||
fi
|
||||
@mkdir -p ./changelogs/unreleased/ && \
|
||||
echo $(CHANGELOG_BODY) > ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN) && \
|
||||
echo \"$(CHANGELOG_BODY)\" added to "./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
|
||||
echo "\"$(CHANGELOG_BODY)\" added to ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
|
||||
|
||||
@@ -42,7 +42,6 @@ The following is a list of the supported Kubernetes versions for each Velero ver
|
||||
|
||||
| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|
||||
|----------------|-------------------------------------------|-------------------------------------|
|
||||
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
|
||||
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
|
||||
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
|
||||
| 1.13 | 1.18-latest | 1.26.5, 1.27.3, 1.27.8, and 1.28.3 |
|
||||
|
||||
2
Tiltfile
2
Tiltfile
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
|
||||
|
||||
tilt_helper_dockerfile_header = """
|
||||
# Tilt image
|
||||
FROM golang:1.23.11 as tilt-helper
|
||||
FROM golang:1.22 as tilt-helper
|
||||
|
||||
# Support live reloading with Tilt
|
||||
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
## v1.16.2
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.16.2
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.16.2`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.16/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.16/upgrade-to-1.16/
|
||||
|
||||
### All Changes
|
||||
* Update "Default Volumes to Fs Backup" to "File System Backup (Default)" (#9105, @shubham-pampattiwar)
|
||||
* Fix missing defaultVolumesToFsBackup flag output in Velero describe backup cmd (#9103, @shubham-pampattiwar)
|
||||
* Add imagePullSecrets inheritance for VGDP pod and maintenance job. (#9102, @blackpiglet)
|
||||
* Fix issue #9077, don't block backup deletion on list VS error (#9101, @Lyndon-Li)
|
||||
* Mounted cloud credentials should not be world-readable (#9094, @sseago)
|
||||
* Allow for proper tracking of multiple hooks per container (#9060, @sseago)
|
||||
* Add BSL status check for backup/restore operations. (#9010, @blackpiglet)
|
||||
|
||||
|
||||
## v1.16.1
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.16.1
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.16.1`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.16/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.16/upgrade-to-1.16/
|
||||
|
||||
### All Changes
|
||||
* Call WaitGroup.Done() once only when PVB changes to final status the first time to avoid panic (#8940, @ywk253100)
|
||||
* Add VolumeSnapshotContent into the RIA and the mustHave resource list. (#8926, @blackpiglet)
|
||||
* Warn for not found error in patching managed fields (#8916, @sseago)
|
||||
* Fix issue 8878, relief node os deduction error checks (#8911, @Lyndon-Li)
|
||||
|
||||
|
||||
## v1.16
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.16.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.16.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.16/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.16/upgrade-to-1.16/
|
||||
|
||||
### Highlights
|
||||
#### Windows cluster support
|
||||
In v1.16, Velero supports to run in Windows clusters and backup/restore Windows workloads, either stateful or stateless:
|
||||
* Hybrid build and all-in-one image: the build process is enhanced to build an all-in-one image for hybrid CPU architecture and hybrid platform. For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/multiple-arch-build-with-windows.md
|
||||
* Deployment in Windows clusters: Velero node-agent, data mover pods and maintenance jobs now support to run in both linux and Windows nodes
|
||||
* Data mover backup/restore Windows workloads: Velero built-in data mover supports Windows workloads throughout its full cycle, i.e., discovery, backup, restore, pre/post hook, etc. It automatically identifies Windows workloads and schedules data mover pods to the right group of nodes
|
||||
|
||||
Check the epic issue https://github.com/vmware-tanzu/velero/issues/8289 for more information.
|
||||
|
||||
#### Parallel Item Block backup
|
||||
v1.16 now supports to back up item blocks in parallel. Specifically, during backup, correlated resources are grouped in item blocks and Velero backup engine creates a thread pool to back up the item blocks in parallel. This significantly improves the backup throughput, especially when there are large scale of resources.
|
||||
Pre/post hooks also belongs to item blocks, so will also run in parallel along with the item blocks.
|
||||
Users are allowed to configure the parallelism through the `--item-block-worker-count` Velero server parameter. If not configured, the default parallelism is 1.
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8334.
|
||||
|
||||
#### Data mover restore enhancement in scalability
|
||||
In previous releases, for each volume of WaitForFirstConsumer mode, data mover restore is only allowed to happen in the node that the volume is attached. This severely degrades the parallelism and the balance of node resource(CPU, memory, network bandwidth) consumption for data mover restore (https://github.com/vmware-tanzu/velero/issues/8044).
|
||||
|
||||
In v1.16, users are allowed to configure data mover restores running and spreading evenly across all nodes in the cluster. The configuration is done through a new flag `ignoreDelayBinding` in node-agent configuration (https://github.com/vmware-tanzu/velero/issues/8242).
|
||||
|
||||
#### Data mover enhancements in observability
|
||||
In 1.16, some observability enhancements are added:
|
||||
* Output various statuses of intermediate objects for failures of data mover backup/restore (https://github.com/vmware-tanzu/velero/issues/8267)
|
||||
* Output the errors when Velero fails to delete intermediate objects during clean up (https://github.com/vmware-tanzu/velero/issues/8125)
|
||||
|
||||
The outputs are in the same node-agent log and enabled automatically.
|
||||
|
||||
#### CSI snapshot backup/restore enhancement in usability
|
||||
In previous releases, a unnecessary VolumeSnapshotContent object is retained for each backup and synced to other clusters sharing the same backup storage location. And during restore, the retained VolumeSnapshotContent is also restored unnecessarily.
|
||||
|
||||
In 1.16, the retained VolumeSnapshotContent is removed from the backup, so no unnecessary CSI objects are synced or restored.
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8725.
|
||||
|
||||
#### Backup Repository Maintenance enhancement in resiliency and observability
|
||||
In v1.16, some enhancements of backup repository maintenance are added to improve the observability and resiliency:
|
||||
* A new backup repository maintenance history section, called `RecentMaintenance`, is added to the BackupRepository CR. Specifically, for each BackupRepository, including start/completion time, completion status and error message. (https://github.com/vmware-tanzu/velero/issues/7810)
|
||||
* Running maintenance jobs are now recaptured after Velero server restarts. (https://github.com/vmware-tanzu/velero/issues/7753)
|
||||
* The maintenance job will not be launched for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8238)
|
||||
* The backup repository will not try to initialize a new repository for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8091)
|
||||
* Users now are allowed to configure the intervals of an effective maintenance in the way of `normalGC`, `fastGC` and `eagerGC`, through the `fullMaintenanceInterval` parameter in backupRepository configuration. (https://github.com/vmware-tanzu/velero/issues/8364)
|
||||
|
||||
#### Volume Policy enhancement of filtering volumes by PVC labels
|
||||
In v1.16, Volume Policy is extended to support filtering volumes by PVC labels. (https://github.com/vmware-tanzu/velero/issues/8256).
|
||||
|
||||
#### Resource Status restore per object
|
||||
In v1.16, users are allowed to define whether to restore resource status per object through an annotation `velero.io/restore-status` set on the object. (https://github.com/vmware-tanzu/velero/issues/8204).
|
||||
|
||||
#### Velero Restore Helper binary is merged into Velero image
|
||||
In v1.16, Velero banaries, i.e., velero, velero-helper and velero-restore-helper, are all included into the single Velero image. (https://github.com/vmware-tanzu/velero/issues/8484).
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: 1.23.7
|
||||
kopia: 0.19.0
|
||||
|
||||
### Limitations/Known issues
|
||||
#### Limitations of Windows support
|
||||
* fs-backup is not supported for Windows workloads and so fs-backup runs only in linux nodes for linux workloads
|
||||
* Backup/restore of NTFS extended attributes/advanced features are not supported, i.e., Security Descriptors, System/Hidden/ReadOnly attributes, Creation Time, NTFS Streams, etc.
|
||||
|
||||
### All Changes
|
||||
* Add third party annotation support for maintenance job, so that the declared third party annotations could be added to the maintenance job pods (#8812, @Lyndon-Li)
|
||||
* Fix issue #8803, use deterministic name to create backupRepository (#8808, @Lyndon-Li)
|
||||
* Refactor restoreItem and related functions to differentiate the backup resource name and the restore target resource name. (#8797, @blackpiglet)
|
||||
* ensure that PV is removed before VS is deleted (#8777, @ix-rzi)
|
||||
* host_pods should not be mandatory to node-agent (#8774, @mpryc)
|
||||
* Log doesn't show pv name, but displays %!s(MISSING) instead (#8771, @hu-keyu)
|
||||
* Fix issue #8754, add third party annotation support for data mover (#8770, @Lyndon-Li)
|
||||
* Add docs for volume policy with labels as a criteria (#8759, @shubham-pampattiwar)
|
||||
* Move pvc annotation removal from CSI RIA to regular PVC RIA (#8755, @sseago)
|
||||
* Add doc for maintenance history (#8747, @Lyndon-Li)
|
||||
* Fix issue #8733, add doc for restorePVC (#8737, @Lyndon-Li)
|
||||
* Fix issue #8426, add doc for Windows support (#8736, @Lyndon-Li)
|
||||
* Fix issue #8475, refactor build-from-source doc for hybrid image build (#8729, @Lyndon-Li)
|
||||
* Return directly if no pod volme backup are tracked (#8728, @ywk253100)
|
||||
* Fix issue #8706, for immediate volumes, there is no selected-node annotation on PVC, so deduce the attached node from VolumeAttachment CRs (#8715, @Lyndon-Li)
|
||||
* Add labels as a criteria for volume policy (#8713, @shubham-pampattiwar)
|
||||
* Copy SecurityContext from Containers[0] if present for PVR (#8712, @sseago)
|
||||
* Support pushing images to an insecure registry (#8703, @ywk253100)
|
||||
* Modify golangci configuration to make it work. (#8695, @blackpiglet)
|
||||
* Run backup post hooks inside ItemBlock synchronously (#8694, @ywk253100)
|
||||
* Add docs for object level status restore (#8693, @shubham-pampattiwar)
|
||||
* Clean artifacts generated during CSI B/R. (#8684, @blackpiglet)
|
||||
* Don't run maintenance on the ReadOnly BackupRepositories. (#8681, @blackpiglet)
|
||||
* Fix #8657: WaitGroup panic issue (#8679, @ywk253100)
|
||||
* Fixes issue #8214, validate `--from-schedule` flag in create backup command to prevent empty or whitespace-only values. (#8665, @aj-2000)
|
||||
* Implement parallel ItemBlock processing via backup_controller goroutines (#8659, @sseago)
|
||||
* Clean up leaked CSI snapshot for incomplete backup (#8637, @raesonerjt)
|
||||
* Handle update conflict when restoring the status (#8630, @ywk253100)
|
||||
* Fix issue #8419, support repo maintenance job to run on Windows nodes (#8626, @Lyndon-Li)
|
||||
* Always create DataUpload configmap in restore namespace (#8621, @sseago)
|
||||
* Fix issue #8091, avoid to create new repo when BSL is readonly (#8615, @Lyndon-Li)
|
||||
* Fix issue #8242, distribute dd evenly across nodes (#8611, @Lyndon-Li)
|
||||
* Fix issue #8497, update du/dd progress on completion (#8608, @Lyndon-Li)
|
||||
* Fix issue #8418, add Windows toleration to data mover pods (#8606, @Lyndon-Li)
|
||||
* Check the PVB status via podvolume Backupper rather than calling API server to avoid API server issue (#8603, @ywk253100)
|
||||
* Fix issue #8067, add tmp folder (/tmp for linux, C:\Windows\Temp for Windows) as an alternative of udmrepo's config file location (#8602, @Lyndon-Li)
|
||||
* Data mover restore for Windows (#8594, @Lyndon-Li)
|
||||
* Skip patching the PV in finalization for failed operation (#8591, @reasonerjt)
|
||||
* Fix issue #8579, set event burst to block event broadcaster from filtering events (#8590, @Lyndon-Li)
|
||||
* Configurable Kopia Maintenance Interval. backup-repository-configmap adds an option for configurable`fullMaintenanceInterval` where fastGC (12 hours), and eagerGC (6 hours) allowing for faster removal of deleted velero backups from kopia repo. (#8581, @kaovilai)
|
||||
* Fix issue #7753, recall repo maintenance history on Velero server restart (#8580, @Lyndon-Li)
|
||||
* Clear validation errors when schedule is valid (#8575, @ywk253100)
|
||||
* Merge restore helper image into Velero server image (#8574, @ywk253100)
|
||||
* Don't include excluded items in ItemBlocks (#8572, @sseago)
|
||||
* fs uploader and block uploader support Windows nodes (#8569, @Lyndon-Li)
|
||||
* Fix issue #8418, support data mover backup for Windows nodes (#8555, @Lyndon-Li)
|
||||
* Fix issue #8044, allow users to ignore delay binding the restorePVC of data mover when it is in WaitForFirstConsumer mode (#8550, @Lyndon-Li)
|
||||
* Fix issue #8539, validate uploader types when o.CRDsOnly is set to false only since CRD installation doesn't rely on uploader types (#8538, @Lyndon-Li)
|
||||
* Fix issue #7810, add maintenance history for backupRepository CRs (#8532, @Lyndon-Li)
|
||||
* Make fs-backup work on linux nodes with the new Velero deployment and disable fs-backup if the source/target pod is running in non-linux node (#8424) (#8518, @Lyndon-Li)
|
||||
* Fix issue: backup schedule pause/unpause doesn't work (#8512, @ywk253100)
|
||||
* Fix backup post hook issue #8159 (caused by #7571): always execute backup post hooks after PVBs are handled (#8509, @ywk253100)
|
||||
* Fix issue #8267, enhance the error message when expose fails (#8508, @Lyndon-Li)
|
||||
* Fix issue #8416, #8417, deploy Velero server and node-agent in linux/Windows hybrid env (#8504, @Lyndon-Li)
|
||||
* Design to add label selector as a criteria for volume policy (#8503, @shubham-pampattiwar)
|
||||
* Related to issue #8485, move the acceptedByNode and acceptedTimestamp to Status of DU/DD CRD (#8498, @Lyndon-Li)
|
||||
* Add SecurityContext to restore-helper (#8491, @reasonerjt)
|
||||
* Fix issue #8433, add third party labels to data mover pods when the same labels exist in node-agent pods (#8487, @Lyndon-Li)
|
||||
* Fix issue #8485, add an accepted time so as to count the prepare timeout (#8486, @Lyndon-Li)
|
||||
* Fix issue #8125, log diagnostic info for data mover exposers when expose timeout (#8482, @Lyndon-Li)
|
||||
* Fix issue #8415, implement multi-arch build and Windows build (#8476, @Lyndon-Li)
|
||||
* Pin kopia to 0.18.2 (#8472, @Lyndon-Li)
|
||||
* Add nil check for updating DataUpload VolumeInfo in finalizing phase (#8471, @blackpiglet)
|
||||
* Allowing Object-Level Resource Status Restore (#8464, @shubham-pampattiwar)
|
||||
* For issue #8429. Add the design for multi-arch build and windows build (#8459, @Lyndon-Li)
|
||||
* Upgrade go.mod k8s.io/ go.mod to v0.31.3 and implemented proper logger configuration for both client-go and controller-runtime libraries. This change ensures that logging format and level settings are properly applied throughout the codebase. The update improves logging consistency and control across the Velero system. (#8450, @kaovilai)
|
||||
* Add Design for Allowing Object-Level Resource Status Restore (#8403, @shubham-pampattiwar)
|
||||
* Fix issue #8391, check ErrCancelled from suffix of data mover pod's termination message (#8396, @Lyndon-Li)
|
||||
* Fix issue #8394, don't call closeDataPath in VGDP callbacks, otherwise, the VGDP cleanup will hang (#8395, @Lyndon-Li)
|
||||
* Adding support in velero Resource Policies for filtering PVs based on additional VolumeAttributes properties under CSI PVs (#8383, @mayankagg9722)
|
||||
* Add --item-block-worker-count flag to velero install and server (#8380, @sseago)
|
||||
* Make BackedUpItems thread safe (#8366, @sseago)
|
||||
* Include --annotations flag in backup and restore create commands (#8354, @alromeros)
|
||||
* Use aggregated discovery API to discovery API groups and resources (#8353, @ywk253100)
|
||||
* Copy "envFrom" from Velero server when creating maintenance jobs (#8343, @evhan)
|
||||
* Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go (#8297, @kaovilai)
|
||||
* Bump up version of client-go and controller-runtime (#8275, @ywk253100)
|
||||
* fix(pkg/repository/maintenance): don't panic when there's no container statuses (#8271, @mcluseau)
|
||||
* Add Backup warning for inclusion of NS managed by ArgoCD (#8257, @shubham-pampattiwar)
|
||||
* Added tracking for deleted namespace status check in restore flow. (#8233, @sangitaray2021)
|
||||
1
changelogs/unreleased/8271-mcluseau
Normal file
1
changelogs/unreleased/8271-mcluseau
Normal file
@@ -0,0 +1 @@
|
||||
fix(pkg/repository/maintenance): don't panic when there's no container statuses
|
||||
1
changelogs/unreleased/8275-ywk253100
Normal file
1
changelogs/unreleased/8275-ywk253100
Normal file
@@ -0,0 +1 @@
|
||||
Bump up version of client-go and controller-runtime
|
||||
1
changelogs/unreleased/8353-ywk253100
Normal file
1
changelogs/unreleased/8353-ywk253100
Normal file
@@ -0,0 +1 @@
|
||||
Use aggregated discovery API to discovery API groups and resources
|
||||
1
changelogs/unreleased/8354-alromeros
Normal file
1
changelogs/unreleased/8354-alromeros
Normal file
@@ -0,0 +1 @@
|
||||
Include --annotations flag in backup and restore create commands
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: backuprepositories.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -88,8 +88,8 @@ spec:
|
||||
description: BackupRepositoryStatus is the current status of a BackupRepository.
|
||||
properties:
|
||||
lastMaintenanceTime:
|
||||
description: LastMaintenanceTime is the last time repo maintenance
|
||||
succeeded.
|
||||
description: LastMaintenanceTime is the last time maintenance was
|
||||
run.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
@@ -104,33 +104,6 @@ spec:
|
||||
- Ready
|
||||
- NotReady
|
||||
type: string
|
||||
recentMaintenance:
|
||||
description: RecentMaintenance is status of the recent repo maintenance.
|
||||
items:
|
||||
properties:
|
||||
completeTimestamp:
|
||||
description: CompleteTimestamp is the completion time of the
|
||||
repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the current status of
|
||||
the repo maintenance.
|
||||
type: string
|
||||
result:
|
||||
description: Result is the result of the repo maintenance.
|
||||
enum:
|
||||
- Succeeded
|
||||
- Failed
|
||||
type: string
|
||||
startTimestamp:
|
||||
description: StartTimestamp is the start time of the repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: backups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -63,6 +63,7 @@ spec:
|
||||
DefaultVolumesToRestic specifies whether restic should be used to take a
|
||||
backup of all pod volumes by default.
|
||||
|
||||
|
||||
Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead.
|
||||
nullable: true
|
||||
type: boolean
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: backupstoragelocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -92,7 +92,9 @@ spec:
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must be defined
|
||||
@@ -144,6 +146,7 @@ spec:
|
||||
description: |-
|
||||
AccessMode is an unused field.
|
||||
|
||||
|
||||
Deprecated: there is now an AccessMode field on the Spec and this field
|
||||
will be removed entirely as of v2.0.
|
||||
enum:
|
||||
@@ -155,6 +158,7 @@ spec:
|
||||
LastSyncedRevision is the value of the `metadata/revision` file in the backup
|
||||
storage location the last time the BSL's contents were synced into the cluster.
|
||||
|
||||
|
||||
Deprecated: this field is no longer updated or used for detecting changes to
|
||||
the location's contents and will be removed entirely in v2.0.
|
||||
type: string
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: deletebackuprequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: downloadrequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: podvolumebackups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -96,6 +96,7 @@ spec:
|
||||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: podvolumerestores.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -93,6 +93,7 @@ spec:
|
||||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: restores.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: schedules.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -102,6 +102,7 @@ spec:
|
||||
DefaultVolumesToRestic specifies whether restic should be used to take a
|
||||
backup of all pod volumes by default.
|
||||
|
||||
|
||||
Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead.
|
||||
nullable: true
|
||||
type: boolean
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: serverstatusrequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: volumesnapshotlocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -63,7 +63,9 @@ spec:
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must be defined
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: datadownloads.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -92,13 +92,6 @@ spec:
|
||||
DataMover specifies the data mover to be used by the backup.
|
||||
If DataMover is "" or "velero", the built-in data mover will be used.
|
||||
type: string
|
||||
nodeOS:
|
||||
description: NodeOS is OS of the node where the DataDownload is processed.
|
||||
enum:
|
||||
- auto
|
||||
- linux
|
||||
- windows
|
||||
type: string
|
||||
operationTimeout:
|
||||
description: |-
|
||||
OperationTimeout specifies the time used to wait internal operations,
|
||||
@@ -143,16 +136,6 @@ spec:
|
||||
status:
|
||||
description: DataDownloadStatus is the current status of a DataDownload.
|
||||
properties:
|
||||
acceptedByNode:
|
||||
description: Node is name of the node where the DataUpload is prepared.
|
||||
type: string
|
||||
acceptedTimestamp:
|
||||
description: |-
|
||||
AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
The server's time is used for AcceptedTimestamp
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
completionTimestamp:
|
||||
description: |-
|
||||
CompletionTimestamp records the time a restore was completed.
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: datauploads.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
@@ -143,17 +143,6 @@ spec:
|
||||
status:
|
||||
description: DataUploadStatus is the current status of a DataUpload.
|
||||
properties:
|
||||
acceptedByNode:
|
||||
description: AcceptedByNode is name of the node where the DataUpload
|
||||
is prepared.
|
||||
type: string
|
||||
acceptedTimestamp:
|
||||
description: |-
|
||||
AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
The server's time is used for AcceptedTimestamp
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
completionTimestamp:
|
||||
description: |-
|
||||
CompletionTimestamp records the time a backup was completed.
|
||||
@@ -176,13 +165,6 @@ spec:
|
||||
node:
|
||||
description: Node is name of the node where the DataUpload is processed.
|
||||
type: string
|
||||
nodeOS:
|
||||
description: NodeOS is OS of the node where the DataUpload is processed.
|
||||
enum:
|
||||
- auto
|
||||
- linux
|
||||
- windows
|
||||
type: string
|
||||
path:
|
||||
description: Path is the full path of the snapshot volume being backed
|
||||
up.
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -8,7 +8,17 @@ rules:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
@@ -16,18 +26,6 @@ rules:
|
||||
- velero.io
|
||||
resources:
|
||||
- backuprepositories
|
||||
- backups
|
||||
- backupstoragelocations
|
||||
- datadownloads
|
||||
- datauploads
|
||||
- deletebackuprequests
|
||||
- downloadrequests
|
||||
- podvolumebackups
|
||||
- podvolumerestores
|
||||
- restores
|
||||
- schedules
|
||||
- serverstatusrequests
|
||||
- volumesnapshotlocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
@@ -40,18 +38,239 @@ rules:
|
||||
- velero.io
|
||||
resources:
|
||||
- backuprepositories/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backups
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backups/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backupstoragelocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backupstoragelocations/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datadownloads
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datadownloads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datauploads
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datauploads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- deletebackuprequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- deletebackuprequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- downloadrequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- downloadrequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumebackups
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumebackups/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumerestores
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumerestores/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- restores
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- restores/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- schedules
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- schedules/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- serverstatusrequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- serverstatusrequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- volumesnapshotlocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
|
||||
@@ -276,7 +276,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
|
||||
if !boolptr.IsSetToFalse(v.snapshotVolumes) {
|
||||
// If the backup.Spec.SnapshotVolumes is not set, or set to true, then should take the snapshot.
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false", pv.Name)
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,122 +0,0 @@
|
||||
# Multi-arch Build and Windows Build Support
|
||||
|
||||
## Background
|
||||
|
||||
At present, Velero images could be built for linux-amd64 and linux-arm64. We need to support other platforms, i.e., windows-amd64.
|
||||
At present, for linux image build, we leverage Buildkit's `--platform` option to create the image manifest list in one build call. However, it is a limited way and doesn't fully support all multi-arch scenarios. Specifically, since the build is done in one call with the same parameters, it is impossible to build images with different configurations (e.g., Windows build requires a different Dockerfile).
|
||||
At present, Velero by default builds images locally, so no image or manifest is pushed to the registry. However, docker doesn't support multi-arch build locally. We need to clarify the behavior of local build.
|
||||
|
||||
## Goals
|
||||
- Refactor the `make container` process to fully support multi-arch build
|
||||
- Add Windows build to the existing build process
|
||||
- Clarify the behavior of local build with multi-arch build capabilities
|
||||
- Don't change the pattern of the final image tag to be used by users
|
||||
|
||||
## Non-Goals
|
||||
- There may be some workarounds to make the multi-arch image/manifest fully available locally. These workarounds will not be adopted, so local build always build single-arch images
|
||||
|
||||
## Local Build
|
||||
|
||||
For local build, two values of `--output` parameter for `docker buildx build` are supported:
|
||||
- `docker`: a docker format image is built, but the image is only built for the platform (`<os>/<arch>`) as same as the building env. E.g., when building from linux-amd64 env, a single manifest of linux-amd64 is created regardless how the input parameters are configured.
|
||||
- `tar`: one or more images are built as tarballs according to the input platform (`<os>/<arch>`) parameters. Specifically, one tarball is generated for each platform. The build process is the same with the `Build Separate Manifests` of `Push Build` as detailed below. Merely, the `--output` parameter diffs, as `type=tar;dest=<tarball generated path>`. The tarball is generated to the `_output` folder and named with the platform info, e.g., `_output/velero-main-linux-amd64.tar`.
|
||||
|
||||
## Push Build
|
||||
|
||||
For push build, the `--output` parameter for `docker buildx build` is always `registry`. And build will go according to the input parameters and create multi-arch manifest lists.
|
||||
|
||||
### Step 1: Build Separate Manifests
|
||||
|
||||
Instead of specifying multiple platforms (`<os>/<arch>`) to `--platform` option, we add multiple `container-%` targets in Makefile and each target builds one platform respectively.
|
||||
|
||||
The goal here is to build multiple manifests through the multiple targets. However, `docker buildx build` by default creates a manifest list even though there is only one element in `--platform`. Therefore, two flags `--provenance=false` and `--sbom=false` will be set additionally to force `docker buildx build` to create manifests.
|
||||
|
||||
Each manifest has a unique tag, the OS type and arch is added to the tag, in the pattern `$(REGISTRY)/$(BIN):$(VERSION)-$(OS)-$(ARCH)`. For example, `velero/velero:main-linux-amd64`.
|
||||
|
||||
All the created manifests will be pushed to registry so that the all-in-one manifest list could be created.
|
||||
|
||||
### Step 2: Create All-In-One Manifest List
|
||||
|
||||
The next step is to create a manifest list to include all the created manifests. This could be done by `docker manifest create` command, the tags created and pushed at Step 1 are passed to this command.
|
||||
A tag is also created for the manifest list, in the pattern `$(REGISTRY)/$(BIN):$(VERSION)`. For example, `velero/velero:main`.
|
||||
|
||||
### Step 3: Push All-In-One Manifest List
|
||||
|
||||
The created manifest will be pushed to registry by command `docker manifest push`.
|
||||
|
||||
## Input Parameters
|
||||
|
||||
Below are the input parameters that are configurable to meet different build purposes during Dev and release cycle:
|
||||
- BUILD_OUTPUT_TYPE: the type of output for the build, i.e., `docker`, `tar`, `registry`, while `docker` and `tar` is for local build; `registry` means push build. Default value is `docker`
|
||||
- BUILD_OS: which types of OS should be built for. Multiple values are accepted, e.g., `linux,windows`. Default value is `linux`
|
||||
- BUILD_ARCH: which types of architecture should be built for. Multiple values are accepted, e.g., `amd64,arm64`. Default value is `amd64`
|
||||
- BUILDX_INSTANCE: an existing buildx instance to be used by the build. Default value is <empty> which indicates the build to create a new buildx instance
|
||||
|
||||
## Windows Build
|
||||
|
||||
Windows container images vary from Windows OS versions, e.g., `ltsc2022` for Windows server 2022 and `1809` for Windows server 2019. Images for different OS versions should be built separately.
|
||||
Therefore, separate build targets are added for each OS version, like `container-windows-%`.
|
||||
For the same reason, a new input parameter is added, `BUILD_WINDOWS_VERSION`. The default value is `ltsc2022`. Windows server 2022 is the only base image we will deliver officially, Windows server 2019 is not supported. In future, we may need to support Windows server 2025 base image.
|
||||
For local build to tar, the Windows OS version is also added to the name of the tarball, e.g., `_output/velero-main-windows-ltsc2022-amd64.tar`.
|
||||
|
||||
At present, Windows container image only supports `amd64` as the architecture, so `BUILD_ARCH` is ignored for Windows.
|
||||
|
||||
The Windows manifests need to be annotated with os type, arch, and os version. This will be done through `docker manifest annotate` command.
|
||||
|
||||
## Use Multi-arch Images
|
||||
|
||||
In order to use the images, the manifest list's tag should be provided to `velero install` command or helm, the individual manifests are covered by the manifest list. During launch time, the container engine will load the right image to the container according to the platform of the running node.
|
||||
|
||||
## Build Samples
|
||||
|
||||
**Local build to docker**
|
||||
```
|
||||
make container
|
||||
```
|
||||
The built image could be listed by `docker image ls`.
|
||||
|
||||
**Local build for linux-amd64 and windows-amd64 to tar**
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=tar BUILD_OS=linux,windows make container
|
||||
```
|
||||
Under `_output` directory, below files are generated:
|
||||
```
|
||||
velero-main-linux-amd64.tar
|
||||
velero-main-windows-ltsc2022-amd64.tar
|
||||
```
|
||||
|
||||
**Local build for linux-amd64, linux-arm64 and windows-amd64 to tar**
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=tar BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
|
||||
```
|
||||
Under `_output` directory, below files are generated:
|
||||
```
|
||||
velero-main-linux-amd64.tar
|
||||
velero-main-linux-arm64.tar
|
||||
velero-main-windows-ltsc2022-amd64.tar
|
||||
```
|
||||
|
||||
**Push build for linux-amd64 and windows-amd64**
|
||||
Prerequisite: login to registry, e.g., through `docker login`
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows make container
|
||||
```
|
||||
Nothing is available locally, in the registry 3 tags are available:
|
||||
```
|
||||
velero/velero:main
|
||||
velero/velero:main-windows-ltsc2022-amd64
|
||||
velero/velero:main-linux-amd64
|
||||
```
|
||||
|
||||
**Push build for linux-amd64, linux-arm64 and windows-amd64**
|
||||
Prerequisite: login to registry, e.g., through `docker login`
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
|
||||
```
|
||||
Nothing is available locally, in the registry 4 tags are available:
|
||||
```
|
||||
velero/velero:main
|
||||
velero/velero:main-windows-ltsc2022-amd64
|
||||
velero/velero:main-linux-amd64
|
||||
velero/velero:main-linux-arm64
|
||||
```
|
||||
@@ -1,113 +0,0 @@
|
||||
# Allow Object-Level Resource Status Restore in Velero
|
||||
|
||||
## Abstract
|
||||
This design proposes a way to enhance Velero’s restore functionality by enabling object-level resource status restoration through annotations.
|
||||
Currently, Velero allows restoring resource statuses only at a resource type level, which lacks the granularity needed to restore the status of specific resources.
|
||||
By introducing an annotation that controllers can set on individual resource objects, this design aims to improve flexibility and autonomy for users/resource-controllers, providing a more granular way
|
||||
to enable resource status restore.
|
||||
|
||||
|
||||
## Background
|
||||
Velero provides the `restoreStatus` field in the Restore API to specify resource types for status restoration. However, this feature is limited to resource types as a whole, lacking the granularity needed to restore specific objects of a resource type. Resource controllers, especially those managing custom resources with external dependencies, may need to restore status on a per-object basis based on internal logic and dependencies.
|
||||
|
||||
This design adds an annotation-based approach to allow controllers to specify status restoration at the object level, enabling Velero to handle status restores more flexibly.
|
||||
|
||||
## Goals
|
||||
- Provide a mechanism to specify the restoration of a resource’s status at an object level.
|
||||
- Maintain backwards compatibility with existing functionality, allowing gradual adoption of this feature.
|
||||
- Integrate the new annotation-based objects-level status restore with Velero’s existing resource-type-level `restoreStatus` configuration.
|
||||
|
||||
## Non-Goals
|
||||
- Alter Velero’s existing resource type-level status restoration mechanism for resources without annotations.
|
||||
|
||||
## Use-Cases/Scenarios
|
||||
|
||||
1. Controller managing specific Resources
|
||||
- A resource controller identifies that a specific object of a resource should have its status restored due to particular dependencies
|
||||
- The controller automatically sets the `velero.io/restore-status: true` annotation on the resource.
|
||||
- During restore, Velero restores the status of this object, while leaving other resources unaffected.
|
||||
- The status for the annotated object will be restored regardless of its inclusion/exclusion in `restoreStatus.includedResources`
|
||||
|
||||
2. A specific object must not have its status restored even if it's included in `restoreStatus.includedResources`
|
||||
- A user specifies a resource type in the `restoreStatus.includedResources` field within the Restore custom resource.
|
||||
- A particular object of that resource type is annotated with `velero.io/restore-status: false` by the user.
|
||||
   - The status of the annotated object will not be restored even though it's included in `restoreStatus.includedResources`, because the annotation is `false` and it takes precedence.
|
||||
|
||||
3. Default Behavior for objects Without the Annotation
|
||||
- Objects without the `velero.io/restore-status` annotation behave as they currently do: Velero skips their status restoration unless the resource type is specified in the `restoreStatus.includedResources` field.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
- Object-Level Status Restore Annotation: We are introducing the `velero.io/restore-status` annotation at the resource object level to mark specific objects for status restoration.
|
||||
- `true`: Indicates that the status should be restored for this object
|
||||
- `false`: Skip restoring status for this specific object
|
||||
- Invalid or missing annotations defer to the meaning of existing resource type-level logic.
|
||||
|
||||
- Restore logic precedence:
|
||||
- Annotations take precedence when they exist with valid values (`true` or `false`).
|
||||
- Restore spec `restoreStatus.includedResources` is only used when annotations are invalid or missing.
|
||||
|
||||
- Velero Restore Logic Update: During a restore operation, Velero will:
|
||||
- Extend the existing restore logic to parse and prioritize annotations introduced in this design.
|
||||
- Update resource objects accordingly based on their annotation values or fallback configuration.
|
||||
|
||||
|
||||
## Detailed Design
|
||||
|
||||
- Annotation for object-Level Status Restore: The `velero.io/restore-status` annotation will be set on individual resource objects by users/controllers as needed:
|
||||
```yaml
|
||||
metadata:
|
||||
annotations:
|
||||
velero.io/restore-status: "true"
|
||||
```
|
||||
|
||||
- Restore Logic Modifications: During the restore operation, the restore controller will follow these steps:
|
||||
- Parse the `restoreStatus.includedResources` spec to determine resource types eligible for status restoration.
|
||||
- For each resource object:
|
||||
- Check for the `velero.io/restore-status` annotation.
|
||||
- If the annotation value is:
|
||||
- `true`: Restore the status of the object
|
||||
- `false`: Skip restoring the status of the object
|
||||
- If the annotation is invalid or missing:
|
||||
- Default to the `restoreStatus.includedResources` configuration
|
||||
|
||||
|
||||
## Implementation
|
||||
|
||||
We are targeting the implementation of this design for Velero 1.16 release.
|
||||
|
||||
Current restoreStatus logic resides here: https://github.com/vmware-tanzu/velero/blob/32a8c62920ad96c70f1465252c0197b83d5fa6b6/pkg/restore/restore.go#L1652
|
||||
|
||||
The modified logic would look somewhat like:
|
||||
|
||||
```go
|
||||
// Determine whether to restore status from resource type configuration
|
||||
shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
|
||||
|
||||
// Check for object-level annotation
|
||||
annotations := obj.GetAnnotations()
|
||||
objectAnnotation := annotations["velero.io/restore-status"]
|
||||
annotationValid := objectAnnotation == "true" || objectAnnotation == "false"
|
||||
|
||||
// Determine restore behavior based on annotation precedence
|
||||
shouldRestoreStatus = (annotationValid && objectAnnotation == "true") || (!annotationValid && shouldRestoreStatus)
|
||||
|
||||
ctx.log.Debugf("status field for %s: exists: %v, should restore: %v (by annotation: %v)", newGR, statusFieldExists, shouldRestoreStatus, annotationValid)
|
||||
|
||||
if shouldRestoreStatus && statusFieldExists {
|
||||
if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
|
||||
ctx.log.Errorf("Could not set status field %s: %v", kube.NamespaceAndName(obj), err)
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs, itemExists
|
||||
}
|
||||
obj.SetResourceVersion(createdObj.GetResourceVersion())
|
||||
updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
ctx.log.Infof("Status field update failed %s: %v", kube.NamespaceAndName(obj), err)
|
||||
warnings.Add(namespace, err)
|
||||
} else {
|
||||
createdObj = updated
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -71,20 +71,6 @@ type ScheduleSpec struct {
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** The Velero server automatically patches the `skipImmediately` field back to `false` after it's been used. This is because `skipImmediately` is designed to be a one-time operation rather than a persistent state. When the controller detects that `skipImmediately` is set to `true`, it:
|
||||
1. Sets the flag back to `false`
|
||||
2. Records the current time in `schedule.Status.LastSkipped`
|
||||
|
||||
This "consume and reset" pattern ensures that after skipping one immediate backup, the schedule returns to normal behavior for subsequent runs. The `LastSkipped` timestamp is then used to determine when the next backup should run.
|
||||
|
||||
```go
|
||||
// From pkg/controller/schedule_controller.go
|
||||
if schedule.Spec.SkipImmediately != nil && *schedule.Spec.SkipImmediately {
|
||||
*schedule.Spec.SkipImmediately = false
|
||||
schedule.Status.LastSkipped = &metav1.Time{Time: c.clock.Now()}
|
||||
}
|
||||
```
|
||||
|
||||
`LastSkipped` will be added to `ScheduleStatus` struct to track the last time a schedule was skipped.
|
||||
```diff
|
||||
// ScheduleStatus captures the current state of a Velero schedule
|
||||
@@ -111,8 +97,6 @@ type ScheduleStatus struct {
|
||||
}
|
||||
```
|
||||
|
||||
The `LastSkipped` field is crucial for the schedule controller to determine the next run time. When a backup is skipped, this timestamp is used instead of `LastBackup` to calculate when the next backup should occur, ensuring the schedule maintains its intended cadence even after skipping a backup.
|
||||
|
||||
When `schedule.spec.SkipImmediately` is `true`, `LastSkipped` will be set to the current time, and `schedule.spec.SkipImmediately` set to nil so it can be used again.
|
||||
|
||||
The `getNextRunTime()` function below is updated so `LastSkipped` which is after `LastBackup` will be used to determine next run time.
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
# Adding Support For VolumeAttributes in Resource Policy
|
||||
|
||||
## Abstract
|
||||
Currently [Velero Resource policies](https://velero.io/docs/main/resource-filtering/#creating-resource-policies) are only supporting "Driver" to be filtered for [CSI volume conditions](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28)
|
||||
|
||||
If users want to skip certain CSI volumes based on other volume attributes like protocol or SKU, etc., they can't do it with the current Velero resource policies. It would be convenient if Velero resource policies could be extended to filter on volume attributes along with the existing driver filter in the resource policies `conditions`, to handle the backup of volumes just by `some specific volume attributes conditions`.
|
||||
|
||||
## Background
|
||||
As of Today, Velero resource policy already provides us the way to filter volumes based on the `driver` name. But it's not enough to handle the volumes based on other volume attributes like protocol, SKU, etc.
|
||||
|
||||
## Example:
|
||||
- Provision Azure NFS: Define the Storage class with `protocol: nfs` under storage class parameters to provision [CSI NFS Azure File Shares](https://learn.microsoft.com/en-us/azure/aks/azure-files-csi#nfs-file-shares).
|
||||
- User wants to back up AFS (Azure file shares) but only want to backup `SMB` type of file share volumes and not `NFS` file share volumes.
|
||||
|
||||
## Goals
|
||||
- We are only bringing additional support in the resource policy to only handle volumes during backup.
|
||||
- Introducing support for `VolumeAttributes` filter along with `driver` filter in CSI volume conditions to handle volumes.
|
||||
|
||||
## Non-Goals
|
||||
- Currently, only handles volumes, and does not support other resources.
|
||||
|
||||
## Use-cases/Scenarios
|
||||
### Skip backup volumes by some volume attributes:
|
||||
Users want to skip PV with the requirements:
|
||||
- option to skip specified PV on volume attributes type (like Protocol as NFS, SMB, etc)
|
||||
|
||||
### Sample Storage Class Used to create such Volumes
|
||||
```
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: azurefile-csi-nfs
|
||||
provisioner: file.csi.azure.com
|
||||
allowVolumeExpansion: true
|
||||
parameters:
|
||||
protocol: nfs
|
||||
```
|
||||
|
||||
## High-Level Design
|
||||
Modifying the existing Resource Policies code for [csiVolumeSource](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28C6-L28C22) to add the new `VolumeAttributes` filter for CSI volumes and adding validations in existing [csiCondition](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources.go#L150) to match with volume attributes in the conditions from Resource Policy config map and original persistent volume.
|
||||
|
||||
## Detailed Design
|
||||
The volume resource policies should contain a list of policies, each of which is the combination of conditions and a related `action`; when target volumes meet the conditions, the related `action` will take effect.
|
||||
|
||||
Below is the API Design for the user configuration:
|
||||
|
||||
### API Design
|
||||
```go
|
||||
type csiVolumeSource struct {
|
||||
Driver string `yaml:"driver,omitempty"`
|
||||
// [NEW] CSI volume attributes
|
||||
VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
The policies YAML config file would look like this:
|
||||
```yaml
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disk.csi.azure.com
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: file.csi.azure.com
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
    type: skip
|
||||
```
|
||||
|
||||
### New Supported Conditions
|
||||
#### VolumeAttributes
|
||||
Existing CSI Volume Condition can now add `volumeAttributes` which will be key and value pairs.
|
||||
|
||||
Specify details for the related volume source (currently only csi driver is supported filter)
|
||||
```yaml
|
||||
csi: // match volume using `file.csi.azure.com` and with volumeAttributes protocol as nfs
|
||||
driver: file.csi.azure.com
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
```
|
||||
@@ -1,202 +0,0 @@
|
||||
# Add Label Selector as a criteria for Volume Policy
|
||||
|
||||
## Abstract
|
||||
Velero’s volume policies currently support several criteria (such as capacity, storage class, and volume source type) to select volumes for backup. This update extends the design by allowing users to specify required labels on the associated PersistentVolumeClaim (PVC) via a simple key/value map. At runtime, Velero looks up the PVC (when a PV has a ClaimRef), extracts its labels, and compares them with the user-specified map. If all key/value pairs match, the volume qualifies for backup.
|
||||
|
||||
## Background
|
||||
PersistentVolumes (PVs) in Kubernetes are typically bound to PersistentVolumeClaims (PVCs) that include labels (for example, indicating environment, application, or region). Basing backup policies on these PVC labels enables more precise control over which volumes are processed.
|
||||
|
||||
## Goals
|
||||
- Allow users to specify a simple key/value mapping in the volume policy YAML so that only volumes whose associated PVCs contain those labels are selected.
|
||||
- Support policies that target volumes based on criteria such as environment=production or region=us-west.
|
||||
|
||||
## Non-Goals
|
||||
- No changes will be made to the actions (skip, snapshot, fs-backup) of the volume policy engine. This update focuses solely on how volumes are selected.
|
||||
- The design does not support other label selector operations (e.g., NotIn, Exists, DoesNotExist) and only allows for exact key/value matching.
|
||||
|
||||
## Use-cases/scenarios
|
||||
1. Environment-Specific Backup:
|
||||
- A user wishes to back up only those volumes whose associated PVCs have labels such as `environment=production` and `app=database`.
|
||||
- The volume policy specifies a pvcLabels map with those key/value pairs; only volumes whose PVCs match are processed.
|
||||
```yaml
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: database
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
2. Region-Specific Backup:
|
||||
- A user operating in multiple regions wants to back up only volumes in the `us-west` region.
|
||||
- The policy includes `pvcLabels: { region: us-west }`, so only PVs bound to PVCs with that label are selected.
|
||||
```yaml
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
region: us-west
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
3. Automated Label-Based Backups:
|
||||
- An external system automatically labels new PVCs (for example, `backup: true`).
|
||||
- A volume policy with `pvcLabels: { backup: true }` ensures that any new volume whose PVC contains that label is included in backup operations.
|
||||
```yaml
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
backup: true
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
## High-Level Design
|
||||
|
||||
1. Extend Volume Policy Schema:
|
||||
- The YAML schema for volume conditions is extended to include an optional field pvcLabels of type `map[string]string`.
|
||||
2. Implement New Condition Type:
|
||||
- A new condition, `pvcLabelsCondition`, is created. It implements the `volumeCondition` interface and simply compares the user-specified key/value pairs with the actual PVC labels (populated at runtime).
|
||||
3. Update Structured Volume:
|
||||
- The internal representation of a volume (`structuredVolume`) is extended with a new field `pvcLabels map[string]string` to store the labels from the associated PVC.
|
||||
- A new helper function (or an updated parsing function) is used to perform a PVC lookup when a PV has a ClaimRef, populating the pvcLabels field.
|
||||
4. Integrate with Policy Engine:
|
||||
- The policy builder is updated to create and add a `pvcLabelsCondition` if the policy YAML contains a `pvcLabels` entry.
|
||||
- The matching entry point uses the updated `structuredVolume` (populated with PVC labels) to evaluate all conditions, including the new PVC labels condition.
|
||||
## Detailed Design
|
||||
|
||||
1. Update Volume Conditions Schema: Define the conditions struct with a simple map for PVC labels:
|
||||
```go
|
||||
// volumeConditions defines the current format of conditions we parse.
|
||||
type volumeConditions struct {
|
||||
Capacity string `yaml:"capacity,omitempty"`
|
||||
StorageClass []string `yaml:"storageClass,omitempty"`
|
||||
NFS *nFSVolumeSource `yaml:"nfs,omitempty"`
|
||||
CSI *csiVolumeSource `yaml:"csi,omitempty"`
|
||||
VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"`
|
||||
// New field: pvcLabels for simple exact-match filtering.
|
||||
PVCLabels map[string]string `yaml:"pvcLabels,omitempty"`
|
||||
}
|
||||
```
|
||||
2. New Condition: `pvcLabelsCondition`: Implement a condition that compares expected labels with those on the PVC:
|
||||
```go
|
||||
// pvcLabelsCondition defines a condition that matches if the PVC's labels contain all the specified key/value pairs.
|
||||
type pvcLabelsCondition struct {
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) match(v *structuredVolume) bool {
|
||||
if len(c.labels) == 0 {
|
||||
return true // No label condition specified; always match.
|
||||
}
|
||||
if v.pvcLabels == nil {
|
||||
return false // No PVC labels found.
|
||||
}
|
||||
for key, expectedVal := range c.labels {
|
||||
if actualVal, exists := v.pvcLabels[key]; !exists || actualVal != expectedVal {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) validate() error {
|
||||
// No extra validation needed for a simple map.
|
||||
return nil
|
||||
}
|
||||
```
|
||||
3. Update `structuredVolume`: Extend the internal volume representation with a field for PVC labels:
|
||||
```go
|
||||
// structuredVolume represents a volume with parsed fields.
|
||||
type structuredVolume struct {
|
||||
capacity resource.Quantity
|
||||
storageClass string
|
||||
// New field: pvcLabels stores labels from the associated PVC.
|
||||
pvcLabels map[string]string
|
||||
nfs *nFSVolumeSource
|
||||
csi *csiVolumeSource
|
||||
volumeType SupportedVolume
|
||||
}
|
||||
```
|
||||
4. Update PVC Lookup – `parsePVWithPVC`: Modify the PV parsing function to perform a PVC lookup:
|
||||
```go
|
||||
func (s *structuredVolume) parsePVWithPVC(pv *corev1.PersistentVolume, client crclient.Client) error {
|
||||
s.capacity = *pv.Spec.Capacity.Storage()
|
||||
s.storageClass = pv.Spec.StorageClassName
|
||||
|
||||
if pv.Spec.NFS != nil {
|
||||
s.nfs = &nFSVolumeSource{
|
||||
Server: pv.Spec.NFS.Server,
|
||||
Path: pv.Spec.NFS.Path,
|
||||
}
|
||||
}
|
||||
if pv.Spec.CSI != nil {
|
||||
s.csi = &csiVolumeSource{
|
||||
Driver: pv.Spec.CSI.Driver,
|
||||
VolumeAttributes: pv.Spec.CSI.VolumeAttributes,
|
||||
}
|
||||
}
|
||||
s.volumeType = getVolumeTypeFromPV(pv)
|
||||
|
||||
// If the PV is bound to a PVC, look it up and store its labels.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
err := client.Get(context.Background(), crclient.ObjectKey{
|
||||
Namespace: pv.Spec.ClaimRef.Namespace,
|
||||
Name: pv.Spec.ClaimRef.Name,
|
||||
}, pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get PVC for PV")
|
||||
}
|
||||
s.pvcLabels = pvc.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
5. Update the Policy Builder: Add the new condition to the policy if pvcLabels is provided:
|
||||
```go
|
||||
func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
|
||||
for _, vp := range resPolicies.VolumePolicies {
|
||||
con, err := unmarshalVolConditions(vp.Conditions)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
volCap, err := parseCapacity(con.Capacity)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
var volP volPolicy
|
||||
volP.action = vp.Action
|
||||
volP.conditions = append(volP.conditions, &capacityCondition{capacity: *volCap})
|
||||
volP.conditions = append(volP.conditions, &storageClassCondition{storageClass: con.StorageClass})
|
||||
volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
|
||||
volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
|
||||
volP.conditions = append(volP.conditions, &volumeTypeCondition{volumeTypes: con.VolumeTypes})
|
||||
// If a pvcLabels map is provided, add the pvcLabelsCondition.
|
||||
if con.PVCLabels != nil && len(con.PVCLabels) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
|
||||
}
|
||||
p.volumePolicies = append(p.volumePolicies, volP)
|
||||
}
|
||||
p.version = resPolicies.Version
|
||||
return nil
|
||||
}
|
||||
```
|
||||
6. Update the Matching Entry Point: Use the updated PV parsing that performs a PVC lookup:
|
||||
```go
|
||||
func (p *Policies) GetMatchAction(res interface{}, client crclient.Client) (*Action, error) {
|
||||
volume := &structuredVolume{}
|
||||
switch obj := res.(type) {
|
||||
case *corev1.PersistentVolume:
|
||||
if err := volume.parsePVWithPVC(obj, client); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse PV with PVC lookup")
|
||||
}
|
||||
case *corev1.Volume:
|
||||
volume.parsePodVolume(obj)
|
||||
default:
|
||||
return nil, errors.New("failed to convert object")
|
||||
}
|
||||
return p.match(volume), nil
|
||||
}
|
||||
```
|
||||
|
||||
Note: The matching loop (p.match(volume)) iterates over all conditions (including our new pvcLabelsCondition) and returns the corresponding action if all conditions match.
|
||||
@@ -191,25 +191,25 @@ type ItemBlockWorkerPool struct {
|
||||
}
|
||||
|
||||
type ItemBlockInput struct {
|
||||
itemBlock *BackupItemBlock
|
||||
itemBlock ItemBlock
|
||||
returnChan chan ItemBlockReturn
|
||||
}
|
||||
|
||||
type ItemBlockReturn struct {
|
||||
itemBlock *BackupItemBlock
|
||||
itemBlock ItemBlock
|
||||
resources []schema.GroupResource
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *ItemBlockWorkerPool) getInputChannel() chan ItemBlockInput
|
||||
func StartItemBlockWorkerPool(context context.Context, workers int, logger logrus.FieldLogger) ItemBlockWorkerPool
|
||||
func processItemBlockWorker(context context.Context, itemBlockChannel chan ItemBlockInput, logger logrus.FieldLogger, wg *sync.WaitGroup)
|
||||
func RunItemBlockWorkers(context context.Context, workers int)
|
||||
func processItemBlocksWorker(context context.Context, itemBlockChannel chan ItemBlockInput, logger logrus.FieldLogger, wg *sync.WaitGroup)
|
||||
```
|
||||
|
||||
The worker pool will be started by calling `StartItemBlockWorkerPool` in `NewBackupReconciler()`, passing in the worker count and reconciler context.
|
||||
`backupreconciler.prepareBackupRequest` will also add the input channel to the `backupRequest` so that it will be available during backup processing.
|
||||
The func `StartItemBlockWorkerPool` will create the `ItemBlockWorkerPool` with a shared buffered input channel (fixed buffer size) and start `workers` goroutines which will each call `processItemBlockWorker`.
|
||||
The `processItemBlockWorker` func (run by the worker goroutines) will read from `itemBlockChannel`, call `BackupItemBlock` on the retrieved `ItemBlock`, and then send the return value to the retrieved `returnChan`, and then process the next block.
|
||||
The worker pool will be started by calling `RunItemBlockWorkers` in `backupReconciler.SetupWithManager`, passing in the worker count and reconciler context.
|
||||
`SetupWithManager` will also add the input channel to the `itemBackupper` so that it will be available during backup processing.
|
||||
The func `RunItemBlockWorkers` will create the `ItemBlockWorkerPool` with a shared buffered input channel (fixed buffer size) and start `workers` goroutines which will each call `processItemBlocksWorker`.
|
||||
The `processItemBlocksWorker` func (run by the worker goroutines) will read from `itemBlockChannel`, call `BackupItemBlock` on the retrieved `ItemBlock`, and then send the return value to the retrieved `returnChan`, and then process the next block.
|
||||
|
||||
#### Modify ItemBlock processing loop to send ItemBlocks to the worker pool rather than backing them up directly
|
||||
|
||||
@@ -1,374 +0,0 @@
|
||||
# Design to clean the artifacts generated in the CSI backup and restore workflows
|
||||
|
||||
## Terminology
|
||||
|
||||
* VSC: VolumeSnapshotContent
|
||||
* VS: VolumeSnapshot
|
||||
|
||||
## Abstract
|
||||
* The design aims to delete the unnecessary VSs and VSCs generated during CSI backup and restore process.
|
||||
* The design stops creating related VSCs during backup syncing.
|
||||
|
||||
## Background
|
||||
In the current CSI backup and restore workflows (note that the CSI B/R workflows here mean only using the CSI snapshots in the B/R, not including the CSI snapshot data movement workflows), some generated artifacts are kept after the backup or restore process completes.
|
||||
|
||||
Some of them are kept due to design, for example, the VolumeSnapshotContents generated during the backup are kept to make sure the backup deletion can clean the snapshots in the storage providers.
|
||||
|
||||
Some of them are kept by accident, for example, after restore, two VolumeSnapshotContents are generated for the same VolumeSnapshot. One is from the backup content, and one is dynamically generated from the restore's VolumeSnapshot.
|
||||
|
||||
The design aims to clean the unnecessary artifacts, and make the CSI B/R workflow more concise and reliable.
|
||||
|
||||
## Goals
|
||||
- Clean the redundant VSC generated during CSI backup and restore.
|
||||
- Remove the VSCs in the backup sync process.
|
||||
|
||||
## Non Goals
|
||||
- There were some discussions about whether the Velero backup should include VSs and VSCs not generated during the backup. So far, the conclusion is that not including them is the better option. Although that would be a useful enhancement, it is not included in this design.
|
||||
- Deleting all the CSI-related metadata files in the BSL is not the aim of this design.
|
||||
|
||||
## Detailed Design
|
||||
### Backup
|
||||
During backup, the main change is that the backup-generated VSCs are no longer kept.
|
||||
|
||||
The reason is that we don't need them to ensure the snapshots are cleaned up during backup deletion. Please refer to the [Backup Deletion](#backup-deletion) section for details.
|
||||
|
||||
As a result, we can simplify the VS deletion logic in the backup. Before, we needed to not only delete the VS, but also recreate a static VSC pointing to a non-existing VS.
|
||||
|
||||
The deletion code in the VS BackupItemAction can be simplified to the following:
|
||||
|
||||
``` go
|
||||
if backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
|
||||
backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
|
||||
p.log.
|
||||
WithField("Backup", fmt.Sprintf("%s/%s", backup.Namespace, backup.Name)).
|
||||
WithField("BackupPhase", backup.Status.Phase).Debugf("Cleaning VolumeSnapshots.")
|
||||
|
||||
if vsc == nil {
|
||||
vsc = &snapshotv1api.VolumeSnapshotContent{}
|
||||
}
|
||||
|
||||
csi.DeleteReadyVolumeSnapshot(*vs, *vsc, p.crClient, p.log)
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
|
||||
func DeleteReadyVolumeSnapshot(
|
||||
vs snapshotv1api.VolumeSnapshot,
|
||||
vsc snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
) {
|
||||
logger.Infof("Deleting Volumesnapshot %s/%s", vs.Namespace, vs.Name)
|
||||
if vs.Status == nil ||
|
||||
vs.Status.BoundVolumeSnapshotContentName == nil ||
|
||||
len(*vs.Status.BoundVolumeSnapshotContentName) <= 0 {
|
||||
logger.Errorf("VolumeSnapshot %s/%s is not ready. This is not expected.",
|
||||
vs.Namespace, vs.Name)
|
||||
return
|
||||
}
|
||||
|
||||
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil {
|
||||
// Patch the DeletionPolicy of the VolumeSnapshotContent to set it to Retain.
|
||||
// This ensures that the volume snapshot in the storage provider is kept.
|
||||
if err := SetVolumeSnapshotContentDeletionPolicy(
|
||||
vsc.Name,
|
||||
client,
|
||||
snapshotv1api.VolumeSnapshotContentRetain,
|
||||
); err != nil {
|
||||
logger.Warnf("Failed to patch DeletionPolicy of volume snapshot %s/%s",
|
||||
vs.Namespace, vs.Name)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.Delete(context.TODO(), &vsc); err != nil {
|
||||
logger.Warnf("Failed to delete the VSC %s: %s", vsc.Name, err.Error())
|
||||
}
|
||||
}
|
||||
if err := client.Delete(context.TODO(), &vs); err != nil {
|
||||
logger.Warnf("Failed to delete volumesnapshot %s/%s: %v", vs.Namespace, vs.Name, err)
|
||||
} else {
|
||||
logger.Infof("Deleted volumesnapshot with volumesnapshotContent %s/%s",
|
||||
vs.Namespace, vs.Name)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Restore
|
||||
|
||||
#### Restore the VolumeSnapshotContent
|
||||
The current behavior of VSC restoration is that the VSC from the backup is restored, and the restored VS also triggers creating a new VSC dynamically.
|
||||
|
||||
Two VSCs created for the same VS in one restore seems not right.
|
||||
|
||||
Skipping the restore of the VSC from the backup is not a viable alternative, because the VSC may reference a [snapshot create secret](https://kubernetes-csi.github.io/docs/secrets-and-credentials-volume-snapshot-class.html?highlight=snapshotter-secret-name#createdelete-volumesnapshot-secret).
|
||||
|
||||
If the `SkipRestore` is set true in the restore action's result, the secret returned in the additional items is ignored too.
|
||||
|
||||
As a result, restoring the VSC from the backup and setting up the relation between the VSC and the VS is a better choice.
|
||||
|
||||
Another consideration is the VSC name should not be the same as the backed-up VSC's, because the older version Velero's restore and backup keep the VSC after completion.
|
||||
|
||||
There's high possibility that the restore will fail due to the VSC already exists in the cluster.
|
||||
|
||||
Multiple restores of the same backup will also meet the same problem.
|
||||
|
||||
The proposed solution is using the restore's UID and the VS's name to generate sha256 hash value as the new VSC name. Both the VS and VSC RestoreItemAction can access those UIDs, and it will avoid the conflicts issues.
|
||||
|
||||
The restored VS name also shares the same generated name.
|
||||
|
||||
The VS-referenced VSC name and the VSC's snapshot handle name are in their status.
|
||||
|
||||
Velero restore process purges the restore resources' metadata and status before running the RestoreItemActions.
|
||||
|
||||
As a result, we cannot read these information in the VS and VSC RestoreItemActions.
|
||||
|
||||
Fortunately, RestoreItemAction input parameters includes the `ItemFromBackup`. The status is intact in `ItemFromBackup`.
|
||||
|
||||
``` go
|
||||
func (p *volumeSnapshotRestoreItemAction) Execute(
|
||||
input *velero.RestoreItemActionExecuteInput,
|
||||
) (*velero.RestoreItemActionExecuteOutput, error) {
|
||||
p.log.Info("Starting VolumeSnapshotRestoreItemAction")
|
||||
|
||||
if boolptr.IsSetToFalse(input.Restore.Spec.RestorePVs) {
|
||||
p.log.Infof("Restore %s/%s did not request for PVs to be restored.",
|
||||
input.Restore.Namespace, input.Restore.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{SkipRestore: true}, nil
|
||||
}
|
||||
|
||||
var vs snapshotv1api.VolumeSnapshot
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(), &vs); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
var vsFromBackup snapshotv1api.VolumeSnapshot
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.ItemFromBackup.UnstructuredContent(), &vsFromBackup); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
// If cross-namespace restore is configured, change the namespace
|
||||
// for VolumeSnapshot object to be restored
|
||||
newNamespace, ok := input.Restore.Spec.NamespaceMapping[vs.GetNamespace()]
|
||||
if !ok {
|
||||
// Use original namespace
|
||||
newNamespace = vs.Namespace
|
||||
}
|
||||
|
||||
if csiutil.IsVolumeSnapshotExists(newNamespace, vs.Name, p.crClient) {
|
||||
p.log.Debugf("VolumeSnapshot %s already exists in the cluster. Return without change.", vs.Namespace+"/"+vs.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{UpdatedItem: input.Item}, nil
|
||||
}
|
||||
|
||||
newVSCName := generateSha256FromRestoreAndVsUID(string(input.Restore.UID), string(vsFromBackup.UID))
|
||||
// Reset Spec to convert the VolumeSnapshot from using
|
||||
// the dynamic VolumeSnapshotContent to the static one.
|
||||
resetVolumeSnapshotSpecForRestore(&vs, &newVSCName)
|
||||
|
||||
// Reset VolumeSnapshot annotation. By now, only change
|
||||
// DeletionPolicy to Retain.
|
||||
resetVolumeSnapshotAnnotation(&vs)
|
||||
|
||||
vsMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&vs)
|
||||
if err != nil {
|
||||
p.log.Errorf("Fail to convert VS %s to unstructured", vs.Namespace+"/"+vs.Name)
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
p.log.Infof(`Returning from VolumeSnapshotRestoreItemAction with
|
||||
no additionalItems`)
|
||||
|
||||
return &velero.RestoreItemActionExecuteOutput{
|
||||
UpdatedItem: &unstructured.Unstructured{Object: vsMap},
|
||||
AdditionalItems: []velero.ResourceIdentifier{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// generateSha256FromRestoreAndVsUID Use the restore UID and the VS UID to generate the new VSC name.
|
||||
// By this way, VS and VSC RIA action can get the same VSC name.
|
||||
func generateSha256FromRestoreAndVsUID(restoreUID string, vsUID string) string {
|
||||
sha256Bytes := sha256.Sum256([]byte(restoreUID + "/" + vsUID))
|
||||
return "vsc-" + hex.EncodeToString(sha256Bytes[:])
|
||||
}
|
||||
```
|
||||
|
||||
#### Restore the VolumeSnapshot
|
||||
``` go
|
||||
// Execute restores a VolumeSnapshotContent object without modification
|
||||
// returning the snapshot lister secret, if any, as additional items to restore.
|
||||
func (p *volumeSnapshotContentRestoreItemAction) Execute(
|
||||
input *velero.RestoreItemActionExecuteInput,
|
||||
) (*velero.RestoreItemActionExecuteOutput, error) {
|
||||
if boolptr.IsSetToFalse(input.Restore.Spec.RestorePVs) {
|
||||
p.log.Infof("Restore did not request for PVs to be restored %s/%s",
|
||||
input.Restore.Namespace, input.Restore.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{SkipRestore: true}, nil
|
||||
}
|
||||
|
||||
p.log.Info("Starting VolumeSnapshotContentRestoreItemAction")
|
||||
|
||||
var vsc snapshotv1api.VolumeSnapshotContent
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(), &vsc); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
var vscFromBackup snapshotv1api.VolumeSnapshotContent
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.ItemFromBackup.UnstructuredContent(), &vscFromBackup); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Errorf(err.Error(), "failed to convert input.ItemFromBackup from unstructured")
|
||||
}
|
||||
|
||||
// If cross-namespace restore is configured, change the namespace
|
||||
// for VolumeSnapshot object to be restored
|
||||
newNamespace, ok := input.Restore.Spec.NamespaceMapping[vsc.Spec.VolumeSnapshotRef.Namespace]
|
||||
if ok {
|
||||
// Update the referenced VS namespace to the mapping one.
|
||||
vsc.Spec.VolumeSnapshotRef.Namespace = newNamespace
|
||||
}
|
||||
|
||||
// Reset VSC name to align with VS.
|
||||
vsc.Name = generateSha256FromRestoreAndVsUID(string(input.Restore.UID), string(vscFromBackup.Spec.VolumeSnapshotRef.UID))
|
||||
|
||||
// Reset the ResourceVersion and UID of referenced VolumeSnapshot.
|
||||
vsc.Spec.VolumeSnapshotRef.ResourceVersion = ""
|
||||
vsc.Spec.VolumeSnapshotRef.UID = ""
|
||||
|
||||
// Set the DeletionPolicy to Retain so that deleting the VS will not trigger snapshot deletion
|
||||
vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain
|
||||
|
||||
if vscFromBackup.Status != nil && vscFromBackup.Status.SnapshotHandle != nil {
|
||||
vsc.Spec.Source.VolumeHandle = nil
|
||||
vsc.Spec.Source.SnapshotHandle = vscFromBackup.Status.SnapshotHandle
|
||||
} else {
|
||||
p.log.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
|
||||
return nil, errors.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
|
||||
}
|
||||
|
||||
additionalItems := []velero.ResourceIdentifier{}
|
||||
if csi.IsVolumeSnapshotContentHasDeleteSecret(&vsc) {
|
||||
additionalItems = append(additionalItems,
|
||||
velero.ResourceIdentifier{
|
||||
GroupResource: schema.GroupResource{Group: "", Resource: "secrets"},
|
||||
Name: vsc.Annotations[velerov1api.PrefixedSecretNameAnnotation],
|
||||
Namespace: vsc.Annotations[velerov1api.PrefixedSecretNamespaceAnnotation],
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
vscMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&vsc)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
p.log.Infof("Returning from VolumeSnapshotContentRestoreItemAction with %d additionalItems",
|
||||
len(additionalItems))
|
||||
return &velero.RestoreItemActionExecuteOutput{
|
||||
UpdatedItem: &unstructured.Unstructured{Object: vscMap},
|
||||
AdditionalItems: additionalItems,
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Backup Sync
|
||||
csi-volumesnapshotclasses.json, csi-volumesnapshotcontents.json, and csi-volumesnapshots.json are CSI-related metadata files in the BSL for each backup.
|
||||
|
||||
csi-volumesnapshotcontents.json and csi-volumesnapshots.json are not needed anymore, but csi-volumesnapshotclasses.json is still needed.
|
||||
|
||||
One concrete scenario is that a backup is created in cluster-A, then the backup is synced to cluster-B, and the backup is deleted in cluster-B. In this case, we don't have a chance to create the VolumeSnapshotClass needed by the VS and VSC.
|
||||
|
||||
The VSC deletion workflow proposed by this design needs to create the VSC first. If the VSC's referenced VolumeSnapshotClass doesn't exist in cluster, the creation of VSC will fail.
|
||||
|
||||
As a result, the VolumeSnapshotClass should still be synced in the backup sync process.
|
||||
|
||||
### Backup Deletion
|
||||
Two factors are worth considering for the backup deletion change:
|
||||
* Because the VSCs generated by the backup are not synced anymore, and the VSCs generated during the backup are not kept either, the backup deletion needs to generate a VSC and then delete it to make sure the snapshots in the storage provider are cleaned up too.
|
||||
* The VSs generated by the backup are already deleted in the backup process, we don't need a DeleteItemAction for the VS anymore. As a result, the `velero.io/csi-volumesnapshot-delete` plugin is unneeded.
|
||||
|
||||
For the VSC DeleteItemAction, we need to generate a VSC. Because we only care about the snapshot deletion, we don't need to create a VS associated with the VSC.
|
||||
|
||||
Create a static VSC, then point it to a pseudo VS, and reference to the snapshot handle should be enough.
|
||||
|
||||
To avoid the created VSC conflict with older version Velero B/R generated ones, the VSC name is set to `vsc-uuid`.
|
||||
|
||||
The following is an example of the implementation.
|
||||
``` go
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
return errors.Wrapf(err, "Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
}
|
||||
snapCont.Name = "vsc-" + uuid.String()
|
||||
|
||||
snapCont.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete
|
||||
|
||||
snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: snapCont.Status.SnapshotHandle,
|
||||
}
|
||||
|
||||
snapCont.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
|
||||
APIVersion: snapshotv1api.SchemeGroupVersion.String(),
|
||||
Kind: "VolumeSnapshot",
|
||||
Namespace: "ns-" + string(snapCont.UID),
|
||||
Name: "name-" + string(snapCont.UID),
|
||||
}
|
||||
|
||||
snapCont.ResourceVersion = ""
|
||||
|
||||
if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
|
||||
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
|
||||
}
|
||||
|
||||
// Read resource timeout from backup annotation, if not set, use default value.
|
||||
timeout, err := time.ParseDuration(
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
|
||||
if err != nil {
|
||||
p.log.Warnf("fail to parse resource timeout annotation %s: %s",
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
|
||||
timeout = 10 * time.Minute
|
||||
}
|
||||
p.log.Debugf("resource timeout is set to %s", timeout.String())
|
||||
|
||||
interval := 5 * time.Second
|
||||
|
||||
// Wait until VSC created and ReadyToUse is true.
|
||||
if err := wait.PollUntilContextTimeout(
|
||||
context.Background(),
|
||||
interval,
|
||||
timeout,
|
||||
true,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
|
||||
if err := p.crClient.Get(ctx, crclient.ObjectKeyFromObject(&snapCont), tmpVSC); err != nil {
|
||||
return false, errors.Wrapf(
|
||||
err, "failed to get VolumeSnapshotContent %s", snapCont.Name,
|
||||
)
|
||||
}
|
||||
|
||||
if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
},
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
|
||||
}
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
Security is not relevant to this design.
|
||||
|
||||
## Compatibility
|
||||
In this design, no new information is added in backup and restore. As a result, this design doesn't have any compatibility issue.
|
||||
|
||||
## Open Issues
|
||||
Please notice the CSI snapshot backup and restore mechanism not supporting all file-store-based volume, e.g. Azure Files, EFS or vSphere CNS File Volume. Only block-based volumes are supported.
|
||||
Refer to [this comment](https://github.com/vmware-tanzu/velero/issues/3151#issuecomment-2623507686) for more details.
|
||||
173
go.mod
173
go.mod
@@ -1,16 +1,14 @@
|
||||
module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.11
|
||||
go 1.22.0
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.50.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
|
||||
cloud.google.com/go/storage v1.40.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.14
|
||||
@@ -20,7 +18,7 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
|
||||
github.com/bombsimon/logrusr/v3 v3.0.0
|
||||
github.com/evanphx/json-patch/v5 v5.9.0
|
||||
github.com/fatih/color v1.18.0
|
||||
github.com/fatih/color v1.16.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/uuid v1.6.0
|
||||
@@ -31,54 +29,46 @@ require (
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
|
||||
github.com/onsi/ginkgo/v2 v2.19.0
|
||||
github.com/onsi/gomega v1.33.1
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/afero v1.6.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/net v0.38.0
|
||||
golang.org/x/oauth2 v0.27.0
|
||||
golang.org/x/text v0.23.0
|
||||
google.golang.org/api v0.218.0
|
||||
google.golang.org/grpc v1.69.4
|
||||
google.golang.org/protobuf v1.36.3
|
||||
golang.org/x/mod v0.17.0
|
||||
golang.org/x/net v0.26.0
|
||||
golang.org/x/oauth2 v0.19.0
|
||||
golang.org/x/text v0.16.0
|
||||
google.golang.org/api v0.172.0
|
||||
google.golang.org/grpc v1.63.2
|
||||
google.golang.org/protobuf v1.33.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.31.3
|
||||
k8s.io/apiextensions-apiserver v0.31.3
|
||||
k8s.io/apimachinery v0.31.3
|
||||
k8s.io/cli-runtime v0.31.3
|
||||
k8s.io/client-go v0.31.3
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kube-aggregator v0.31.3
|
||||
k8s.io/metrics v0.31.3
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime v0.19.3
|
||||
k8s.io/api v0.30.5
|
||||
k8s.io/apiextensions-apiserver v0.30.1
|
||||
k8s.io/apimachinery v0.30.5
|
||||
k8s.io/cli-runtime v0.24.0
|
||||
k8s.io/client-go v0.30.5
|
||||
k8s.io/klog/v2 v2.120.1
|
||||
k8s.io/kube-aggregator v0.19.12
|
||||
k8s.io/metrics v0.25.6
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.18.5
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.16.2 // indirect
|
||||
cloud.google.com/go v0.116.0 // indirect
|
||||
cloud.google.com/go/auth v0.14.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cloud.google.com/go/iam v1.2.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.21.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
|
||||
cloud.google.com/go v0.112.1 // indirect
|
||||
cloud.google.com/go/compute v1.24.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v1.1.7 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
|
||||
@@ -93,38 +83,34 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 // indirect
|
||||
github.com/aws/smithy-go v1.19.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.2.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.1.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/goccy/go-json v0.10.4 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.2 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
@@ -133,20 +119,20 @@ require (
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.4 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.1 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.84 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.69 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
@@ -155,45 +141,40 @@ require (
|
||||
github.com/natefinch/atomic v1.0.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/oklog/run v1.0.0 // indirect
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/prometheus/common v0.52.3 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/vladimirvivien/gexe v0.1.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.25.0 // indirect
|
||||
go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
)
|
||||
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250227051353-20bfabbfc7a0
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20241016073907-939dae5f9001
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM --platform=$TARGETPLATFORM golang:1.23.11-bookworm
|
||||
FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
@@ -30,7 +30,7 @@ RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/downloa
|
||||
chmod +x /usr/local/kubebuilder/bin/kubebuilder
|
||||
|
||||
# get controller-tools
|
||||
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5
|
||||
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0
|
||||
|
||||
# get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
|
||||
# is not important)
|
||||
@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
|
||||
chmod +x /usr/bin/goreleaser
|
||||
|
||||
# get golangci-lint
|
||||
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.64.5
|
||||
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.57.2
|
||||
|
||||
# install kubectl
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
|
||||
@@ -102,4 +102,4 @@ RUN chmod +x ./kubectl
|
||||
RUN mv ./kubectl /usr/local/bin
|
||||
|
||||
# Fix the "dubious ownership" issue from git when running goreleaser.sh
|
||||
RUN echo "[safe] \n\t directory = *" > /.gitconfig
|
||||
RUN echo "[safe] \n\t directory = *" > /.gitconfig
|
||||
9
hack/ci/build_util.sh
Normal file
9
hack/ci/build_util.sh
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
set -e
|
||||
|
||||
function uploader {
|
||||
gsutil cp $1 gs://$2/$1
|
||||
gsutil -D setacl public-read gs://$2/$1 &> /dev/null
|
||||
}
|
||||
@@ -63,7 +63,7 @@ fi
|
||||
if [[ -z $BRANCH && -z $TAG ]]; then
|
||||
echo "Test Velero container build without pushing, when Dockerfile is changed by PR."
|
||||
BRANCH="${GITHUB_BASE_REF}-container"
|
||||
OUTPUT_TYPE="tar"
|
||||
OUTPUT_TYPE="local,dest=."
|
||||
else
|
||||
OUTPUT_TYPE="registry"
|
||||
fi
|
||||
@@ -88,12 +88,8 @@ else
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILD_OS" ]]; then
|
||||
BUILD_OS="linux,windows"
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILD_ARCH" ]]; then
|
||||
BUILD_ARCH="amd64,arm64"
|
||||
if [[ -z "$BUILDX_PLATFORMS" ]]; then
|
||||
BUILDX_PLATFORMS="linux/amd64,linux/arm64"
|
||||
fi
|
||||
|
||||
# Debugging info
|
||||
@@ -102,15 +98,13 @@ echo "BRANCH: $BRANCH"
|
||||
echo "TAG: $TAG"
|
||||
echo "TAG_LATEST: $TAG_LATEST"
|
||||
echo "VERSION: $VERSION"
|
||||
echo "BUILD_OS: $BUILD_OS"
|
||||
echo "BUILD_ARCH: $BUILD_ARCH"
|
||||
echo "BUILDX_PLATFORMS: $BUILDX_PLATFORMS"
|
||||
|
||||
echo "Building and pushing container images."
|
||||
|
||||
|
||||
VERSION="$VERSION" \
|
||||
TAG_LATEST="$TAG_LATEST" \
|
||||
BUILD_OS="$BUILD_OS" \
|
||||
BUILD_ARCH="$BUILD_ARCH" \
|
||||
BUILD_OUTPUT_TYPE=$OUTPUT_TYPE \
|
||||
make all-containers
|
||||
BUILDX_PLATFORMS="$BUILDX_PLATFORMS" \
|
||||
BUILDX_OUTPUT_TYPE=$OUTPUT_TYPE \
|
||||
make all-containers
|
||||
|
||||
@@ -1,36 +1,35 @@
|
||||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..3ff6e6fa1 100644
|
||||
index 5f939c481..1caa51275 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -24,32 +24,31 @@ require (
|
||||
@@ -24,32 +24,32 @@ require (
|
||||
github.com/restic/chunker v0.4.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
- golang.org/x/crypto v0.5.0
|
||||
- golang.org/x/net v0.5.0
|
||||
- golang.org/x/oauth2 v0.4.0
|
||||
- golang.org/x/sync v0.1.0
|
||||
+ golang.org/x/crypto v0.21.0
|
||||
+ golang.org/x/net v0.23.0
|
||||
+ golang.org/x/oauth2 v0.7.0
|
||||
golang.org/x/sync v0.1.0
|
||||
- golang.org/x/sys v0.4.0
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
- google.golang.org/api v0.106.0
|
||||
+ golang.org/x/crypto v0.36.0
|
||||
+ golang.org/x/net v0.38.0
|
||||
+ golang.org/x/oauth2 v0.27.0
|
||||
+ golang.org/x/sync v0.12.0
|
||||
+ golang.org/x/sys v0.31.0
|
||||
+ golang.org/x/term v0.30.0
|
||||
+ golang.org/x/text v0.23.0
|
||||
+ golang.org/x/sys v0.18.0
|
||||
+ golang.org/x/term v0.18.0
|
||||
+ golang.org/x/text v0.14.0
|
||||
+ google.golang.org/api v0.114.0
|
||||
)
|
||||
|
||||
require (
|
||||
- cloud.google.com/go v0.108.0 // indirect
|
||||
- cloud.google.com/go/compute v1.15.1 // indirect
|
||||
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
- cloud.google.com/go/iam v0.10.0 // indirect
|
||||
+ cloud.google.com/go v0.110.0 // indirect
|
||||
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
+ cloud.google.com/go/compute v1.19.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
- cloud.google.com/go/iam v0.10.0 // indirect
|
||||
+ cloud.google.com/go/iam v0.13.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
@@ -49,7 +48,7 @@ index 5f939c481..3ff6e6fa1 100644
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
|
||||
@@ -63,11 +62,13 @@ require (
|
||||
@@ -63,9 +63,9 @@ require (
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
@@ -62,59 +61,32 @@ index 5f939c481..3ff6e6fa1 100644
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
-go 1.18
|
||||
+go 1.23.0
|
||||
+
|
||||
+toolchain go1.23.11
|
||||
\ No newline at end of file
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..d7857bb2b 100644
|
||||
index 026e1d2fa..27d4207f4 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -1,23 +1,24 @@
|
||||
@@ -1,13 +1,13 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
-cloud.google.com/go v0.108.0 h1:xntQwnfn8oHGX0crLVinvHM+AhXvi3QHQIEcX/2hiWk=
|
||||
-cloud.google.com/go v0.108.0/go.mod h1:lNUfQqusBJp0bgAg6qrHgYFYbTB+dOiob1itwnlD33Q=
|
||||
-cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
|
||||
-cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
|
||||
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
|
||||
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
|
||||
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
-cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
|
||||
-cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
|
||||
-cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
|
||||
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
|
||||
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
|
||||
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
|
||||
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
|
||||
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
|
||||
cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
|
||||
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
|
||||
+github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
|
||||
@@ -54,6 +55,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
+github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -70,8 +72,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
@@ -70,8 +70,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
@@ -125,13 +97,12 @@ index 026e1d2fa..d7857bb2b 100644
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -82,17 +84,18 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
@@ -82,17 +82,17 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
-github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
@@ -149,82 +120,58 @@ index 026e1d2fa..d7857bb2b 100644
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
@@ -114,6 +117,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6 h1:nz7i1au+nDzgExfqW5Zl6q85XNTvYoGnM5DHiQC0yYs=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk=
|
||||
@@ -129,6 +133,7 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P
|
||||
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
|
||||
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
|
||||
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
@@ -172,8 +177,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
@@ -172,8 +172,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -189,17 +194,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
@@ -189,11 +189,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
|
||||
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -214,17 +219,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
@@ -214,17 +214,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -237,8 +242,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
@@ -237,8 +237,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
@@ -235,7 +182,7 @@ index 026e1d2fa..d7857bb2b 100644
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
@@ -246,15 +251,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
@@ -246,15 +246,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
@@ -255,7 +202,7 @@ index 026e1d2fa..d7857bb2b 100644
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -266,14 +271,15 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
@@ -266,8 +266,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
@@ -266,10 +213,3 @@ index 026e1d2fa..d7857bb2b 100644
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -71,8 +71,7 @@ func (n *namespacedFileStore) Path(selector *corev1api.SecretKeySelector) (strin
|
||||
|
||||
keyFilePath := filepath.Join(n.fsRoot, fmt.Sprintf("%s-%s", selector.Name, selector.Key))
|
||||
|
||||
// owner RW perms, group R perms, no public perms
|
||||
file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640)
|
||||
file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unable to open credentials file for writing")
|
||||
}
|
||||
|
||||
120
internal/delete/actions/csi/volumesnapshot_action.go
Normal file
120
internal/delete/actions/csi/volumesnapshot_action.go
Normal file
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
// volumeSnapshotDeleteItemAction is a backup item action plugin for Velero.
|
||||
type volumeSnapshotDeleteItemAction struct {
|
||||
log logrus.FieldLogger
|
||||
crClient crclient.Client
|
||||
}
|
||||
|
||||
// AppliesTo returns information indicating that the
|
||||
// VolumeSnapshotBackupItemAction should be invoked to backup
|
||||
// VolumeSnapshots.
|
||||
func (p *volumeSnapshotDeleteItemAction) AppliesTo() (velero.ResourceSelector, error) {
|
||||
p.log.Debug("VolumeSnapshotBackupItemAction AppliesTo")
|
||||
|
||||
return velero.ResourceSelector{
|
||||
IncludedResources: []string{"volumesnapshots.snapshot.storage.k8s.io"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *volumeSnapshotDeleteItemAction) Execute(
|
||||
input *velero.DeleteItemActionExecuteInput,
|
||||
) error {
|
||||
p.log.Info("Starting VolumeSnapshotDeleteItemAction for volumeSnapshot")
|
||||
|
||||
var vs snapshotv1api.VolumeSnapshot
|
||||
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(),
|
||||
&vs,
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
// We don't want this DeleteItemAction plugin to delete VolumeSnapshot
|
||||
// taken outside of Velero. So skip deleting VolumeSnapshot objects
|
||||
// that were not created in the process of creating the Velero
|
||||
// backup being deleted.
|
||||
if !kubeutil.HasBackupLabel(&vs.ObjectMeta, input.Backup.Name) {
|
||||
p.log.Info(
|
||||
"VolumeSnapshot %s/%s was not taken by backup %s, skipping deletion",
|
||||
vs.Namespace, vs.Name, input.Backup.Name,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
p.log.Infof("Deleting VolumeSnapshot %s/%s", vs.Namespace, vs.Name)
|
||||
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil {
|
||||
// we patch the DeletionPolicy of the VolumeSnapshotContent
|
||||
// to set it to Delete. This ensures that the volume snapshot
|
||||
// in the storage provider is also deleted.
|
||||
err := csi.SetVolumeSnapshotContentDeletionPolicy(
|
||||
*vs.Status.BoundVolumeSnapshotContentName,
|
||||
p.crClient,
|
||||
)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(
|
||||
err,
|
||||
fmt.Sprintf("failed to patch DeletionPolicy of volume snapshot %s/%s",
|
||||
vs.Namespace, vs.Name),
|
||||
)
|
||||
}
|
||||
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
err := p.crClient.Delete(context.TODO(), &vs)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewVolumeSnapshotDeleteItemAction(f client.Factory) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return &volumeSnapshotDeleteItemAction{
|
||||
log: logger,
|
||||
crClient: crClient,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
151
internal/delete/actions/csi/volumesnapshot_action_test.go
Normal file
151
internal/delete/actions/csi/volumesnapshot_action_test.go
Normal file
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
)
|
||||
|
||||
func TestVSExecute(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vs *snapshotv1api.VolumeSnapshot
|
||||
backup *velerov1api.Backup
|
||||
createVS bool
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumeSnapshot doesn't have backup label",
|
||||
item: velerotest.UnstructuredOrDie(
|
||||
`
|
||||
{
|
||||
"apiVersion": "snapshot.storage.k8s.io/v1",
|
||||
"kind": "VolumeSnapshot",
|
||||
"metadata": {
|
||||
"namespace": "ns",
|
||||
"name": "foo"
|
||||
}
|
||||
}
|
||||
`,
|
||||
),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumeSnapshot doesn't exist in the cluster",
|
||||
vs: builder.ForVolumeSnapshot("foo", "bar").
|
||||
ObjectMeta(builder.WithLabelsMap(
|
||||
map[string]string{velerov1api.BackupNameLabel: "backup"},
|
||||
)).Status().
|
||||
BoundVolumeSnapshotContentName("vsc").
|
||||
Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Normal case, VolumeSnapshot should be deleted",
|
||||
vs: builder.ForVolumeSnapshot("foo", "bar").
|
||||
ObjectMeta(builder.WithLabelsMap(
|
||||
map[string]string{velerov1api.BackupNameLabel: "backup"},
|
||||
)).Status().
|
||||
BoundVolumeSnapshotContentName("vsc").
|
||||
Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
createVS: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
logger := logrus.StandardLogger()
|
||||
|
||||
p := volumeSnapshotDeleteItemAction{log: logger, crClient: crClient}
|
||||
|
||||
if test.vs != nil {
|
||||
vsMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(test.vs)
|
||||
require.NoError(t, err)
|
||||
test.item = &unstructured.Unstructured{Object: vsMap}
|
||||
}
|
||||
|
||||
if test.createVS {
|
||||
require.NoError(t, crClient.Create(context.TODO(), test.vs))
|
||||
}
|
||||
|
||||
err := p.Execute(
|
||||
&velero.DeleteItemActionExecuteInput{
|
||||
Item: test.item,
|
||||
Backup: test.backup,
|
||||
},
|
||||
)
|
||||
|
||||
if test.expectErr == false {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVSAppliesTo(t *testing.T) {
|
||||
p := volumeSnapshotDeleteItemAction{
|
||||
log: logrus.StandardLogger(),
|
||||
}
|
||||
selector, err := p.AppliesTo()
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
velero.ResourceSelector{
|
||||
IncludedResources: []string{"volumesnapshots.snapshot.storage.k8s.io"},
|
||||
},
|
||||
selector,
|
||||
)
|
||||
}
|
||||
|
||||
func TestNewVolumeSnapshotDeleteItemAction(t *testing.T) {
|
||||
logger := logrus.StandardLogger()
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
|
||||
f := &factorymocks.Factory{}
|
||||
f.On("KubebuilderClient").Return(nil, fmt.Errorf(""))
|
||||
plugin := NewVolumeSnapshotDeleteItemAction(f)
|
||||
_, err := plugin(logger)
|
||||
require.Error(t, err)
|
||||
|
||||
f1 := &factorymocks.Factory{}
|
||||
f1.On("KubebuilderClient").Return(crClient, nil)
|
||||
plugin1 := NewVolumeSnapshotDeleteItemAction(f1)
|
||||
_, err1 := plugin1(logger)
|
||||
require.NoError(t, err1)
|
||||
}
|
||||
@@ -18,23 +18,19 @@ package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
@@ -81,55 +77,25 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
||||
|
||||
p.log.Infof("Deleting VolumeSnapshotContent %s", snapCont.Name)
|
||||
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
return errors.Wrapf(err, "Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
}
|
||||
snapCont.Name = "vsc-" + uuid.String()
|
||||
|
||||
snapCont.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete
|
||||
|
||||
snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: snapCont.Status.SnapshotHandle,
|
||||
}
|
||||
|
||||
snapCont.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
|
||||
APIVersion: snapshotv1api.SchemeGroupVersion.String(),
|
||||
Kind: "VolumeSnapshot",
|
||||
Namespace: "ns-" + string(snapCont.UID),
|
||||
Name: "name-" + string(snapCont.UID),
|
||||
}
|
||||
|
||||
snapCont.ResourceVersion = ""
|
||||
|
||||
if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
|
||||
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
|
||||
}
|
||||
|
||||
// Read resource timeout from backup annotation, if not set, use default value.
|
||||
timeout, err := time.ParseDuration(
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
|
||||
if err != nil {
|
||||
p.log.Warnf("fail to parse resource timeout annotation %s: %s",
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
|
||||
timeout = 10 * time.Minute
|
||||
}
|
||||
p.log.Debugf("resource timeout is set to %s", timeout.String())
|
||||
|
||||
interval := 5 * time.Second
|
||||
|
||||
// Wait until VSC created and ReadyToUse is true.
|
||||
if err := wait.PollUntilContextTimeout(
|
||||
context.Background(),
|
||||
interval,
|
||||
timeout,
|
||||
true,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
return checkVSCReadiness(ctx, &snapCont, p.crClient)
|
||||
},
|
||||
if err := csi.SetVolumeSnapshotContentDeletionPolicy(
|
||||
snapCont.Name,
|
||||
p.crClient,
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
|
||||
// #4764: Leave a warning when VolumeSnapshotContent cannot be found for deletion.
|
||||
// Manual deleting VolumeSnapshotContent can cause this.
|
||||
// It's tricky for Velero to handle this inconsistency.
|
||||
// Even if Velero restores the VolumeSnapshotContent, CSI snapshot controller
|
||||
// may not delete it correctly due to the snapshot represented by VolumeSnapshotContent
|
||||
// already deleted on cloud provider.
|
||||
if apierrors.IsNotFound(err) {
|
||||
p.log.Warnf(
|
||||
"VolumeSnapshotContent %s of backup %s cannot be found. May leave orphan snapshot %s on cloud provider.",
|
||||
snapCont.Name, input.Backup.Name, *snapCont.Status.SnapshotHandle)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, fmt.Sprintf(
|
||||
"failed to set DeletionPolicy on volumesnapshotcontent %s. Skipping deletion",
|
||||
snapCont.Name))
|
||||
}
|
||||
|
||||
if err := p.crClient.Delete(
|
||||
@@ -143,29 +109,10 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
||||
return nil
|
||||
}
|
||||
|
||||
var checkVSCReadiness = func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
|
||||
if err := client.Get(ctx, crclient.ObjectKeyFromObject(vsc), tmpVSC); err != nil {
|
||||
return false, errors.Wrapf(
|
||||
err, "failed to get VolumeSnapshotContent %s", vsc.Name,
|
||||
)
|
||||
}
|
||||
|
||||
if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func NewVolumeSnapshotContentDeleteItemAction(
|
||||
f client.Factory,
|
||||
) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (any, error) {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -22,13 +22,10 @@ import (
|
||||
"testing"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
@@ -40,15 +37,11 @@ import (
|
||||
func TestVSCExecute(t *testing.T) {
|
||||
snapshotHandleStr := "test"
|
||||
tests := []struct {
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
backup *velerov1api.Backup
|
||||
function func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error)
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
backup *velerov1api.Backup
|
||||
createVSC bool
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
@@ -69,30 +62,17 @@ func TestVSCExecute(t *testing.T) {
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "Normal case, VolumeSnapshot should be deleted",
|
||||
name: "VolumeSnapshotContent doesn't exist in the cluster, no error",
|
||||
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
function: func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
return true, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Normal case, VolumeSnapshot should be deleted",
|
||||
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
|
||||
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
|
||||
expectErr: true,
|
||||
function: func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
return false, errors.Errorf("test error case")
|
||||
},
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
createVSC: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -100,7 +80,6 @@ func TestVSCExecute(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
logger := logrus.StandardLogger()
|
||||
checkVSCReadiness = test.function
|
||||
|
||||
p := volumeSnapshotContentDeleteItemAction{log: logger, crClient: crClient}
|
||||
|
||||
@@ -110,6 +89,10 @@ func TestVSCExecute(t *testing.T) {
|
||||
test.item = &unstructured.Unstructured{Object: vscMap}
|
||||
}
|
||||
|
||||
if test.createVSC {
|
||||
require.NoError(t, crClient.Create(context.TODO(), test.vsc))
|
||||
}
|
||||
|
||||
err := p.Execute(
|
||||
&velero.DeleteItemActionExecuteInput{
|
||||
Item: test.item,
|
||||
@@ -157,54 +140,3 @@ func TestNewVolumeSnapshotContentDeleteItemAction(t *testing.T) {
|
||||
_, err1 := plugin1(logger)
|
||||
require.NoError(t, err1)
|
||||
}
|
||||
|
||||
func TestCheckVSCReadiness(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
vsc *snapshotv1api.VolumeSnapshotContent
|
||||
createVSC bool
|
||||
expectErr bool
|
||||
ready bool
|
||||
}{
|
||||
{
|
||||
name: "VSC not exist",
|
||||
vsc: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Namespace: "velero",
|
||||
},
|
||||
},
|
||||
createVSC: false,
|
||||
expectErr: true,
|
||||
ready: false,
|
||||
},
|
||||
{
|
||||
name: "VSC not ready",
|
||||
vsc: &snapshotv1api.VolumeSnapshotContent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vsc-1",
|
||||
Namespace: "velero",
|
||||
},
|
||||
},
|
||||
createVSC: true,
|
||||
expectErr: false,
|
||||
ready: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
if test.createVSC {
|
||||
require.NoError(t, crClient.Create(ctx, test.vsc))
|
||||
}
|
||||
|
||||
ready, err := checkVSCReadiness(ctx, test.vsc, crClient)
|
||||
require.Equal(t, test.ready, ready)
|
||||
if test.expectErr {
|
||||
require.Error(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,7 +230,7 @@ func (h *harness) addResource(t *testing.T, resource *test.APIResource) {
|
||||
}
|
||||
|
||||
// recordResourcesAction is a delete item action that can be configured to run
|
||||
// for specific resources/namespaces and simply record the items that is
|
||||
// for specific resources/namespaces and simply record the items that is is
|
||||
// executed for.
|
||||
type recordResourcesAction struct {
|
||||
selector velero.ResourceSelector
|
||||
|
||||
@@ -46,9 +46,6 @@ type hookKey struct {
|
||||
// Container indicates the container hooks use.
|
||||
// For hooks specified in the backup/restore spec, the container might be the same under different hookName.
|
||||
container string
|
||||
// hookIndex contains the slice index for the specific hook, in order to track multiple hooks
|
||||
// for the same container
|
||||
hookIndex int
|
||||
}
|
||||
|
||||
// hookStatus records the execution status of a specific hook.
|
||||
@@ -86,7 +83,7 @@ func NewHookTracker() *HookTracker {
|
||||
// Add adds a hook to the hook tracker
|
||||
// Add must precede the Record for each individual hook.
|
||||
// In other words, a hook must be added to the tracker before its execution result is recorded.
|
||||
func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int) {
|
||||
func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
|
||||
ht.lock.Lock()
|
||||
defer ht.lock.Unlock()
|
||||
|
||||
@@ -97,7 +94,6 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
|
||||
container: container,
|
||||
hookPhase: hookPhase,
|
||||
hookName: hookName,
|
||||
hookIndex: hookIndex,
|
||||
}
|
||||
|
||||
if _, ok := ht.tracker[key]; !ok {
|
||||
@@ -112,7 +108,7 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
|
||||
// Record records the hook's execution status
|
||||
// Add must precede the Record for each individual hook.
|
||||
// In other words, a hook must be added to the tracker before its execution result is recorded.
|
||||
func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int, hookFailed bool, hookErr error) error {
|
||||
func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
|
||||
ht.lock.Lock()
|
||||
defer ht.lock.Unlock()
|
||||
|
||||
@@ -123,7 +119,6 @@ func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName
|
||||
container: container,
|
||||
hookPhase: hookPhase,
|
||||
hookName: hookName,
|
||||
hookIndex: hookIndex,
|
||||
}
|
||||
|
||||
if _, ok := ht.tracker[key]; !ok {
|
||||
@@ -184,24 +179,24 @@ func NewMultiHookTracker() *MultiHookTracker {
|
||||
}
|
||||
|
||||
// Add adds a backup/restore hook to the tracker
|
||||
func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int) {
|
||||
func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
|
||||
mht.lock.Lock()
|
||||
defer mht.lock.Unlock()
|
||||
|
||||
if _, ok := mht.trackers[name]; !ok {
|
||||
mht.trackers[name] = NewHookTracker()
|
||||
}
|
||||
mht.trackers[name].Add(podNamespace, podName, container, source, hookName, hookPhase, hookIndex)
|
||||
mht.trackers[name].Add(podNamespace, podName, container, source, hookName, hookPhase)
|
||||
}
|
||||
|
||||
// Record records a backup/restore hook execution status
|
||||
func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int, hookFailed bool, hookErr error) error {
|
||||
func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
|
||||
mht.lock.RLock()
|
||||
defer mht.lock.RUnlock()
|
||||
|
||||
var err error
|
||||
if _, ok := mht.trackers[name]; ok {
|
||||
err = mht.trackers[name].Record(podNamespace, podName, container, source, hookName, hookPhase, hookIndex, hookFailed, hookErr)
|
||||
err = mht.trackers[name].Record(podNamespace, podName, container, source, hookName, hookPhase, hookFailed, hookErr)
|
||||
} else {
|
||||
err = fmt.Errorf("the backup/restore not exist in hook tracker, backup/restore name: %s", name)
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestNewHookTracker(t *testing.T) {
|
||||
func TestHookTracker_Add(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -50,8 +50,8 @@ func TestHookTracker_Add(t *testing.T) {
|
||||
|
||||
func TestHookTracker_Record(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -67,10 +67,10 @@ func TestHookTracker_Record(t *testing.T) {
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
|
||||
err = tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, false, nil)
|
||||
err = tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.hookFailed)
|
||||
}
|
||||
@@ -78,30 +78,29 @@ func TestHookTracker_Record(t *testing.T) {
|
||||
func TestHookTracker_Stat(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0)
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "")
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
|
||||
attempted, failed := tracker.Stat()
|
||||
assert.Equal(t, 3, attempted)
|
||||
assert.Equal(t, 2, attempted)
|
||||
assert.Equal(t, 1, failed)
|
||||
}
|
||||
|
||||
func TestHookTracker_IsComplete(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0, true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true, fmt.Errorf("err"))
|
||||
assert.True(t, tracker.IsComplete())
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
assert.False(t, tracker.IsComplete())
|
||||
}
|
||||
|
||||
func TestHookTracker_HookErrs(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
|
||||
hookErrs := tracker.HookErrs()
|
||||
assert.Len(t, hookErrs, 1)
|
||||
@@ -110,7 +109,7 @@ func TestHookTracker_HookErrs(t *testing.T) {
|
||||
func TestMultiHookTracker_Add(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -119,7 +118,6 @@ func TestMultiHookTracker_Add(t *testing.T) {
|
||||
hookPhase: "",
|
||||
hookSource: HookSourceAnnotation,
|
||||
hookName: "h1",
|
||||
hookIndex: 0,
|
||||
}
|
||||
|
||||
_, ok := mht.trackers["restore1"].tracker[key]
|
||||
@@ -128,8 +126,8 @@ func TestMultiHookTracker_Add(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_Record(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
err := mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
err := mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -138,7 +136,6 @@ func TestMultiHookTracker_Record(t *testing.T) {
|
||||
hookPhase: "",
|
||||
hookSource: HookSourceAnnotation,
|
||||
hookName: "h1",
|
||||
hookIndex: 0,
|
||||
}
|
||||
|
||||
info := mht.trackers["restore1"].tracker[key]
|
||||
@@ -146,31 +143,29 @@ func TestMultiHookTracker_Record(t *testing.T) {
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
err = mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
|
||||
err = mht.Record("restore2", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
err = mht.Record("restore2", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMultiHookTracker_Stat(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0)
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1)
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0, false, nil)
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1, false, nil)
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "")
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", false, nil)
|
||||
|
||||
attempted, failed := mht.Stat("restore1")
|
||||
assert.Equal(t, 3, attempted)
|
||||
assert.Equal(t, 2, attempted)
|
||||
assert.Equal(t, 1, failed)
|
||||
}
|
||||
|
||||
func TestMultiHookTracker_Delete(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Delete("restore1")
|
||||
|
||||
_, ok := mht.trackers["restore1"]
|
||||
@@ -179,11 +174,11 @@ func TestMultiHookTracker_Delete(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_IsComplete(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0)
|
||||
mht.Record("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0, true, fmt.Errorf("err"))
|
||||
mht.Add("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre)
|
||||
mht.Record("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true, fmt.Errorf("err"))
|
||||
assert.True(t, mht.IsComplete("backup1"))
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
assert.False(t, mht.IsComplete("restore1"))
|
||||
|
||||
assert.True(t, mht.IsComplete("restore2"))
|
||||
@@ -191,8 +186,8 @@ func TestMultiHookTracker_IsComplete(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_HookErrs(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
|
||||
hookErrs := mht.HookErrs("restore1")
|
||||
assert.Len(t, hookErrs, 1)
|
||||
|
||||
@@ -223,7 +223,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
|
||||
hookFromAnnotations = getPodExecHookFromAnnotations(metadata.GetAnnotations(), "", log)
|
||||
}
|
||||
if hookFromAnnotations != nil {
|
||||
hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, 0)
|
||||
hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase)
|
||||
|
||||
hookLog := log.WithFields(
|
||||
logrus.Fields{
|
||||
@@ -239,7 +239,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
|
||||
hookLog.WithError(errExec).Error("Error executing hook")
|
||||
hookFailed = true
|
||||
}
|
||||
errTracker := hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, 0, hookFailed, errExec)
|
||||
errTracker := hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, hookFailed, errExec)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -267,10 +267,10 @@ func (h *DefaultItemHookHandler) HandleHooks(
|
||||
hooks = resourceHook.Post
|
||||
}
|
||||
|
||||
for i, hook := range hooks {
|
||||
for _, hook := range hooks {
|
||||
if groupResource == kuberesource.Pods {
|
||||
if hook.Exec != nil {
|
||||
hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, i)
|
||||
hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase)
|
||||
// The remaining hooks will only be executed if modeFailError is nil.
|
||||
// Otherwise, execution will stop and only hook collection will occur.
|
||||
if modeFailError == nil {
|
||||
@@ -291,7 +291,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
|
||||
modeFailError = err
|
||||
}
|
||||
}
|
||||
errTracker := hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, i, hookFailed, err)
|
||||
errTracker := hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, hookFailed, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -534,11 +534,6 @@ type PodExecRestoreHook struct {
|
||||
HookSource string
|
||||
Hook velerov1api.ExecRestoreHook
|
||||
executed bool
|
||||
// hookIndex contains the slice index for the specific hook from the restore spec
|
||||
// in order to track multiple hooks. Stored here because restore hook results are recorded
|
||||
// outside of the original slice iteration
|
||||
// for the same container
|
||||
hookIndex int
|
||||
}
|
||||
|
||||
// GroupRestoreExecHooks returns a list of hooks to be executed in a pod grouped by
|
||||
@@ -566,13 +561,12 @@ func GroupRestoreExecHooks(
|
||||
if hookFromAnnotation.Container == "" {
|
||||
hookFromAnnotation.Container = pod.Spec.Containers[0].Name
|
||||
}
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", HookPhase(""), 0)
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", HookPhase(""))
|
||||
byContainer[hookFromAnnotation.Container] = []PodExecRestoreHook{
|
||||
{
|
||||
HookName: "<from-annotation>",
|
||||
HookSource: HookSourceAnnotation,
|
||||
Hook: *hookFromAnnotation,
|
||||
hookIndex: 0,
|
||||
},
|
||||
}
|
||||
return byContainer, nil
|
||||
@@ -585,7 +579,7 @@ func GroupRestoreExecHooks(
|
||||
if !rrh.Selector.applicableTo(kuberesource.Pods, namespace, labels) {
|
||||
continue
|
||||
}
|
||||
for i, rh := range rrh.RestoreHooks {
|
||||
for _, rh := range rrh.RestoreHooks {
|
||||
if rh.Exec == nil {
|
||||
continue
|
||||
}
|
||||
@@ -593,7 +587,6 @@ func GroupRestoreExecHooks(
|
||||
HookName: rrh.Name,
|
||||
Hook: *rh.Exec,
|
||||
HookSource: HookSourceSpec,
|
||||
hookIndex: i,
|
||||
}
|
||||
// default to false if attr WaitForReady not set
|
||||
if named.Hook.WaitForReady == nil {
|
||||
@@ -603,7 +596,7 @@ func GroupRestoreExecHooks(
|
||||
if named.Hook.Container == "" {
|
||||
named.Hook.Container = pod.Spec.Containers[0].Name
|
||||
}
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, HookPhase(""), i)
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, HookPhase(""))
|
||||
byContainer[named.Hook.Container] = append(byContainer[named.Hook.Container], named)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1151,7 +1151,6 @@ func TestGroupRestoreExecHooks(t *testing.T) {
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute},
|
||||
WaitForReady: boolptr.False(),
|
||||
},
|
||||
hookIndex: 0,
|
||||
},
|
||||
{
|
||||
HookName: "hook1",
|
||||
@@ -1164,7 +1163,6 @@ func TestGroupRestoreExecHooks(t *testing.T) {
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute * 2},
|
||||
WaitForReady: boolptr.False(),
|
||||
},
|
||||
hookIndex: 2,
|
||||
},
|
||||
{
|
||||
HookName: "hook2",
|
||||
@@ -1177,7 +1175,6 @@ func TestGroupRestoreExecHooks(t *testing.T) {
|
||||
WaitTimeout: metav1.Duration{Duration: time.Minute * 4},
|
||||
WaitForReady: boolptr.True(),
|
||||
},
|
||||
hookIndex: 0,
|
||||
},
|
||||
},
|
||||
"container2": {
|
||||
@@ -1192,7 +1189,6 @@ func TestGroupRestoreExecHooks(t *testing.T) {
|
||||
WaitTimeout: metav1.Duration{Duration: time.Second * 3},
|
||||
WaitForReady: boolptr.False(),
|
||||
},
|
||||
hookIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -116,7 +116,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
// not yet been observed to be running. It relies on the Informer not to be called concurrently.
|
||||
// When a container is observed running and its hooks are executed, the container is deleted
|
||||
// from the byContainer map. When the map is empty the watch is ended.
|
||||
handler := func(newObj any) {
|
||||
handler := func(newObj interface{}) {
|
||||
newPod, ok := newObj.(*v1.Pod)
|
||||
if !ok {
|
||||
return
|
||||
@@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
hookLog.Error(err)
|
||||
errors = append(errors, err)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
hookFailed = true
|
||||
}
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, hookFailed, hookErr)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hookFailed, hookErr)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
@@ -214,23 +214,18 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
|
||||
selector := fields.OneTermEqualSelector("metadata.name", pod.Name)
|
||||
lw := e.ListWatchFactory.NewListWatch(pod.Namespace, selector)
|
||||
_, podWatcher := cache.NewInformerWithOptions(cache.InformerOptions{
|
||||
ListerWatcher: lw,
|
||||
ObjectType: pod,
|
||||
ResyncPeriod: 0,
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: handler,
|
||||
UpdateFunc: func(_, newObj any) {
|
||||
handler(newObj)
|
||||
},
|
||||
DeleteFunc: func(obj any) {
|
||||
err := fmt.Errorf("pod %s deleted before all hooks were executed", kube.NamespaceAndName(pod))
|
||||
log.Error(err)
|
||||
cancel()
|
||||
},
|
||||
|
||||
_, podWatcher := cache.NewInformer(lw, pod, 0, cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: handler,
|
||||
UpdateFunc: func(_, newObj interface{}) {
|
||||
handler(newObj)
|
||||
},
|
||||
},
|
||||
)
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
err := fmt.Errorf("pod %s deleted before all hooks were executed", kube.NamespaceAndName(pod))
|
||||
log.Error(err)
|
||||
cancel()
|
||||
},
|
||||
})
|
||||
|
||||
podWatcher.Run(ctx.Done())
|
||||
|
||||
@@ -239,7 +234,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
// containers to become ready.
|
||||
// Each unexecuted hook is logged as an error and this error will be returned from this function.
|
||||
for _, hooks := range byContainer {
|
||||
for i, hook := range hooks {
|
||||
for _, hook := range hooks {
|
||||
if hook.executed {
|
||||
continue
|
||||
}
|
||||
@@ -252,7 +247,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
||||
},
|
||||
)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
|
||||
@@ -999,6 +999,11 @@ func TestMaxHookWait(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRestoreHookTrackerUpdate(t *testing.T) {
|
||||
type change struct {
|
||||
// delta to wait since last change applied or pod added
|
||||
wait time.Duration
|
||||
updated *v1.Pod
|
||||
}
|
||||
type expectedExecution struct {
|
||||
hook *velerov1api.ExecHook
|
||||
name string
|
||||
@@ -1007,17 +1012,17 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
hookTracker1 := NewMultiHookTracker()
|
||||
hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", HookPhase(""), 0)
|
||||
hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", HookPhase(""))
|
||||
|
||||
hookTracker2 := NewMultiHookTracker()
|
||||
hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)
|
||||
hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
|
||||
hookTracker3 := NewMultiHookTracker()
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", HookPhase(""), 0)
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", HookPhase(""))
|
||||
|
||||
hookTracker4 := NewMultiHookTracker()
|
||||
hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)
|
||||
hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
|
||||
tests1 := []struct {
|
||||
name string
|
||||
|
||||
@@ -429,69 +429,69 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
|
||||
|
||||
func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
||||
pvcStandardSc := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"storageClassName": "standard",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pvcPremiumSc := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"storageClassName": "premium",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pvcGoldSc := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"storageClassName": "gold",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
deployNginxOneReplica := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]any{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"replicas": int64(1),
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
@@ -502,27 +502,27 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
||||
},
|
||||
}
|
||||
deployNginxTwoReplica := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]any{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"replicas": int64(2),
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
@@ -533,31 +533,31 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
||||
},
|
||||
}
|
||||
deployNginxMysql := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]any{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]any{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"replicas": int64(1),
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
map[string]any{
|
||||
map[string]interface{}{
|
||||
"name": "mysql",
|
||||
"image": "mysql:latest",
|
||||
},
|
||||
@@ -568,19 +568,19 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
||||
},
|
||||
}
|
||||
cmTrue := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"data": map[string]any{
|
||||
"data": map[string]interface{}{
|
||||
"test": "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
cmFalse := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"data": map[string]any{
|
||||
"data": map[string]interface{}{
|
||||
"test": "false",
|
||||
},
|
||||
},
|
||||
|
||||
@@ -53,7 +53,7 @@ func (p *StrategicMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.Fie
|
||||
|
||||
// strategicPatchObject applies a strategic merge patch of `patchBytes` to
|
||||
// `originalObject` and stores the result in `objToUpdate`.
|
||||
// It additionally returns the map[string]any representation of the
|
||||
// It additionally returns the map[string]interface{} representation of the
|
||||
// `originalObject` and `patchBytes`.
|
||||
// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned.
|
||||
func strategicPatchObject(
|
||||
@@ -67,7 +67,7 @@ func strategicPatchObject(
|
||||
return err
|
||||
}
|
||||
|
||||
patchMap := make(map[string]any)
|
||||
patchMap := make(map[string]interface{})
|
||||
var strictErrs []error
|
||||
strictErrs, err = kubejson.UnmarshalStrict(patchBytes, &patchMap)
|
||||
if err != nil {
|
||||
@@ -84,8 +84,8 @@ func strategicPatchObject(
|
||||
// <originalMap> and stores the result in <objToUpdate>.
|
||||
// NOTE: <objToUpdate> must be a versioned object.
|
||||
func applyPatchToObject(
|
||||
originalMap map[string]any,
|
||||
patchMap map[string]any,
|
||||
originalMap map[string]interface{},
|
||||
patchMap map[string]interface{},
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
strictErrs []error,
|
||||
@@ -117,9 +117,14 @@ func applyPatchToObject(
|
||||
})
|
||||
}
|
||||
} else if len(strictErrs) > 0 {
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
switch {
|
||||
//case validationDirective == metav1.FieldValidationWarn:
|
||||
// addStrictDecodingWarnings(requestContext, strictErrs)
|
||||
default:
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -46,14 +46,14 @@ type Action struct {
|
||||
// Type defined specific type of action, currently only support 'skip'
|
||||
Type VolumeActionType `yaml:"type"`
|
||||
// Parameters defined map of parameters when executing a specific action
|
||||
Parameters map[string]any `yaml:"parameters,omitempty"`
|
||||
Parameters map[string]interface{} `yaml:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
// volumePolicy defined policy to conditions to match Volumes and related action to handle matched Volumes
|
||||
type VolumePolicy struct {
|
||||
// Conditions defined list of conditions to match Volumes
|
||||
Conditions map[string]any `yaml:"conditions"`
|
||||
Action Action `yaml:"action"`
|
||||
Conditions map[string]interface{} `yaml:"conditions"`
|
||||
Action Action `yaml:"action"`
|
||||
}
|
||||
|
||||
// resourcePolicies currently defined slice of volume policies to handle backup
|
||||
@@ -76,16 +76,6 @@ func unmarshalResourcePolicies(yamlData *string) (*ResourcePolicies, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode yaml data into resource policies %v", err)
|
||||
}
|
||||
|
||||
for _, vp := range resPolicies.VolumePolicies {
|
||||
if raw, ok := vp.Conditions["pvcLabels"]; ok {
|
||||
switch raw.(type) {
|
||||
case map[string]any, map[string]string:
|
||||
default:
|
||||
return nil, fmt.Errorf("pvcLabels must be a map of string to string, got %T", raw)
|
||||
}
|
||||
}
|
||||
}
|
||||
return resPolicies, nil
|
||||
}
|
||||
|
||||
@@ -106,9 +96,6 @@ func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
|
||||
volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
|
||||
volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
|
||||
volP.conditions = append(volP.conditions, &volumeTypeCondition{volumeTypes: con.VolumeTypes})
|
||||
if len(con.PVCLabels) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
|
||||
}
|
||||
p.volumePolicies = append(p.volumePolicies, volP)
|
||||
}
|
||||
|
||||
@@ -135,28 +122,16 @@ func (p *Policies) match(res *structuredVolume) *Action {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Policies) GetMatchAction(res any) (*Action, error) {
|
||||
data, ok := res.(VolumeFilterData)
|
||||
if !ok {
|
||||
return nil, errors.New("failed to convert input to VolumeFilterData")
|
||||
}
|
||||
|
||||
func (p *Policies) GetMatchAction(res interface{}) (*Action, error) {
|
||||
volume := &structuredVolume{}
|
||||
switch {
|
||||
case data.PersistentVolume != nil:
|
||||
volume.parsePV(data.PersistentVolume)
|
||||
if data.PVC != nil {
|
||||
volume.parsePVC(data.PVC)
|
||||
}
|
||||
case data.PodVolume != nil:
|
||||
volume.parsePodVolume(data.PodVolume)
|
||||
if data.PVC != nil {
|
||||
volume.parsePVC(data.PVC)
|
||||
}
|
||||
switch obj := res.(type) {
|
||||
case *v1.PersistentVolume:
|
||||
volume.parsePV(obj)
|
||||
case *v1.Volume:
|
||||
volume.parsePodVolume(obj)
|
||||
default:
|
||||
return nil, errors.New("failed to convert object")
|
||||
}
|
||||
|
||||
return p.match(volume), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -93,70 +93,21 @@ func TestLoadResourcePolicies(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported format volume policies",
|
||||
name: "supported formart volume policies",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,100Gi'
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format csi driver with volumeAttributes for volume policies",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,100Gi'
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
volumeAttributes:
|
||||
key1: value1
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format pvcLabels",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: database
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "error format of pvcLabels (not a map)",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels: "production"
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
nfs: {}
|
||||
storageClass:
|
||||
- gp2
|
||||
- ebs-sc
|
||||
action:
|
||||
type: skip`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported format pvcLabels with extra keys",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
region: us-west
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@@ -175,54 +126,36 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "files.csi.driver",
|
||||
"volumeAttributes": map[string]string{"protocol": "nfs"},
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "snapshot"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "10,100Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "fs-backup"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "snapshot"},
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
@@ -239,24 +172,6 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
||||
},
|
||||
expectedAction: &Action{Type: "skip"},
|
||||
},
|
||||
{
|
||||
name: "match policy AFS NFS",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "afs-nfs",
|
||||
csi: &csiVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
},
|
||||
expectedAction: &Action{Type: "skip"},
|
||||
},
|
||||
{
|
||||
name: "match policy AFS SMB",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "afs-smb",
|
||||
csi: &csiVolumeSource{Driver: "files.csi.driver"},
|
||||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
{
|
||||
name: "both matches return the first policy",
|
||||
volume: &structuredVolume{
|
||||
@@ -275,29 +190,6 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
||||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
{
|
||||
name: "match pvcLabels condition",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "some-class",
|
||||
pvcLabels: map[string]string{
|
||||
"environment": "production",
|
||||
"team": "backend",
|
||||
},
|
||||
},
|
||||
expectedAction: &Action{Type: "snapshot"},
|
||||
},
|
||||
{
|
||||
name: "mismatch pvcLabels condition",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "some-class",
|
||||
pvcLabels: map[string]string{
|
||||
"environment": "staging",
|
||||
},
|
||||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -334,27 +226,7 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"test-data": `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,10Gi'
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
"test-data": "version: v1\nvolumePolicies:\n- conditions:\n capacity: '0,10Gi'\n action:\n type: skip",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -364,39 +236,13 @@ volumePolicies:
|
||||
|
||||
// Check that the returned resourcePolicies object contains the expected data
|
||||
assert.Equal(t, "v1", resPolicies.version)
|
||||
|
||||
assert.Len(t, resPolicies.volumePolicies, 3)
|
||||
|
||||
assert.Len(t, resPolicies.volumePolicies, 1)
|
||||
policies := ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"csi": map[string]any{
|
||||
"driver": "disks.csi.driver",
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
},
|
||||
},
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"csi": map[string]any{
|
||||
"driver": "files.csi.driver",
|
||||
"volumeAttributes": map[string]string{"protocol": "nfs"},
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
},
|
||||
},
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
@@ -404,13 +250,11 @@ volumePolicies:
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := &Policies{}
|
||||
err = p.BuildPolicy(&policies)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build policy: %v", err)
|
||||
t.Fatalf("failed to build policy with error %v", err)
|
||||
}
|
||||
|
||||
assert.Equal(t, p, resPolicies)
|
||||
}
|
||||
|
||||
@@ -419,8 +263,6 @@ func TestGetMatchAction(t *testing.T) {
|
||||
name string
|
||||
yamlData string
|
||||
vol *v1.PersistentVolume
|
||||
podVol *v1.Volume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
skip bool
|
||||
}{
|
||||
{
|
||||
@@ -456,173 +298,7 @@ volumePolicies:
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS CSI condition with AFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with AFS SMB volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with AFS NFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with AFS SMB volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with AFS NFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1", "protocol": "nfs"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "csi not configured and testing capacity condition",
|
||||
name: "csi not configured",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
@@ -739,224 +415,6 @@ volumePolicies:
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match, criteria label is a subset of the pvc labels",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production", "app": "backend"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match don't match exactly",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: frontend
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC labels mismatch",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-2",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "staging"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels match",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-1"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels mismatch",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-2"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-2",
|
||||
Labels: map[string]string{"environment": "staging"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels match with extra keys on PVC",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-3"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-3",
|
||||
Labels: map[string]string{"environment": "production", "app": "backend"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels don't match exactly",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: frontend
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-4"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-4",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@@ -968,20 +426,7 @@ volumePolicies:
|
||||
policies := &Policies{}
|
||||
err = policies.BuildPolicy(resPolicies)
|
||||
assert.NoError(t, err)
|
||||
vfd := VolumeFilterData{}
|
||||
if tc.pvc != nil {
|
||||
vfd.PVC = tc.pvc
|
||||
}
|
||||
|
||||
if tc.vol != nil {
|
||||
vfd.PersistentVolume = tc.vol
|
||||
}
|
||||
|
||||
if tc.podVol != nil {
|
||||
vfd.PodVolume = tc.podVol
|
||||
}
|
||||
|
||||
action, err := policies.GetMatchAction(vfd)
|
||||
action, err := policies.GetMatchAction(tc.vol)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if tc.skip {
|
||||
@@ -994,82 +439,3 @@ volumePolicies:
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMatchAction_Errors(t *testing.T) {
|
||||
p := &Policies{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input any
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "invalid input type",
|
||||
input: "invalid input",
|
||||
expectedErr: "failed to convert input to VolumeFilterData",
|
||||
},
|
||||
{
|
||||
name: "no volume provided",
|
||||
input: VolumeFilterData{
|
||||
PersistentVolume: nil,
|
||||
PodVolume: nil,
|
||||
PVC: nil,
|
||||
},
|
||||
expectedErr: "failed to convert object",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
action, err := p.GetMatchAction(tc.input)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tc.expectedErr)
|
||||
assert.Nil(t, action)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePVC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
expectedLabels map[string]string
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid PVC with labels",
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"env": "prod"},
|
||||
},
|
||||
},
|
||||
expectedLabels: map[string]string{"env": "prod"},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid PVC with empty labels",
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
},
|
||||
expectedLabels: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "nil PVC pointer",
|
||||
pvc: (*v1.PersistentVolumeClaim)(nil),
|
||||
expectedLabels: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
s := &structuredVolume{}
|
||||
s.parsePVC(tc.pvc)
|
||||
|
||||
assert.Equal(t, tc.expectedLabels, s.pvcLabels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package resourcepolicies
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// VolumeFilterData bundles the volume data needed for volume policy filtering
|
||||
type VolumeFilterData struct {
|
||||
PersistentVolume *corev1.PersistentVolume
|
||||
PodVolume *corev1.Volume
|
||||
PVC *corev1.PersistentVolumeClaim
|
||||
}
|
||||
|
||||
// NewVolumeFilterData constructs a new VolumeFilterData instance.
|
||||
func NewVolumeFilterData(pv *corev1.PersistentVolume, podVol *corev1.Volume, pvc *corev1.PersistentVolumeClaim) VolumeFilterData {
|
||||
return VolumeFilterData{
|
||||
PersistentVolume: pv,
|
||||
PodVolume: podVol,
|
||||
PVC: pvc,
|
||||
}
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
package resourcepolicies
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestNewVolumeFilterData(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pv *corev1.PersistentVolume
|
||||
podVol *corev1.Volume
|
||||
pvc *corev1.PersistentVolumeClaim
|
||||
expectedPVName string
|
||||
expectedPodName string
|
||||
expectedPVCName string
|
||||
}{
|
||||
{
|
||||
name: "all provided",
|
||||
pv: &corev1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-test",
|
||||
},
|
||||
},
|
||||
podVol: &corev1.Volume{
|
||||
Name: "pod-vol-test",
|
||||
},
|
||||
pvc: &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-test",
|
||||
},
|
||||
},
|
||||
expectedPVName: "pv-test",
|
||||
expectedPodName: "pod-vol-test",
|
||||
expectedPVCName: "pvc-test",
|
||||
},
|
||||
{
|
||||
name: "only PV provided",
|
||||
pv: &corev1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-only",
|
||||
},
|
||||
},
|
||||
podVol: nil,
|
||||
pvc: nil,
|
||||
expectedPVName: "pv-only",
|
||||
expectedPodName: "",
|
||||
expectedPVCName: "",
|
||||
},
|
||||
{
|
||||
name: "only PodVolume provided",
|
||||
pv: nil,
|
||||
podVol: &corev1.Volume{
|
||||
Name: "pod-only",
|
||||
},
|
||||
pvc: nil,
|
||||
expectedPVName: "",
|
||||
expectedPodName: "pod-only",
|
||||
expectedPVCName: "",
|
||||
},
|
||||
{
|
||||
name: "only PVC provided",
|
||||
pv: nil,
|
||||
podVol: nil,
|
||||
pvc: &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc-only",
|
||||
},
|
||||
},
|
||||
expectedPVName: "",
|
||||
expectedPodName: "",
|
||||
expectedPVCName: "pvc-only",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
vfd := NewVolumeFilterData(tc.pv, tc.podVol, tc.pvc)
|
||||
if tc.expectedPVName != "" {
|
||||
assert.NotNil(t, vfd.PersistentVolume)
|
||||
assert.Equal(t, tc.expectedPVName, vfd.PersistentVolume.Name)
|
||||
} else {
|
||||
assert.Nil(t, vfd.PersistentVolume)
|
||||
}
|
||||
if tc.expectedPodName != "" {
|
||||
assert.NotNil(t, vfd.PodVolume)
|
||||
assert.Equal(t, tc.expectedPodName, vfd.PodVolume.Name)
|
||||
} else {
|
||||
assert.Nil(t, vfd.PodVolume)
|
||||
}
|
||||
if tc.expectedPVCName != "" {
|
||||
assert.NotNil(t, vfd.PVC)
|
||||
assert.Equal(t, tc.expectedPVCName, vfd.PVC.Name)
|
||||
} else {
|
||||
assert.Nil(t, vfd.PVC)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -20,8 +20,6 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v3"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
@@ -50,7 +48,6 @@ type structuredVolume struct {
|
||||
nfs *nFSVolumeSource
|
||||
csi *csiVolumeSource
|
||||
volumeType SupportedVolume
|
||||
pvcLabels map[string]string
|
||||
}
|
||||
|
||||
func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
|
||||
@@ -63,18 +60,12 @@ func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
|
||||
|
||||
csi := pv.Spec.CSI
|
||||
if csi != nil {
|
||||
s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes}
|
||||
s.csi = &csiVolumeSource{Driver: csi.Driver}
|
||||
}
|
||||
|
||||
s.volumeType = getVolumeTypeFromPV(pv)
|
||||
}
|
||||
|
||||
func (s *structuredVolume) parsePVC(pvc *corev1api.PersistentVolumeClaim) {
|
||||
if pvc != nil && len(pvc.GetLabels()) > 0 {
|
||||
s.pvcLabels = pvc.Labels
|
||||
}
|
||||
}
|
||||
|
||||
func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) {
|
||||
nfs := vol.NFS
|
||||
if nfs != nil {
|
||||
@@ -83,33 +74,12 @@ func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) {
|
||||
|
||||
csi := vol.CSI
|
||||
if csi != nil {
|
||||
s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes}
|
||||
s.csi = &csiVolumeSource{Driver: csi.Driver}
|
||||
}
|
||||
|
||||
s.volumeType = getVolumeTypeFromVolume(vol)
|
||||
}
|
||||
|
||||
// pvcLabelsCondition defines a condition that matches if the PVC's labels contain all the provided key/value pairs.
|
||||
type pvcLabelsCondition struct {
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) match(v *structuredVolume) bool {
|
||||
// No labels specified: always match.
|
||||
if len(c.labels) == 0 {
|
||||
return true
|
||||
}
|
||||
if v.pvcLabels == nil {
|
||||
return false
|
||||
}
|
||||
selector := labels.SelectorFromSet(c.labels)
|
||||
return selector.Matches(labels.Set(v.pvcLabels))
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type capacityCondition struct {
|
||||
capacity capacity
|
||||
}
|
||||
@@ -190,25 +160,7 @@ func (c *csiCondition) match(v *structuredVolume) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.csi.Driver != v.csi.Driver {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(c.csi.VolumeAttributes) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(v.csi.VolumeAttributes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for key, value := range c.csi.VolumeAttributes {
|
||||
if value != v.csi.VolumeAttributes[key] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
return c.csi.Driver == v.csi.Driver
|
||||
}
|
||||
|
||||
// parseCapacity parse string into capacity format
|
||||
@@ -256,9 +208,9 @@ func (c *capacity) isInRange(y resource.Quantity) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// unmarshalVolConditions parse map[string]any into volumeConditions format
|
||||
// unmarshalVolConditions parse map[string]interface{} into volumeConditions format
|
||||
// and validate key fields of the map.
|
||||
func unmarshalVolConditions(con map[string]any) (*volumeConditions, error) {
|
||||
func unmarshalVolConditions(con map[string]interface{}) (*volumeConditions, error) {
|
||||
volConditons := &volumeConditions{}
|
||||
buffer := new(bytes.Buffer)
|
||||
err := yaml.NewEncoder(buffer).Encode(con)
|
||||
|
||||
@@ -25,114 +25,12 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func setStructuredVolume(capacity resource.Quantity, sc string, nfs *nFSVolumeSource, csi *csiVolumeSource, pvcLabels map[string]string) *structuredVolume {
|
||||
func setStructuredVolume(capacity resource.Quantity, sc string, nfs *nFSVolumeSource, csi *csiVolumeSource) *structuredVolume {
|
||||
return &structuredVolume{
|
||||
capacity: capacity,
|
||||
storageClass: sc,
|
||||
nfs: nfs,
|
||||
csi: csi,
|
||||
pvcLabels: pvcLabels,
|
||||
}
|
||||
}
|
||||
|
||||
func TestPVCLabelsMatch(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
condition *pvcLabelsCondition
|
||||
volume *structuredVolume
|
||||
expectedMatch bool
|
||||
}{
|
||||
{
|
||||
name: "match exact label (single)",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
map[string]string{"environment": "production", "app": "database"},
|
||||
),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match exact label (multiple)",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{"environment": "production", "app": "database"},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
map[string]string{"environment": "production", "app": "database"},
|
||||
),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "mismatch label value",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
map[string]string{"environment": "staging", "app": "database"},
|
||||
),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "missing label key",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{"environment": "production", "region": "us-west"},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
map[string]string{"environment": "production", "app": "database"},
|
||||
),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "empty condition always matches",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
map[string]string{"environment": "staging"},
|
||||
),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "nil pvcLabels fails non-empty condition",
|
||||
condition: &pvcLabelsCondition{
|
||||
labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
volume: setStructuredVolume(
|
||||
*resource.NewQuantity(0, resource.BinarySI),
|
||||
"any",
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
expectedMatch: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
match := tt.condition.match(tt.volume)
|
||||
assert.Equal(t, tt.expectedMatch, match, "expected match %v, got %v", tt.expectedMatch, match)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -150,6 +48,7 @@ func TestParseCapacity(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test // capture range variable
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
actual, actualErr := parseCapacity(test.input)
|
||||
if test.expected != emptyCapacity {
|
||||
@@ -180,6 +79,7 @@ func TestCapacityIsInRange(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test // capture range variable
|
||||
t.Run(fmt.Sprintf("%v with %v", test.capacity, test.quantity), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -200,31 +100,31 @@ func TestStorageClassConditionMatch(t *testing.T) {
|
||||
{
|
||||
name: "match single storage class",
|
||||
condition: &storageClassCondition{[]string{"gp2"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "gp2", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "gp2", nil, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "match multiple storage classes",
|
||||
condition: &storageClassCondition{[]string{"gp2", "ebs-sc"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "gp2", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "gp2", nil, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "mismatch storage class",
|
||||
condition: &storageClassCondition{[]string{"gp2"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "ebs-sc", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "ebs-sc", nil, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "empty storage class",
|
||||
condition: &storageClassCondition{[]string{}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "ebs-sc", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "ebs-sc", nil, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty volume storage class",
|
||||
condition: &storageClassCondition{[]string{"gp2"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
}
|
||||
@@ -249,37 +149,37 @@ func TestNFSConditionMatch(t *testing.T) {
|
||||
{
|
||||
name: "match nfs condition",
|
||||
condition: &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty nfs condition",
|
||||
condition: &nfsCondition{nil},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty nfs server and path condition",
|
||||
condition: &nfsCondition{&nFSVolumeSource{Server: "", Path: ""}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20"}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "server mismatch",
|
||||
condition: &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20", Path: ""}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: ""}, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: ""}, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "empty nfs server condition",
|
||||
condition: &nfsCondition{&nFSVolumeSource{Path: "/mnt/data"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20", Path: "/mnt/data"}, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: "192.168.10.20", Path: "/mnt/data"}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty nfs volume",
|
||||
condition: &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, nil, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
}
|
||||
@@ -301,45 +201,21 @@ func TestCSIConditionMatch(t *testing.T) {
|
||||
expectedMatch bool
|
||||
}{
|
||||
{
|
||||
name: "match csi driver condition",
|
||||
name: "match csi condition",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty csi driver condition",
|
||||
name: "empty csi condition",
|
||||
condition: &csiCondition{nil},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty csi driver volume",
|
||||
name: "empty csi volume",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{}, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "match csi volumeAttributes condition",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty csi volumeAttributes condition",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test"}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}, nil),
|
||||
expectedMatch: true,
|
||||
},
|
||||
{
|
||||
name: "empty csi volumeAttributes volume",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": ""}}, nil),
|
||||
expectedMatch: false,
|
||||
},
|
||||
{
|
||||
name: "empty csi volumeAttributes volume",
|
||||
condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}},
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}, nil),
|
||||
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{}),
|
||||
expectedMatch: false,
|
||||
},
|
||||
}
|
||||
@@ -356,12 +232,12 @@ func TestCSIConditionMatch(t *testing.T) {
|
||||
func TestUnmarshalVolumeConditions(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
input map[string]any
|
||||
input map[string]interface{}
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "Valid input",
|
||||
input: map[string]any{
|
||||
input: map[string]interface{}{
|
||||
"capacity": "1Gi,10Gi",
|
||||
"storageClass": []string{
|
||||
"gp2",
|
||||
@@ -375,61 +251,32 @@ func TestUnmarshalVolumeConditions(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Invalid input: invalid capacity filed name",
|
||||
input: map[string]any{
|
||||
input: map[string]interface{}{
|
||||
"Capacity": "1Gi,10Gi",
|
||||
},
|
||||
expectedError: "field Capacity not found",
|
||||
},
|
||||
{
|
||||
name: "Invalid input: invalid storage class format",
|
||||
input: map[string]any{
|
||||
input: map[string]interface{}{
|
||||
"storageClass": "ebs-sc",
|
||||
},
|
||||
expectedError: "str `ebs-sc` into []string",
|
||||
},
|
||||
{
|
||||
name: "Invalid input: invalid csi format",
|
||||
input: map[string]any{
|
||||
input: map[string]interface{}{
|
||||
"csi": "csi.driver",
|
||||
},
|
||||
expectedError: "str `csi.driver` into resourcepolicies.csiVolumeSource",
|
||||
},
|
||||
{
|
||||
name: "Invalid input: unknown field",
|
||||
input: map[string]any{
|
||||
input: map[string]interface{}{
|
||||
"unknown": "foo",
|
||||
},
|
||||
expectedError: "field unknown not found in type",
|
||||
},
|
||||
{
|
||||
name: "Valid pvcLabels input as map[string]string",
|
||||
input: map[string]any{
|
||||
"capacity": "1Gi,10Gi",
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "Valid pvcLabels input as map[string]any",
|
||||
input: map[string]any{
|
||||
"capacity": "1Gi,10Gi",
|
||||
"pvcLabels": map[string]any{
|
||||
"environment": "production",
|
||||
"app": "database",
|
||||
},
|
||||
},
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "Invalid pvcLabels input: not a map",
|
||||
input: map[string]any{
|
||||
"capacity": "1Gi,10Gi",
|
||||
"pvcLabels": "production",
|
||||
},
|
||||
expectedError: "!!str `production` into map[string]string",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -455,8 +302,7 @@ func TestParsePodVolume(t *testing.T) {
|
||||
}
|
||||
csiVolume := corev1api.Volume{}
|
||||
csiVolume.CSI = &corev1api.CSIVolumeSource{
|
||||
Driver: "csi.example.com",
|
||||
VolumeAttributes: map[string]string{"protocol": "nfs"},
|
||||
Driver: "csi.example.com",
|
||||
}
|
||||
emptyVolume := corev1api.Volume{}
|
||||
|
||||
@@ -475,7 +321,7 @@ func TestParsePodVolume(t *testing.T) {
|
||||
{
|
||||
name: "CSI volume",
|
||||
inputVolume: &csiVolume,
|
||||
expectedCSI: &csiVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
expectedCSI: &csiVolumeSource{Driver: "csi.example.com"},
|
||||
},
|
||||
{
|
||||
name: "Empty volume",
|
||||
@@ -502,19 +348,9 @@ func TestParsePodVolume(t *testing.T) {
|
||||
if tc.expectedCSI != nil {
|
||||
if structuredVolume.csi == nil {
|
||||
t.Errorf("Expected a non-nil CSI volume source")
|
||||
} else if tc.expectedCSI.Driver != structuredVolume.csi.Driver {
|
||||
} else if *tc.expectedCSI != *structuredVolume.csi {
|
||||
t.Errorf("CSI volume source does not match expected value")
|
||||
}
|
||||
// Check volumeAttributes
|
||||
if len(tc.expectedCSI.VolumeAttributes) != len(structuredVolume.csi.VolumeAttributes) {
|
||||
t.Errorf("CSI volume attributes does not match expected value")
|
||||
} else {
|
||||
for k, v := range tc.expectedCSI.VolumeAttributes {
|
||||
if structuredVolume.csi.VolumeAttributes[k] != v {
|
||||
t.Errorf("CSI volume attributes does not match expected value")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -527,7 +363,7 @@ func TestParsePV(t *testing.T) {
|
||||
nfsVolume.Spec.NFS = &corev1api.NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"}
|
||||
csiVolume := corev1api.PersistentVolume{}
|
||||
csiVolume.Spec.Capacity = corev1api.ResourceList{corev1api.ResourceStorage: resource.MustParse("2Gi")}
|
||||
csiVolume.Spec.CSI = &corev1api.CSIPersistentVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}}
|
||||
csiVolume.Spec.CSI = &corev1api.CSIPersistentVolumeSource{Driver: "csi.example.com"}
|
||||
emptyVolume := corev1api.PersistentVolume{}
|
||||
|
||||
// Test cases
|
||||
@@ -547,7 +383,7 @@ func TestParsePV(t *testing.T) {
|
||||
name: "CSI volume",
|
||||
inputVolume: &csiVolume,
|
||||
expectedNFS: nil,
|
||||
expectedCSI: &csiVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
expectedCSI: &csiVolumeSource{Driver: "csi.example.com"},
|
||||
},
|
||||
{
|
||||
name: "Empty volume",
|
||||
@@ -579,19 +415,9 @@ func TestParsePV(t *testing.T) {
|
||||
if tc.expectedCSI != nil {
|
||||
if structuredVolume.csi == nil {
|
||||
t.Errorf("Expected a non-nil CSI volume source")
|
||||
} else if tc.expectedCSI.Driver != structuredVolume.csi.Driver {
|
||||
} else if *tc.expectedCSI != *structuredVolume.csi {
|
||||
t.Errorf("CSI volume source does not match expected value")
|
||||
}
|
||||
// Check volumeAttributes
|
||||
if len(tc.expectedCSI.VolumeAttributes) != len(structuredVolume.csi.VolumeAttributes) {
|
||||
t.Errorf("CSI volume attributes does not match expected value")
|
||||
} else {
|
||||
for k, v := range tc.expectedCSI.VolumeAttributes {
|
||||
if structuredVolume.csi.VolumeAttributes[k] != v {
|
||||
t.Errorf("CSI volume attributes does not match expected value")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -27,8 +27,6 @@ const currentSupportDataVersion = "v1"
|
||||
|
||||
type csiVolumeSource struct {
|
||||
Driver string `yaml:"driver,omitempty"`
|
||||
// CSI volume attributes
|
||||
VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"`
|
||||
}
|
||||
|
||||
type nFSVolumeSource struct {
|
||||
@@ -45,7 +43,6 @@ type volumeConditions struct {
|
||||
NFS *nFSVolumeSource `yaml:"nfs,omitempty"`
|
||||
CSI *csiVolumeSource `yaml:"csi,omitempty"`
|
||||
VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"`
|
||||
PVCLabels map[string]string `yaml:"pvcLabels,omitempty"`
|
||||
}
|
||||
|
||||
func (c *capacityCondition) validate() error {
|
||||
@@ -71,15 +68,12 @@ func (c *nfsCondition) validate() error {
|
||||
}
|
||||
|
||||
func (c *csiCondition) validate() error {
|
||||
if c != nil && c.csi != nil && c.csi.Driver == "" && c.csi.VolumeAttributes != nil {
|
||||
return errors.New("csi driver should not be empty when filtering by volume attributes")
|
||||
}
|
||||
|
||||
// validate by yamlv3
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeStruct restric validate the keys in decoded mappings to exist as fields in the struct being decoded into
|
||||
func decodeStruct(r io.Reader, s any) error {
|
||||
func decodeStruct(r io.Reader, s interface{}) error {
|
||||
dec := yaml.NewDecoder(r)
|
||||
dec.KnownFields(true)
|
||||
return dec.Decode(s)
|
||||
|
||||
@@ -94,12 +94,12 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"unknown": "",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
@@ -115,11 +115,11 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
@@ -135,11 +135,11 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": "ebs-sc",
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
@@ -155,7 +155,7 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": "aws.efs.csi.driver",
|
||||
@@ -165,47 +165,6 @@ func TestValidate(t *testing.T) {
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "error format of csi driver",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": []string{"aws.efs.csi.driver"},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "error format of csi driver volumeAttributes",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
"volumeAttributes": "test",
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "unsupported version",
|
||||
res: &ResourcePolicies{
|
||||
@@ -213,10 +172,10 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
@@ -232,10 +191,10 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "unsupported"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
@@ -251,7 +210,7 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"nfs": "aws.efs.csi.driver",
|
||||
@@ -261,65 +220,6 @@ func TestValidate(t *testing.T) {
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported format volume policies only csi driver",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "unsupported format volume policies only csi volumeattributes",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"volumeAttributes": map[string]string{
|
||||
"key1": "value1",
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported format volume policies with csi driver and volumeattributes",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
"volumeAttributes": map[string]string{
|
||||
"key1": "value1",
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format volume policies",
|
||||
res: &ResourcePolicies{
|
||||
@@ -327,15 +227,15 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
"nfs": any(
|
||||
map[string]any{
|
||||
"nfs": interface{}(
|
||||
map[string]interface{}{
|
||||
"server": "192.168.20.90",
|
||||
"path": "/mnt/data/",
|
||||
}),
|
||||
@@ -352,15 +252,15 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "snapshot"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
"nfs": any(
|
||||
map[string]any{
|
||||
"nfs": interface{}(
|
||||
map[string]interface{}{
|
||||
"server": "192.168.20.90",
|
||||
"path": "/mnt/data/",
|
||||
}),
|
||||
@@ -377,15 +277,15 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "fs-backup"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
"nfs": any(
|
||||
map[string]any{
|
||||
"nfs": interface{}(
|
||||
map[string]interface{}{
|
||||
"server": "192.168.20.90",
|
||||
"path": "/mnt/data/",
|
||||
}),
|
||||
@@ -402,15 +302,15 @@ func TestValidate(t *testing.T) {
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: Snapshot},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: FSBackup},
|
||||
Conditions: map[string]any{
|
||||
"nfs": any(
|
||||
map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"nfs": interface{}(
|
||||
map[string]interface{}{
|
||||
"server": "192.168.20.90",
|
||||
"path": "/mnt/data/",
|
||||
}),
|
||||
@@ -420,39 +320,6 @@ func TestValidate(t *testing.T) {
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format volume policies with pvcLabels (valid map)",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
"app": "database",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "error format volume policies with pvcLabels (not a map)",
|
||||
res: &ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": "production",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
@@ -46,7 +46,7 @@ func (rp *MockRestartableProcess) ResetIfNeeded() error {
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
func (rp *MockRestartableProcess) GetByKindAndName(key process.KindAndName) (any, error) {
|
||||
func (rp *MockRestartableProcess) GetByKindAndName(key process.KindAndName) (interface{}, error) {
|
||||
args := rp.Called(key)
|
||||
return args.Get(0), args.Error(1)
|
||||
}
|
||||
@@ -57,21 +57,21 @@ func (rp *MockRestartableProcess) Stop() {
|
||||
|
||||
type RestartableDelegateTest struct {
|
||||
Function string
|
||||
Inputs []any
|
||||
ExpectedErrorOutputs []any
|
||||
ExpectedDelegateOutputs []any
|
||||
Inputs []interface{}
|
||||
ExpectedErrorOutputs []interface{}
|
||||
ExpectedDelegateOutputs []interface{}
|
||||
}
|
||||
|
||||
type Mockable interface {
|
||||
Test(t mock.TestingT)
|
||||
On(method string, args ...any) *mock.Call
|
||||
On(method string, args ...interface{}) *mock.Call
|
||||
AssertExpectations(t mock.TestingT) bool
|
||||
}
|
||||
|
||||
func RunRestartableDelegateTests(
|
||||
t *testing.T,
|
||||
kind common.PluginKind,
|
||||
newRestartable func(key process.KindAndName, p process.RestartableProcess) any,
|
||||
newRestartable func(key process.KindAndName, p process.RestartableProcess) interface{},
|
||||
newMock func() Mockable,
|
||||
tests ...RestartableDelegateTest,
|
||||
) {
|
||||
@@ -92,7 +92,7 @@ func RunRestartableDelegateTests(
|
||||
method := reflect.ValueOf(r).MethodByName(tc.Function)
|
||||
require.NotEmpty(t, method)
|
||||
|
||||
// Convert the test case inputs ([]any) to []reflect.Value
|
||||
// Convert the test case inputs ([]interface{}) to []reflect.Value
|
||||
var inputValues []reflect.Value
|
||||
for i := range tc.Inputs {
|
||||
inputValues = append(inputValues, reflect.ValueOf(tc.Inputs[i]))
|
||||
@@ -102,7 +102,7 @@ func RunRestartableDelegateTests(
|
||||
actual := method.Call(inputValues)
|
||||
|
||||
// This Function asserts that the actual outputs match the expected outputs
|
||||
checkOutputs := func(expected []any, actual []reflect.Value) {
|
||||
checkOutputs := func(expected []interface{}, actual []reflect.Value) {
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
|
||||
for i := range actual {
|
||||
|
||||
@@ -18,6 +18,7 @@ package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -97,7 +98,7 @@ func GetDefaultBackupStorageLocations(ctx context.Context, kbClient client.Clien
|
||||
locations := new(velerov1api.BackupStorageLocationList)
|
||||
defaultLocations := new(velerov1api.BackupStorageLocationList)
|
||||
if err := kbClient.List(context.Background(), locations, &client.ListOptions{Namespace: namespace}); err != nil {
|
||||
return defaultLocations, errors.Wrapf(err, "failed to list backup storage locations in namespace %s", namespace)
|
||||
return defaultLocations, errors.Wrapf(err, fmt.Sprintf("failed to list backup storage locations in namespace %s", namespace))
|
||||
}
|
||||
|
||||
for _, location := range locations.Items {
|
||||
|
||||
@@ -43,3 +43,9 @@ func ImageTag() string {
|
||||
func DefaultVeleroImage() string {
|
||||
return fmt.Sprintf("%s/%s:%s", imageRegistry(), "velero", ImageTag())
|
||||
}
|
||||
|
||||
// DefaultRestoreHelperImage returns the default container image to use for the restore helper
|
||||
// for this version of Velero.
|
||||
func DefaultRestoreHelperImage() string {
|
||||
return fmt.Sprintf("%s/%s:%s", imageRegistry(), "velero-restore-helper", ImageTag())
|
||||
}
|
||||
|
||||
@@ -134,3 +134,7 @@ func testDefaultImage(t *testing.T, defaultImageFn func() string, imageName stri
|
||||
func TestDefaultVeleroImage(t *testing.T) {
|
||||
testDefaultImage(t, DefaultVeleroImage, "velero")
|
||||
}
|
||||
|
||||
func TestDefaultRestoreHelperImage(t *testing.T) {
|
||||
testDefaultImage(t, DefaultRestoreHelperImage, "velero-restore-helper")
|
||||
}
|
||||
|
||||
@@ -49,7 +49,13 @@ const (
|
||||
|
||||
const (
|
||||
FieldValueIsUnknown string = "unknown"
|
||||
kopia string = "kopia"
|
||||
veleroDatamover string = "velero"
|
||||
|
||||
//TODO reuse these constants from csi-plugin-for-velero after it's merged into the same repo
|
||||
|
||||
CSIDriverNameAnnotation = "velero.io/csi-driver-name"
|
||||
VolumeSnapshotHandleAnnotation = "velero.io/csi-volumesnapshot-handle"
|
||||
)
|
||||
|
||||
type BackupVolumeInfo struct {
|
||||
@@ -641,7 +647,7 @@ func (v *BackupVolumesInformation) generateVolumeInfoFromDataUpload() {
|
||||
},
|
||||
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
|
||||
DataMover: dataMover,
|
||||
UploaderType: velerov1api.BackupRepositoryTypeKopia,
|
||||
UploaderType: kopia,
|
||||
OperationID: operation.Spec.OperationID,
|
||||
Phase: dataUpload.Status.Phase,
|
||||
},
|
||||
@@ -830,7 +836,7 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
|
||||
continue
|
||||
}
|
||||
pvcNS, pvcName := n[0], n[1]
|
||||
var restoreSize int64
|
||||
var restoreSize int64 = 0
|
||||
if csiSnapshot.Status != nil && csiSnapshot.Status.RestoreSize != nil {
|
||||
restoreSize = csiSnapshot.Status.RestoreSize.Value()
|
||||
}
|
||||
@@ -838,16 +844,15 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
|
||||
if csiSnapshot.Spec.Source.VolumeSnapshotContentName != nil {
|
||||
vscName = *csiSnapshot.Spec.Source.VolumeSnapshotContentName
|
||||
}
|
||||
|
||||
volumeInfo := &RestoreVolumeInfo{
|
||||
PVCNamespace: pvcNS,
|
||||
PVCName: pvcName,
|
||||
SnapshotDataMoved: false,
|
||||
RestoreMethod: CSISnapshot,
|
||||
CSISnapshotInfo: &CSISnapshotInfo{
|
||||
SnapshotHandle: csiSnapshot.Annotations[velerov1api.VolumeSnapshotHandleAnnotation],
|
||||
SnapshotHandle: csiSnapshot.Annotations[VolumeSnapshotHandleAnnotation],
|
||||
Size: restoreSize,
|
||||
Driver: csiSnapshot.Annotations[velerov1api.DriverNameAnnotation],
|
||||
Driver: csiSnapshot.Annotations[CSIDriverNameAnnotation],
|
||||
VSCName: vscName,
|
||||
},
|
||||
}
|
||||
@@ -884,7 +889,7 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
|
||||
RestoreMethod: CSISnapshot,
|
||||
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
|
||||
DataMover: dataMover,
|
||||
UploaderType: velerov1api.BackupRepositoryTypeKopia,
|
||||
UploaderType: kopia,
|
||||
SnapshotHandle: dd.Spec.SnapshotID,
|
||||
OperationID: operationID,
|
||||
},
|
||||
|
||||
@@ -1170,8 +1170,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
|
||||
pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
|
||||
"testNS/testPVC": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations(velerov1api.VolumeSnapshotHandleAnnotation, "csi-snap-001",
|
||||
velerov1api.DriverNameAnnotation, "test-csi-driver"),
|
||||
builder.WithAnnotations(VolumeSnapshotHandleAnnotation, "csi-snap-001",
|
||||
CSIDriverNameAnnotation, "test-csi-driver"),
|
||||
).SourceVolumeSnapshotContentName("test-vsc-001").
|
||||
Status().RestoreSize("1Gi").Result(),
|
||||
},
|
||||
@@ -1269,7 +1269,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
|
||||
SnapshotDataMoved: true,
|
||||
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
|
||||
DataMover: "velero",
|
||||
UploaderType: velerov1api.BackupRepositoryTypeKopia,
|
||||
UploaderType: kopia,
|
||||
SnapshotHandle: "dd-snap-001",
|
||||
OperationID: "dd-operation-001",
|
||||
},
|
||||
@@ -1282,7 +1282,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
|
||||
SnapshotDataMoved: true,
|
||||
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
|
||||
DataMover: "velero",
|
||||
UploaderType: velerov1api.BackupRepositoryTypeKopia,
|
||||
UploaderType: kopia,
|
||||
SnapshotHandle: "dd-snap-002",
|
||||
OperationID: "dd-operation-002",
|
||||
},
|
||||
|
||||
@@ -81,8 +81,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
}
|
||||
|
||||
if v.volumePolicy != nil {
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
action, err := v.volumePolicy.GetMatchAction(pv)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
|
||||
return false, err
|
||||
@@ -129,7 +128,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
||||
|
||||
if !boolptr.IsSetToFalse(v.snapshotVolumes) {
|
||||
// If the backup.Spec.SnapshotVolumes is not set, or set to true, then should take the snapshot.
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false", pv.Name)
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -144,12 +143,10 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
}
|
||||
|
||||
if v.volumePolicy != nil {
|
||||
var resource any
|
||||
var err error
|
||||
var resource interface{}
|
||||
resource = &volume
|
||||
var pvc = &corev1api.PersistentVolumeClaim{}
|
||||
if volume.VolumeSource.PersistentVolumeClaim != nil {
|
||||
pvc, err = kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
|
||||
pvc, err := kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
|
||||
return false, err
|
||||
@@ -161,13 +158,7 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
|
||||
}
|
||||
}
|
||||
|
||||
pv, podVolume, err := v.getVolumeFromResource(resource)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, podVolume, pvc)
|
||||
action, err := v.volumePolicy.GetMatchAction(vfd)
|
||||
action, err := v.volumePolicy.GetMatchAction(resource)
|
||||
if err != nil {
|
||||
v.logger.WithError(err).Error("fail to get VolumePolicy match action for volume")
|
||||
return false, err
|
||||
@@ -256,12 +247,3 @@ func (v *volumeHelperImpl) shouldIncludeVolumeInBackup(vol corev1api.Volume) boo
|
||||
}
|
||||
return includeVolumeInBackup
|
||||
}
|
||||
|
||||
func (v *volumeHelperImpl) getVolumeFromResource(resource any) (*corev1api.PersistentVolume, *corev1api.Volume, error) {
|
||||
if pv, ok := resource.(*corev1api.PersistentVolume); ok {
|
||||
return pv, nil, nil
|
||||
} else if podVol, ok := resource.(*corev1api.Volume); ok {
|
||||
return nil, podVol, nil
|
||||
}
|
||||
return nil, nil, fmt.Errorf("resource is not a PersistentVolume or Volume")
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -78,7 +78,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -99,7 +99,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -121,7 +121,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -152,7 +152,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -186,7 +186,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -220,7 +220,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -253,7 +253,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -314,7 +314,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
|
||||
fakeClient.Create(context.Background(), tc.pod)
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
var p *resourcepolicies.Policies = nil
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
@@ -465,7 +465,7 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -540,7 +540,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -566,7 +566,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"volumeTypes": []string{"emptyDir"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -600,7 +600,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -635,7 +635,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp3-csi"},
|
||||
},
|
||||
Action: resourcepolicies.Action{
|
||||
@@ -679,7 +679,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
fakeClient.Create(context.Background(), tc.pod)
|
||||
}
|
||||
|
||||
var p *resourcepolicies.Policies
|
||||
var p *resourcepolicies.Policies = nil
|
||||
if tc.resourcePolicies != nil {
|
||||
p = &resourcepolicies.Policies{}
|
||||
err := p.BuildPolicy(tc.resourcePolicies)
|
||||
@@ -706,37 +706,3 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetVolumeFromResource(t *testing.T) {
|
||||
helper := &volumeHelperImpl{}
|
||||
|
||||
t.Run("PersistentVolume input", func(t *testing.T) {
|
||||
pv := &corev1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pv",
|
||||
},
|
||||
}
|
||||
outPV, outPod, err := helper.getVolumeFromResource(pv)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, outPV)
|
||||
assert.Nil(t, outPod)
|
||||
assert.Equal(t, "test-pv", outPV.Name)
|
||||
})
|
||||
|
||||
t.Run("Volume input", func(t *testing.T) {
|
||||
vol := &corev1.Volume{
|
||||
Name: "test-volume",
|
||||
}
|
||||
outPV, outPod, err := helper.getVolumeFromResource(vol)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, outPV)
|
||||
assert.NotNil(t, outPod)
|
||||
assert.Equal(t, "test-volume", outPod.Name)
|
||||
})
|
||||
|
||||
t.Run("Invalid input", func(t *testing.T) {
|
||||
_, _, err := helper.getVolumeFromResource("invalid")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "resource is not a PersistentVolume or Volume")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -71,43 +71,10 @@ type BackupRepositoryStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastMaintenanceTime is the last time repo maintenance succeeded.
|
||||
// LastMaintenanceTime is the last time maintenance was run.
|
||||
// +optional
|
||||
// +nullable
|
||||
LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"`
|
||||
|
||||
// RecentMaintenance is status of the recent repo maintenance.
|
||||
// +optional
|
||||
RecentMaintenance []BackupRepositoryMaintenanceStatus `json:"recentMaintenance,omitempty"`
|
||||
}
|
||||
|
||||
// BackupRepositoryMaintenanceResult represents the result of a repo maintenance.
|
||||
// +kubebuilder:validation:Enum=Succeeded;Failed
|
||||
type BackupRepositoryMaintenanceResult string
|
||||
|
||||
const (
|
||||
BackupRepositoryMaintenanceSucceeded BackupRepositoryMaintenanceResult = "Succeeded"
|
||||
BackupRepositoryMaintenanceFailed BackupRepositoryMaintenanceResult = "Failed"
|
||||
)
|
||||
|
||||
type BackupRepositoryMaintenanceStatus struct {
|
||||
// Result is the result of the repo maintenance.
|
||||
// +optional
|
||||
Result BackupRepositoryMaintenanceResult `json:"result,omitempty"`
|
||||
|
||||
// StartTimestamp is the start time of the repo maintenance.
|
||||
// +optional
|
||||
// +nullable
|
||||
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
|
||||
|
||||
// CompleteTimestamp is the completion time of the repo maintenance.
|
||||
// +optional
|
||||
// +nullable
|
||||
CompleteTimestamp *metav1.Time `json:"completeTimestamp,omitempty"`
|
||||
|
||||
// Message is a message about the current status of the repo maintenance.
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
// TODO(2.0) After converting all resources to use the runtime-controller client,
|
||||
|
||||
@@ -165,29 +165,6 @@ func (in *BackupRepositoryList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupRepositoryMaintenanceStatus) DeepCopyInto(out *BackupRepositoryMaintenanceStatus) {
|
||||
*out = *in
|
||||
if in.StartTimestamp != nil {
|
||||
in, out := &in.StartTimestamp, &out.StartTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.CompleteTimestamp != nil {
|
||||
in, out := &in.CompleteTimestamp, &out.CompleteTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryMaintenanceStatus.
|
||||
func (in *BackupRepositoryMaintenanceStatus) DeepCopy() *BackupRepositoryMaintenanceStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(BackupRepositoryMaintenanceStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) {
|
||||
*out = *in
|
||||
@@ -218,13 +195,6 @@ func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) {
|
||||
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.RecentMaintenance != nil {
|
||||
in, out := &in.RecentMaintenance, &out.RecentMaintenance
|
||||
*out = make([]BackupRepositoryMaintenanceStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus.
|
||||
|
||||
@@ -54,10 +54,6 @@ type DataDownloadSpec struct {
|
||||
// OperationTimeout specifies the time used to wait internal operations,
|
||||
// before returning error as timeout.
|
||||
OperationTimeout metav1.Duration `json:"operationTimeout"`
|
||||
|
||||
// NodeOS is OS of the node where the DataDownload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
}
|
||||
|
||||
// TargetVolumeSpec is the specification for a target PVC.
|
||||
@@ -119,16 +115,6 @@ type DataDownloadStatus struct {
|
||||
// Node is name of the node where the DataDownload is processed.
|
||||
// +optional
|
||||
Node string `json:"node,omitempty"`
|
||||
|
||||
// Node is name of the node where the DataUpload is prepared.
|
||||
// +optional
|
||||
AcceptedByNode string `json:"acceptedByNode,omitempty"`
|
||||
|
||||
// AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
// The server's time is used for AcceptedTimestamp
|
||||
// +optional
|
||||
// +nullable
|
||||
AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"`
|
||||
}
|
||||
|
||||
// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
|
||||
|
||||
@@ -96,16 +96,6 @@ const (
|
||||
DataUploadPhaseFailed DataUploadPhase = "Failed"
|
||||
)
|
||||
|
||||
// NodeOS represents OS of a node.
|
||||
// +kubebuilder:validation:Enum=auto;linux;windows
|
||||
type NodeOS string
|
||||
|
||||
const (
|
||||
NodeOSLinux NodeOS = "linux"
|
||||
NodeOSWindows NodeOS = "windows"
|
||||
NodeOSAuto NodeOS = "auto"
|
||||
)
|
||||
|
||||
// DataUploadStatus is the current status of a DataUpload.
|
||||
type DataUploadStatus struct {
|
||||
// Phase is the current state of the DataUpload.
|
||||
@@ -154,20 +144,6 @@ type DataUploadStatus struct {
|
||||
// Node is name of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
Node string `json:"node,omitempty"`
|
||||
|
||||
// NodeOS is OS of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
|
||||
// AcceptedByNode is name of the node where the DataUpload is prepared.
|
||||
// +optional
|
||||
AcceptedByNode string `json:"acceptedByNode,omitempty"`
|
||||
|
||||
// AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
// The server's time is used for AcceptedTimestamp
|
||||
// +optional
|
||||
// +nullable
|
||||
AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"`
|
||||
}
|
||||
|
||||
// TODO(2.0) After converting all resources to use the runttime-controller client,
|
||||
@@ -236,8 +212,4 @@ type DataUploadResult struct {
|
||||
// +optional
|
||||
// +nullable
|
||||
DataMoverResult *map[string]string `json:"dataMoverResult,omitempty"`
|
||||
|
||||
// NodeOS is OS of the node where the DataUpload is processed.
|
||||
// +optional
|
||||
NodeOS NodeOS `json:"nodeOS,omitempty"`
|
||||
}
|
||||
|
||||
@@ -118,10 +118,6 @@ func (in *DataDownloadStatus) DeepCopyInto(out *DataDownloadStatus) {
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
if in.AcceptedTimestamp != nil {
|
||||
in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadStatus.
|
||||
@@ -270,10 +266,6 @@ func (in *DataUploadStatus) DeepCopyInto(out *DataUploadStatus) {
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.Progress = in.Progress
|
||||
if in.AcceptedTimestamp != nil {
|
||||
in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadStatus.
|
||||
|
||||
@@ -36,9 +36,9 @@ import (
|
||||
|
||||
func TestBackupPVAction(t *testing.T) {
|
||||
pvc := &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"spec": map[string]any{},
|
||||
"status": map[string]any{},
|
||||
Object: map[string]interface{}{
|
||||
"spec": map[string]interface{}{},
|
||||
"status": map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestBackupPVAction(t *testing.T) {
|
||||
|
||||
// empty spec.volumeName should result in no error
|
||||
// and no additional items
|
||||
pvc.Object["spec"].(map[string]any)["volumeName"] = ""
|
||||
pvc.Object["spec"].(map[string]interface{})["volumeName"] = ""
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, additional)
|
||||
@@ -116,28 +116,28 @@ func TestBackupPVAction(t *testing.T) {
|
||||
|
||||
// non-empty spec.volumeName when status.phase is empty
|
||||
// should result in no error and no additional items
|
||||
pvc.Object["spec"].(map[string]any)["volumeName"] = "myVolume"
|
||||
pvc.Object["spec"].(map[string]interface{})["volumeName"] = "myVolume"
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, additional)
|
||||
|
||||
// non-empty spec.volumeName when status.phase is 'Pending'
|
||||
// should result in no error and no additional items
|
||||
pvc.Object["status"].(map[string]any)["phase"] = corev1api.ClaimPending
|
||||
pvc.Object["status"].(map[string]interface{})["phase"] = corev1api.ClaimPending
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, additional)
|
||||
|
||||
// non-empty spec.volumeName when status.phase is 'Lost'
|
||||
// should result in no error and no additional items
|
||||
pvc.Object["status"].(map[string]any)["phase"] = corev1api.ClaimLost
|
||||
pvc.Object["status"].(map[string]interface{})["phase"] = corev1api.ClaimLost
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, additional)
|
||||
|
||||
// non-empty spec.volumeName when status.phase is 'Bound'
|
||||
// should result in no error and one additional item for the PV
|
||||
pvc.Object["status"].(map[string]any)["phase"] = corev1api.ClaimBound
|
||||
pvc.Object["status"].(map[string]interface{})["phase"] = corev1api.ClaimBound
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, additional, 1)
|
||||
@@ -145,7 +145,7 @@ func TestBackupPVAction(t *testing.T) {
|
||||
|
||||
// empty spec.volumeName when status.phase is 'Bound' should
|
||||
// result in no error and no additional items
|
||||
pvc.Object["spec"].(map[string]any)["volumeName"] = ""
|
||||
pvc.Object["spec"].(map[string]interface{})["volumeName"] = ""
|
||||
_, additional, err = a.Execute(pvc, backup)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, additional)
|
||||
|
||||
@@ -19,7 +19,6 @@ package csi
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -485,7 +484,9 @@ func newDataUpload(
|
||||
if backup.Spec.UploaderConfig != nil &&
|
||||
backup.Spec.UploaderConfig.ParallelFilesUpload > 0 {
|
||||
dataUpload.Spec.DataMoverConfig = make(map[string]string)
|
||||
dataUpload.Spec.DataMoverConfig[uploaderUtil.ParallelFilesUpload] = strconv.Itoa(backup.Spec.UploaderConfig.ParallelFilesUpload)
|
||||
dataUpload.Spec.DataMoverConfig[uploaderUtil.ParallelFilesUpload] = fmt.Sprintf(
|
||||
"%d", backup.Spec.UploaderConfig.ParallelFilesUpload,
|
||||
)
|
||||
}
|
||||
|
||||
return dataUpload
|
||||
@@ -552,7 +553,7 @@ func cancelDataUpload(
|
||||
}
|
||||
|
||||
func NewPvcBackupItemAction(f client.Factory) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (any, error) {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
|
||||
@@ -124,8 +124,8 @@ func TestExecute(t *testing.T) {
|
||||
operationID: ".",
|
||||
expectedErr: nil,
|
||||
expectedPVC: builder.ForPersistentVolumeClaim("velero", "testPVC").
|
||||
ObjectMeta(builder.WithAnnotations(velerov1api.MustIncludeAdditionalItemAnnotation, "true", velerov1api.DataUploadNameAnnotation, "velero/"),
|
||||
builder.WithLabels(velerov1api.BackupNameLabel, "test")).
|
||||
ObjectMeta(builder.WithAnnotations(velerov1api.MustIncludeAdditionalItemAnnotation, "true", velerov1api.DataUploadNameAnnotation, "velero/", velerov1api.VolumeSnapshotLabel, ""),
|
||||
builder.WithLabels(velerov1api.BackupNameLabel, "test", velerov1api.VolumeSnapshotLabel, "")).
|
||||
VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
|
||||
},
|
||||
{
|
||||
|
||||
@@ -131,13 +131,13 @@ func (p *volumeSnapshotBackupItemAction) Execute(
|
||||
backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
|
||||
p.log.
|
||||
WithField("Backup", fmt.Sprintf("%s/%s", backup.Namespace, backup.Name)).
|
||||
WithField("BackupPhase", backup.Status.Phase).Debugf("Cleaning VolumeSnapshots.")
|
||||
WithField("BackupPhase", backup.Status.Phase).Debugf("Clean VolumeSnapshots.")
|
||||
|
||||
if vsc == nil {
|
||||
vsc = &snapshotv1api.VolumeSnapshotContent{}
|
||||
}
|
||||
|
||||
csi.DeleteReadyVolumeSnapshot(*vs, *vsc, p.crClient, p.log)
|
||||
csi.DeleteVolumeSnapshot(*vs, *vsc, backup, p.crClient, p.log)
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
@@ -164,7 +164,6 @@ func (p *volumeSnapshotBackupItemAction) Execute(
|
||||
annotations[velerov1api.VolumeSnapshotHandleAnnotation] = *vsc.Status.SnapshotHandle
|
||||
annotations[velerov1api.DriverNameAnnotation] = vsc.Spec.Driver
|
||||
}
|
||||
|
||||
if vsc.Status.RestoreSize != nil {
|
||||
annotations[velerov1api.VolumeSnapshotRestoreSize] = resource.NewQuantity(
|
||||
*vsc.Status.RestoreSize, resource.BinarySI).String()
|
||||
@@ -373,7 +372,7 @@ func (p *volumeSnapshotBackupItemAction) Cancel(
|
||||
func NewVolumeSnapshotBackupItemAction(
|
||||
f client.Factory,
|
||||
) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (any, error) {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
|
||||
@@ -119,6 +119,6 @@ func (p *volumeSnapshotClassBackupItemAction) Cancel(
|
||||
|
||||
// NewVolumeSnapshotClassBackupItemAction returns a
|
||||
// VolumeSnapshotClassBackupItemAction instance.
|
||||
func NewVolumeSnapshotClassBackupItemAction(logger logrus.FieldLogger) (any, error) {
|
||||
func NewVolumeSnapshotClassBackupItemAction(logger logrus.FieldLogger) (interface{}, error) {
|
||||
return &volumeSnapshotClassBackupItemAction{log: logger}, nil
|
||||
}
|
||||
|
||||
@@ -136,6 +136,6 @@ func (p *volumeSnapshotContentBackupItemAction) Cancel(
|
||||
// VolumeSnapshotContentBackupItemAction instance.
|
||||
func NewVolumeSnapshotContentBackupItemAction(
|
||||
logger logrus.FieldLogger,
|
||||
) (any, error) {
|
||||
) (interface{}, error) {
|
||||
return &volumeSnapshotContentBackupItemAction{log: logger}, nil
|
||||
}
|
||||
|
||||
@@ -208,7 +208,7 @@ func TestRemapCRDVersionActionData(t *testing.T) {
|
||||
// For ElasticSearch and Kibana, problems manifested when additionalPrinterColumns was moved from the top-level spec down to the
|
||||
// versions slice.
|
||||
if test.expectAdditionalColumns {
|
||||
_, ok := item.UnstructuredContent()["spec"].(map[string]any)["additionalPrinterColumns"]
|
||||
_, ok := item.UnstructuredContent()["spec"].(map[string]interface{})["additionalPrinterColumns"]
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,105 +0,0 @@
|
||||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// backedUpItemsMap keeps track of the items already backed up for the current Velero Backup
|
||||
type backedUpItemsMap struct {
|
||||
*sync.RWMutex
|
||||
backedUpItems map[itemKey]struct{}
|
||||
totalItems map[itemKey]struct{}
|
||||
}
|
||||
|
||||
func NewBackedUpItemsMap() *backedUpItemsMap {
|
||||
return &backedUpItemsMap{
|
||||
RWMutex: &sync.RWMutex{},
|
||||
backedUpItems: make(map[itemKey]struct{}),
|
||||
totalItems: make(map[itemKey]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) CopyItemMap() map[itemKey]struct{} {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
returnMap := make(map[itemKey]struct{}, len(m.backedUpItems))
|
||||
for key, val := range m.backedUpItems {
|
||||
returnMap[key] = val
|
||||
}
|
||||
return returnMap
|
||||
}
|
||||
|
||||
// ResourceMap returns a map of the backed up items.
|
||||
// For each map entry, the key is the resource type,
|
||||
// and the value is a list of namespaced names for the resource.
|
||||
func (m *backedUpItemsMap) ResourceMap() map[string][]string {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
resources := map[string][]string{}
|
||||
for i := range m.backedUpItems {
|
||||
entry := i.name
|
||||
if i.namespace != "" {
|
||||
entry = fmt.Sprintf("%s/%s", i.namespace, i.name)
|
||||
}
|
||||
resources[i.resource] = append(resources[i.resource], entry)
|
||||
}
|
||||
|
||||
// sort namespace/name entries for each GVK
|
||||
for _, v := range resources {
|
||||
sort.Strings(v)
|
||||
}
|
||||
|
||||
return resources
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) Len() int {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
return len(m.backedUpItems)
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) BackedUpAndTotalLen() (int, int) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
return len(m.backedUpItems), len(m.totalItems)
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) Has(key itemKey) bool {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
_, exists := m.backedUpItems[key]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) AddItem(key itemKey) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.backedUpItems[key] = struct{}{}
|
||||
m.totalItems[key] = struct{}{}
|
||||
}
|
||||
|
||||
func (m *backedUpItemsMap) AddItemToTotal(key itemKey) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.totalItems[key] = struct{}{}
|
||||
}
|
||||
@@ -26,12 +26,10 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -39,7 +37,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/hook"
|
||||
@@ -74,9 +71,6 @@ const BackupVersion = 1
|
||||
// BackupFormatVersion is the current backup version for Velero, including major, minor, and patch.
|
||||
const BackupFormatVersion = "1.1.0"
|
||||
|
||||
// ArgoCD managed by namespace label key
|
||||
const ArgoCDManagedByNamespaceLabel = "argocd.argoproj.io/managed-by"
|
||||
|
||||
// Backupper performs backups.
|
||||
type Backupper interface {
|
||||
// Backup takes a backup using the specification in the velerov1api.Backup and writes backup and log data
|
||||
@@ -118,7 +112,6 @@ type kubernetesBackupper struct {
|
||||
podCommandExecutor podexec.PodCommandExecutor
|
||||
podVolumeBackupperFactory podvolume.BackupperFactory
|
||||
podVolumeTimeout time.Duration
|
||||
podVolumeContext context.Context
|
||||
defaultVolumesToFsBackup bool
|
||||
clientPageSize int
|
||||
uploaderType string
|
||||
@@ -239,7 +232,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
gzippedData := gzip.NewWriter(backupFile)
|
||||
defer gzippedData.Close()
|
||||
|
||||
tw := NewTarWriter(tar.NewWriter(gzippedData))
|
||||
tw := tar.NewWriter(gzippedData)
|
||||
defer tw.Close()
|
||||
|
||||
log.Info("Writing backup version file")
|
||||
@@ -251,18 +244,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
log.Infof("Including namespaces: %s", backupRequest.NamespaceIncludesExcludes.IncludesString())
|
||||
log.Infof("Excluding namespaces: %s", backupRequest.NamespaceIncludesExcludes.ExcludesString())
|
||||
|
||||
// check if there are any namespaces included in the backup which are managed by argoCD
|
||||
// We will check for the existence of a ArgoCD label in the includedNamespaces and add a warning
|
||||
// so that users are at least aware about the existence of argoCD managed ns in their backup
|
||||
// Related Issue: https://github.com/vmware-tanzu/velero/issues/7905
|
||||
if len(backupRequest.Spec.IncludedNamespaces) > 0 {
|
||||
nsManagedByArgoCD := getNamespacesManagedByArgoCD(kb.kbClient, backupRequest.Spec.IncludedNamespaces, log)
|
||||
|
||||
if len(nsManagedByArgoCD) > 0 {
|
||||
log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
|
||||
}
|
||||
}
|
||||
|
||||
if collections.UseOldResourceFilters(backupRequest.Spec) {
|
||||
backupRequest.ResourceIncludesExcludes = collections.GetGlobalResourceIncludesExcludes(kb.discoveryHelper, log,
|
||||
backupRequest.Spec.IncludedResources,
|
||||
@@ -300,6 +281,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
return err
|
||||
}
|
||||
|
||||
backupRequest.BackedUpItems = map[itemKey]struct{}{}
|
||||
|
||||
podVolumeTimeout := kb.podVolumeTimeout
|
||||
if val := backupRequest.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
|
||||
parsed, err := time.ParseDuration(val)
|
||||
@@ -310,13 +293,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
}
|
||||
}
|
||||
|
||||
var podVolumeCancelFunc context.CancelFunc
|
||||
kb.podVolumeContext, podVolumeCancelFunc = context.WithTimeout(context.Background(), podVolumeTimeout)
|
||||
defer podVolumeCancelFunc()
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
|
||||
defer cancelFunc()
|
||||
|
||||
var podVolumeBackupper podvolume.Backupper
|
||||
if kb.podVolumeBackupperFactory != nil {
|
||||
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(kb.podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
|
||||
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(ctx, backupRequest.Backup, kb.uploaderType)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
|
||||
return errors.WithStack(err)
|
||||
@@ -355,7 +337,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
}
|
||||
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
|
||||
|
||||
var resourcePolicy *resourcepolicies.Policies
|
||||
var resourcePolicy *resourcepolicies.Policies = nil
|
||||
if backupRequest.ResPolicies != nil {
|
||||
resourcePolicy = backupRequest.ResPolicies
|
||||
}
|
||||
@@ -381,7 +363,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
|
||||
!backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
|
||||
),
|
||||
kubernetesBackupper: kb,
|
||||
}
|
||||
|
||||
// helper struct to send current progress between the main
|
||||
@@ -433,8 +414,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
}
|
||||
}()
|
||||
|
||||
responseCtx, responseCancel := context.WithCancel(context.Background())
|
||||
|
||||
backedUpGroupResources := map[schema.GroupResource]bool{}
|
||||
// Maps items in the item list from GR+NamespacedName to a slice of pointers to kubernetesResources
|
||||
// We need the slice value since if the EnableAPIGroupVersions feature flag is set, there may
|
||||
@@ -447,71 +426,20 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
Name: items[i].name,
|
||||
}
|
||||
itemsMap[key] = append(itemsMap[key], items[i])
|
||||
// add to total items for progress reporting
|
||||
if items[i].kind != "" {
|
||||
backupRequest.BackedUpItems.AddItemToTotal(itemKey{
|
||||
resource: fmt.Sprintf("%s/%s", items[i].preferredGVR.GroupVersion().String(), items[i].kind),
|
||||
namespace: items[i].namespace,
|
||||
name: items[i].name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var itemBlock *BackupItemBlock
|
||||
itemBlockReturn := make(chan ItemBlockReturn, 100)
|
||||
wg := &sync.WaitGroup{}
|
||||
// Handle returns from worker pool processing ItemBlocks
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case response := <-itemBlockReturn: // process each BackupItemBlock response
|
||||
func() {
|
||||
defer wg.Done()
|
||||
if response.err != nil {
|
||||
log.WithError(errors.WithStack((response.err))).Error("Got error in BackupItemBlock.")
|
||||
}
|
||||
for _, backedUpGR := range response.resources {
|
||||
backedUpGroupResources[backedUpGR] = true
|
||||
}
|
||||
// We could eventually track which itemBlocks have finished
|
||||
// using response.itemBlock
|
||||
|
||||
// updated total is computed as "how many items we've backed up so far,
|
||||
// plus how many items are processed but not yet backed up plus how many
|
||||
// we know of that are remaining to be processed"
|
||||
backedUpItems, totalItems := backupRequest.BackedUpItems.BackedUpAndTotalLen()
|
||||
|
||||
// send a progress update
|
||||
update <- progressUpdate{
|
||||
totalItems: totalItems,
|
||||
itemsBackedUp: backedUpItems,
|
||||
}
|
||||
|
||||
if len(response.itemBlock.Items) > 0 {
|
||||
log.WithFields(map[string]any{
|
||||
"progress": "",
|
||||
"kind": response.itemBlock.Items[0].Item.GroupVersionKind().GroupKind().String(),
|
||||
"namespace": response.itemBlock.Items[0].Item.GetNamespace(),
|
||||
"name": response.itemBlock.Items[0].Item.GetName(),
|
||||
}).Infof("Backed up %d items out of an estimated total of %d (estimate will change throughout the backup)", backedUpItems, totalItems)
|
||||
}
|
||||
}()
|
||||
case <-responseCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i := range items {
|
||||
log.WithFields(map[string]any{
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": items[i].groupResource.String(),
|
||||
"namespace": items[i].namespace,
|
||||
"name": items[i].name,
|
||||
}).Infof("Processing item")
|
||||
|
||||
// Skip if this item has already been processed (in a block or previously excluded)
|
||||
if items[i].inItemBlockOrExcluded {
|
||||
// Skip if this item has already been added to an ItemBlock
|
||||
if items[i].inItemBlock {
|
||||
log.Debugf("Not creating new ItemBlock for %s %s/%s because it's already in an ItemBlock", items[i].groupResource.String(), items[i].namespace, items[i].name)
|
||||
} else {
|
||||
if itemBlock == nil {
|
||||
@@ -546,31 +474,30 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
addNextToBlock := i < len(items)-1 && items[i].orderedResource && items[i+1].orderedResource && items[i].groupResource == items[i+1].groupResource
|
||||
if itemBlock != nil && len(itemBlock.Items) > 0 && !addNextToBlock {
|
||||
log.Infof("Backing Up Item Block including %s %s/%s (%v items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items))
|
||||
|
||||
wg.Add(1)
|
||||
backupRequest.ItemBlockChannel <- ItemBlockInput{
|
||||
itemBlock: itemBlock,
|
||||
returnChan: itemBlockReturn,
|
||||
backedUpGRs := kb.backupItemBlock(*itemBlock)
|
||||
for _, backedUpGR := range backedUpGRs {
|
||||
backedUpGroupResources[backedUpGR] = true
|
||||
}
|
||||
itemBlock = nil
|
||||
}
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
wg.Wait()
|
||||
}()
|
||||
// updated total is computed as "how many items we've backed up so far, plus
|
||||
// how many items we know of that are remaining"
|
||||
totalItems := len(backupRequest.BackedUpItems) + (len(items) - (i + 1))
|
||||
|
||||
// Wait for all the ItemBlocks to be processed
|
||||
select {
|
||||
case <-done:
|
||||
log.Info("done processing ItemBlocks")
|
||||
case <-responseCtx.Done():
|
||||
log.Info("ItemBlock processing canceled")
|
||||
// send a progress update
|
||||
update <- progressUpdate{
|
||||
totalItems: totalItems,
|
||||
itemsBackedUp: len(backupRequest.BackedUpItems),
|
||||
}
|
||||
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": items[i].groupResource.String(),
|
||||
"namespace": items[i].namespace,
|
||||
"name": items[i].name,
|
||||
}).Infof("Backed up %d items out of an estimated total of %d (estimate will change throughout the backup)", len(backupRequest.BackedUpItems), totalItems)
|
||||
}
|
||||
// cancel response-processing goroutine
|
||||
responseCancel()
|
||||
|
||||
// no more progress updates will be sent on the 'update' channel
|
||||
quit <- struct{}{}
|
||||
@@ -595,9 +522,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
if updated.Status.Progress == nil {
|
||||
updated.Status.Progress = &velerov1api.BackupProgress{}
|
||||
}
|
||||
backedUpItems := backupRequest.BackedUpItems.Len()
|
||||
updated.Status.Progress.TotalItems = backedUpItems
|
||||
updated.Status.Progress.ItemsBackedUp = backedUpItems
|
||||
updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems)
|
||||
updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems)
|
||||
|
||||
// update the hooks execution status
|
||||
if updated.Status.HookStatus == nil {
|
||||
@@ -616,8 +542,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
|
||||
log.Infof("Summary for skipped PVs: %s", skippedPVSummary)
|
||||
}
|
||||
|
||||
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: backedUpItems, ItemsBackedUp: backedUpItems}
|
||||
log.WithField("progress", "").Infof("Backed up a total of %d items", backedUpItems)
|
||||
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
|
||||
log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -694,23 +620,12 @@ func (kb *kubernetesBackupper) executeItemBlockActions(
|
||||
continue
|
||||
}
|
||||
itemsMap[relatedItem] = append(itemsMap[relatedItem], &kubernetesResource{
|
||||
groupResource: relatedItem.GroupResource,
|
||||
preferredGVR: gvr,
|
||||
namespace: relatedItem.Namespace,
|
||||
name: relatedItem.Name,
|
||||
inItemBlockOrExcluded: true,
|
||||
groupResource: relatedItem.GroupResource,
|
||||
preferredGVR: gvr,
|
||||
namespace: relatedItem.Namespace,
|
||||
name: relatedItem.Name,
|
||||
inItemBlock: true,
|
||||
})
|
||||
|
||||
relatedItemMetadata, err := meta.Accessor(item)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Warn("Failed to get object metadata.")
|
||||
continue
|
||||
}
|
||||
// Don't add to ItemBlock if item is excluded
|
||||
// itemInclusionChecks logs the reason
|
||||
if !itemBlock.itemBackupper.itemInclusionChecks(log, false, relatedItemMetadata, item, relatedItem.GroupResource) {
|
||||
continue
|
||||
}
|
||||
log.Infof("adding %s %s/%s to ItemBlock", relatedItem.GroupResource, relatedItem.Namespace, relatedItem.Name)
|
||||
itemBlock.AddUnstructured(relatedItem.GroupResource, item, gvr)
|
||||
kb.executeItemBlockActions(log, item, relatedItem.GroupResource, relatedItem.Name, relatedItem.Namespace, itemsMap, itemBlock)
|
||||
@@ -718,7 +633,7 @@ func (kb *kubernetesBackupper) executeItemBlockActions(
|
||||
}
|
||||
}
|
||||
|
||||
func (kb *kubernetesBackupper) backupItemBlock(itemBlock *BackupItemBlock) []schema.GroupResource {
|
||||
func (kb *kubernetesBackupper) backupItemBlock(itemBlock BackupItemBlock) []schema.GroupResource {
|
||||
// find pods in ItemBlock
|
||||
// filter pods based on whether they still need to be backed up
|
||||
// this list will be used to run pre/post hooks
|
||||
@@ -726,64 +641,69 @@ func (kb *kubernetesBackupper) backupItemBlock(itemBlock *BackupItemBlock) []sch
|
||||
itemBlock.Log.Debug("Executing pre hooks")
|
||||
for _, item := range itemBlock.Items {
|
||||
if item.Gr == kuberesource.Pods {
|
||||
key, err := kb.getItemKey(item)
|
||||
metadata, key, err := kb.itemMetadataAndKey(item)
|
||||
if err != nil {
|
||||
itemBlock.Log.WithError(errors.WithStack(err)).Error("Error accessing pod metadata")
|
||||
continue
|
||||
}
|
||||
// Don't run hooks if pod is excluded
|
||||
if !itemBlock.itemBackupper.itemInclusionChecks(itemBlock.Log, false, metadata, item.Item, item.Gr) {
|
||||
continue
|
||||
}
|
||||
// Don't run hooks if pod has already been backed up
|
||||
if !itemBlock.itemBackupper.backupRequest.BackedUpItems.Has(key) {
|
||||
if _, exists := itemBlock.itemBackupper.backupRequest.BackedUpItems[key]; !exists {
|
||||
preHookPods = append(preHookPods, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
postHookPods, failedPods, errs := kb.handleItemBlockPreHooks(itemBlock, preHookPods)
|
||||
postHookPods, failedPods, errs := kb.handleItemBlockHooks(itemBlock, preHookPods, hook.PhasePre)
|
||||
for i, pod := range failedPods {
|
||||
itemBlock.Log.WithError(errs[i]).WithField("name", pod.Item.GetName()).Error("Error running pre hooks for pod")
|
||||
// if pre hook fails, flag pod as backed-up and move on
|
||||
key, err := kb.getItemKey(pod)
|
||||
_, key, err := kb.itemMetadataAndKey(pod)
|
||||
if err != nil {
|
||||
itemBlock.Log.WithError(errors.WithStack(err)).Error("Error accessing pod metadata")
|
||||
continue
|
||||
}
|
||||
itemBlock.itemBackupper.backupRequest.BackedUpItems.AddItem(key)
|
||||
itemBlock.itemBackupper.backupRequest.BackedUpItems[key] = struct{}{}
|
||||
}
|
||||
|
||||
itemBlock.Log.Debug("Backing up items in BackupItemBlock")
|
||||
var grList []schema.GroupResource
|
||||
for _, item := range itemBlock.Items {
|
||||
if backedUp := kb.backupItem(itemBlock.Log, item.Gr, itemBlock.itemBackupper, item.Item, item.PreferredGVR, itemBlock); backedUp {
|
||||
if backedUp := kb.backupItem(itemBlock.Log, item.Gr, itemBlock.itemBackupper, item.Item, item.PreferredGVR, &itemBlock); backedUp {
|
||||
grList = append(grList, item.Gr)
|
||||
}
|
||||
}
|
||||
|
||||
if len(postHookPods) > 0 {
|
||||
itemBlock.Log.Debug("Executing post hooks")
|
||||
kb.handleItemBlockPostHooks(itemBlock, postHookPods)
|
||||
itemBlock.Log.Debug("Executing post hooks")
|
||||
_, failedPods, errs = kb.handleItemBlockHooks(itemBlock, postHookPods, hook.PhasePost)
|
||||
for i, pod := range failedPods {
|
||||
itemBlock.Log.WithError(errs[i]).WithField("name", pod.Item.GetName()).Error("Error running post hooks for pod")
|
||||
}
|
||||
|
||||
return grList
|
||||
}
|
||||
|
||||
func (kb *kubernetesBackupper) getItemKey(item itemblock.ItemBlockItem) (itemKey, error) {
|
||||
func (kb *kubernetesBackupper) itemMetadataAndKey(item itemblock.ItemBlockItem) (metav1.Object, itemKey, error) {
|
||||
metadata, err := meta.Accessor(item.Item)
|
||||
if err != nil {
|
||||
return itemKey{}, err
|
||||
return nil, itemKey{}, err
|
||||
}
|
||||
key := itemKey{
|
||||
resource: resourceKey(item.Item),
|
||||
namespace: metadata.GetNamespace(),
|
||||
name: metadata.GetName(),
|
||||
}
|
||||
return key, nil
|
||||
return metadata, key, nil
|
||||
}
|
||||
|
||||
func (kb *kubernetesBackupper) handleItemBlockPreHooks(itemBlock *BackupItemBlock, hookPods []itemblock.ItemBlockItem) ([]itemblock.ItemBlockItem, []itemblock.ItemBlockItem, []error) {
|
||||
func (kb *kubernetesBackupper) handleItemBlockHooks(itemBlock BackupItemBlock, hookPods []itemblock.ItemBlockItem, phase hook.HookPhase) ([]itemblock.ItemBlockItem, []itemblock.ItemBlockItem, []error) {
|
||||
var successPods []itemblock.ItemBlockItem
|
||||
var failedPods []itemblock.ItemBlockItem
|
||||
var errs []error
|
||||
for _, pod := range hookPods {
|
||||
err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, hook.PhasePre, itemBlock.itemBackupper.hookTracker)
|
||||
err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, phase, itemBlock.itemBackupper.hookTracker)
|
||||
if err == nil {
|
||||
successPods = append(successPods, pod)
|
||||
} else {
|
||||
@@ -794,65 +714,6 @@ func (kb *kubernetesBackupper) handleItemBlockPreHooks(itemBlock *BackupItemBloc
|
||||
return successPods, failedPods, errs
|
||||
}
|
||||
|
||||
// The hooks cannot execute until the PVBs to be processed
|
||||
func (kb *kubernetesBackupper) handleItemBlockPostHooks(itemBlock *BackupItemBlock, hookPods []itemblock.ItemBlockItem) {
|
||||
log := itemBlock.Log
|
||||
|
||||
// the post hooks will not execute until all PVBs of the item block pods are processed
|
||||
if err := kb.waitUntilPVBsProcessed(kb.podVolumeContext, log, itemBlock, hookPods); err != nil {
|
||||
log.WithError(err).Error("failed to wait PVBs processed for the ItemBlock")
|
||||
return
|
||||
}
|
||||
|
||||
for _, pod := range hookPods {
|
||||
if err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks,
|
||||
hook.PhasePost, itemBlock.itemBackupper.hookTracker); err != nil {
|
||||
log.WithError(err).WithField("name", pod.Item.GetName()).Error("Error running post hooks for pod")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// wait all PVBs of the item block pods to be processed
|
||||
func (kb *kubernetesBackupper) waitUntilPVBsProcessed(ctx context.Context, log logrus.FieldLogger, itemBlock *BackupItemBlock, pods []itemblock.ItemBlockItem) error {
|
||||
pvbMap := map[*velerov1api.PodVolumeBackup]bool{}
|
||||
for _, pod := range pods {
|
||||
namespace, name := pod.Item.GetNamespace(), pod.Item.GetName()
|
||||
pvbs, err := itemBlock.itemBackupper.podVolumeBackupper.ListPodVolumeBackupsByPod(namespace, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to list PodVolumeBackups for pod %s/%s", namespace, name)
|
||||
}
|
||||
for _, pvb := range pvbs {
|
||||
pvbMap[pvb] = pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
|
||||
pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed
|
||||
}
|
||||
}
|
||||
|
||||
checkFunc := func(context.Context) (done bool, err error) {
|
||||
allProcessed := true
|
||||
for pvb, processed := range pvbMap {
|
||||
if processed {
|
||||
continue
|
||||
}
|
||||
updatedPVB, err := itemBlock.itemBackupper.podVolumeBackupper.GetPodVolumeBackupByPodAndVolume(pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name, pvb.Spec.Volume)
|
||||
if err != nil {
|
||||
allProcessed = false
|
||||
log.Infof("failed to get PVB: %v", err)
|
||||
continue
|
||||
}
|
||||
if updatedPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
|
||||
updatedPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed {
|
||||
pvbMap[pvb] = true
|
||||
continue
|
||||
}
|
||||
allProcessed = false
|
||||
}
|
||||
|
||||
return allProcessed, nil
|
||||
}
|
||||
|
||||
return wait.PollUntilContextCancel(ctx, 5*time.Second, true, checkFunc)
|
||||
}
|
||||
|
||||
func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource, itemBlock *BackupItemBlock) bool {
|
||||
backedUpItem, _, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, false, false, itemBlock)
|
||||
if aggregate, ok := err.(kubeerrs.Aggregate); ok {
|
||||
@@ -936,7 +797,7 @@ func (kb *kubernetesBackupper) backupCRD(log logrus.FieldLogger, gr schema.Group
|
||||
kb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr, nil)
|
||||
}
|
||||
|
||||
func (kb *kubernetesBackupper) writeBackupVersion(tw tarWriter) error {
|
||||
func (kb *kubernetesBackupper) writeBackupVersion(tw *tar.Writer) error {
|
||||
versionFile := filepath.Join(velerov1api.MetadataDir, "version")
|
||||
versionString := fmt.Sprintf("%s\n", BackupFormatVersion)
|
||||
|
||||
@@ -967,7 +828,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
) error {
|
||||
gzw := gzip.NewWriter(outBackupFile)
|
||||
defer gzw.Close()
|
||||
tw := NewTarWriter(tar.NewWriter(gzw))
|
||||
tw := tar.NewWriter(gzw)
|
||||
defer tw.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(inBackupFile)
|
||||
@@ -984,6 +845,8 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
return err
|
||||
}
|
||||
|
||||
backupRequest.BackedUpItems = map[itemKey]struct{}{}
|
||||
|
||||
// set up a temp dir for the itemCollector to use to temporarily
|
||||
// store items as they're scraped from the API.
|
||||
tempDir, err := os.MkdirTemp("", "")
|
||||
@@ -1021,7 +884,6 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
itemHookHandler: &hook.NoOpItemHookHandler{},
|
||||
podVolumeSnapshotTracker: podvolume.NewTracker(),
|
||||
hookTracker: hook.NewHookTracker(),
|
||||
kubernetesBackupper: kb,
|
||||
}
|
||||
updateFiles := make(map[string]FileForArchive)
|
||||
backedUpGroupResources := map[schema.GroupResource]bool{}
|
||||
@@ -1029,7 +891,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
unstructuredDataUploads := make([]unstructured.Unstructured, 0)
|
||||
|
||||
for i, item := range items {
|
||||
log.WithFields(map[string]any{
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": item.groupResource.String(),
|
||||
"namespace": item.namespace,
|
||||
@@ -1069,15 +931,14 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
|
||||
// updated total is computed as "how many items we've backed up so far, plus
|
||||
// how many items we know of that are remaining"
|
||||
backedUpItems := backupRequest.BackedUpItems.Len()
|
||||
totalItems := backedUpItems + (len(items) - (i + 1))
|
||||
totalItems := len(backupRequest.BackedUpItems) + (len(items) - (i + 1))
|
||||
|
||||
log.WithFields(map[string]any{
|
||||
log.WithFields(map[string]interface{}{
|
||||
"progress": "",
|
||||
"resource": item.groupResource.String(),
|
||||
"namespace": item.namespace,
|
||||
"name": item.name,
|
||||
}).Infof("Updated %d items out of an estimated total of %d (estimate will change throughout the backup finalizer)", backedUpItems, totalItems)
|
||||
}).Infof("Updated %d items out of an estimated total of %d (estimate will change throughout the backup finalizer)", len(backupRequest.BackedUpItems), totalItems)
|
||||
}
|
||||
|
||||
volumeInfos, err := backupStore.GetBackupVolumeInfos(backupRequest.Backup.Name)
|
||||
@@ -1102,14 +963,12 @@ func (kb *kubernetesBackupper) FinalizeBackup(
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithField("progress", "").Infof("Updated a total of %d items", backupRequest.BackedUpItems.Len())
|
||||
log.WithField("progress", "").Infof("Updated a total of %d items", len(backupRequest.BackedUpItems))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildFinalTarball(tr *tar.Reader, tw tarWriter, updateFiles map[string]FileForArchive) error {
|
||||
tw.Lock()
|
||||
defer tw.Unlock()
|
||||
func buildFinalTarball(tr *tar.Reader, tw *tar.Writer, updateFiles map[string]FileForArchive) error {
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@@ -1160,16 +1019,10 @@ func buildFinalTarball(tr *tar.Reader, tw tarWriter, updateFiles map[string]File
|
||||
return nil
|
||||
}
|
||||
|
||||
type tarWriter struct {
|
||||
*tar.Writer
|
||||
*sync.Mutex
|
||||
}
|
||||
|
||||
func NewTarWriter(writer *tar.Writer) tarWriter {
|
||||
return tarWriter{
|
||||
Writer: writer,
|
||||
Mutex: &sync.Mutex{},
|
||||
}
|
||||
type tarWriter interface {
|
||||
io.Closer
|
||||
Write([]byte) (int, error)
|
||||
WriteHeader(*tar.Header) error
|
||||
}
|
||||
|
||||
// updateVolumeInfos update the VolumeInfos according to the AsyncOperations
|
||||
@@ -1190,8 +1043,7 @@ func updateVolumeInfos(
|
||||
|
||||
for index := range volumeInfos {
|
||||
if volumeInfos[index].PVCName == dataUpload.Spec.SourcePVC &&
|
||||
volumeInfos[index].PVCNamespace == dataUpload.Spec.SourceNamespace &&
|
||||
volumeInfos[index].SnapshotDataMovementInfo != nil {
|
||||
volumeInfos[index].PVCNamespace == dataUpload.Spec.SourceNamespace {
|
||||
if dataUpload.Status.CompletionTimestamp != nil {
|
||||
volumeInfos[index].CompletionTimestamp = dataUpload.Status.CompletionTimestamp
|
||||
}
|
||||
@@ -1255,26 +1107,3 @@ func putVolumeInfos(
|
||||
|
||||
return backupStore.PutBackupVolumeInfos(backupName, backupVolumeInfoBuf)
|
||||
}
|
||||
|
||||
func getNamespacesManagedByArgoCD(kbClient kbclient.Client, includedNamespaces []string, log logrus.FieldLogger) []string {
|
||||
var nsManagedByArgoCD []string
|
||||
|
||||
for _, nsName := range includedNamespaces {
|
||||
ns := corev1api.Namespace{}
|
||||
if err := kbClient.Get(context.Background(), kbclient.ObjectKey{Name: nsName}, &ns); err != nil {
|
||||
// check for only those ns that exist and are included in backup
|
||||
// here we ignore cases like "" or "*" specified under includedNamespaces
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Errorf("error getting namespace %s", nsName)
|
||||
continue
|
||||
}
|
||||
|
||||
nsLabels := ns.GetLabels()
|
||||
if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 {
|
||||
nsManagedByArgoCD = append(nsManagedByArgoCD, nsName)
|
||||
}
|
||||
}
|
||||
return nsManagedByArgoCD
|
||||
}
|
||||
|
||||
@@ -72,14 +72,10 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
"v1/PersistentVolume": "persistentvolumes",
|
||||
}
|
||||
|
||||
h := newHarness(t, nil)
|
||||
defer h.itemBlockPool.Stop()
|
||||
|
||||
h := newHarness(t)
|
||||
req := &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
|
||||
backupFile := bytes.NewBuffer([]byte{})
|
||||
@@ -107,7 +103,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
// go through BackedUpItems after the backup to assemble the list of files we
|
||||
// expect to see in the tarball and compare to see if they match
|
||||
var expectedFiles []string
|
||||
for item := range req.BackedUpItems.CopyItemMap() {
|
||||
for item := range req.BackedUpItems {
|
||||
file := "resources/" + gvkToResource[item.resource]
|
||||
if item.namespace != "" {
|
||||
file = file + "/namespaces/" + item.namespace
|
||||
@@ -135,13 +131,10 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
|
||||
// backed up. It validates this by comparing their values to the length of
|
||||
// the request's BackedUpItems field.
|
||||
func TestBackupProgressIsUpdated(t *testing.T) {
|
||||
h := newHarness(t, nil)
|
||||
defer h.itemBlockPool.Stop()
|
||||
h := newHarness(t)
|
||||
req := &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -166,8 +159,8 @@ func TestBackupProgressIsUpdated(t *testing.T) {
|
||||
h.backupper.Backup(h.log, req, backupFile, nil, nil, nil)
|
||||
|
||||
require.NotNil(t, req.Status.Progress)
|
||||
assert.Equal(t, req.BackedUpItems.Len(), req.Status.Progress.TotalItems)
|
||||
assert.Equal(t, req.BackedUpItems.Len(), req.Status.Progress.ItemsBackedUp)
|
||||
assert.Len(t, req.BackedUpItems, req.Status.Progress.TotalItems)
|
||||
assert.Len(t, req.BackedUpItems, req.Status.Progress.ItemsBackedUp)
|
||||
}
|
||||
|
||||
// TestBackupOldResourceFiltering runs backups with different combinations
|
||||
@@ -871,17 +864,13 @@ func TestBackupOldResourceFiltering(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1052,17 +1041,13 @@ func TestCRDInclusion(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1151,17 +1136,13 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1182,15 +1163,12 @@ func TestBackupResourceCohabitation(t *testing.T) {
|
||||
// backed up in each backup. Verification is done by looking at the contents of the backup
|
||||
// tarball. This covers a specific issue that was fixed by https://github.com/vmware-tanzu/velero/pull/485.
|
||||
func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
h := newHarness(t, nil)
|
||||
defer h.itemBlockPool.Stop()
|
||||
h := newHarness(t)
|
||||
|
||||
// run and verify backup 1
|
||||
backup1 := &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backup1File := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -1205,8 +1183,6 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
|
||||
backup2 := &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backup2File := bytes.NewBuffer([]byte{})
|
||||
|
||||
@@ -1250,17 +1226,13 @@ func TestBackupResourceOrdering(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1361,9 +1333,6 @@ func (a *recordResourcesAction) WithSkippedCSISnapshotFlag(flag bool) *recordRes
|
||||
// TestBackupItemActionsForSkippedPV runs backups with backup item actions, and
|
||||
// verifies that the data in SkippedPVTracker is updated as expected.
|
||||
func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
backupReq *Request
|
||||
@@ -1380,15 +1349,13 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
backupReq: &Request{
|
||||
Backup: defaultBackup().SnapshotVolumes(false).Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
resPolicies: &resourcepolicies.ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []resourcepolicies.VolumePolicy{
|
||||
{
|
||||
Action: resourcepolicies.Action{Type: "snapshot"},
|
||||
Conditions: map[string]any{
|
||||
Conditions: map[string]interface{}{
|
||||
"storageClass": []string{"gp2"},
|
||||
},
|
||||
},
|
||||
@@ -1428,8 +1395,6 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
},
|
||||
includedPVs: map[string]struct{}{},
|
||||
},
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVCs(
|
||||
@@ -1455,7 +1420,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(tt *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
fakeClient = test.NewFakeControllerRuntimeClient(t, tc.runtimeResources...)
|
||||
)
|
||||
@@ -1669,17 +1634,13 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1754,17 +1715,13 @@ func TestBackupWithInvalidActions(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -1880,7 +1837,7 @@ func TestBackupActionModifications(t *testing.T) {
|
||||
},
|
||||
actions: []biav2.BackupItemAction{
|
||||
modifyingActionGetter(func(item *unstructured.Unstructured) {
|
||||
item.Object["spec"].(map[string]any)["nodeName"] = "foo"
|
||||
item.Object["spec"].(map[string]interface{})["nodeName"] = "foo"
|
||||
}),
|
||||
},
|
||||
want: map[string]unstructuredObject{
|
||||
@@ -1908,17 +1865,13 @@ func TestBackupActionModifications(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2168,17 +2121,13 @@ func TestBackupActionAdditionalItems(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2429,17 +2378,13 @@ func TestItemBlockActionsRunForCorrectItems(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2514,17 +2459,13 @@ func TestBackupWithInvalidItemBlockActions(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2770,17 +2711,13 @@ func TestItemBlockActionRelatedItems(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -2929,8 +2866,6 @@ func (*fakeVolumeSnapshotter) DeleteSnapshot(snapshotID string) error {
|
||||
// struct in place of real volume snapshotters.
|
||||
func TestBackupWithSnapshots(t *testing.T) {
|
||||
// TODO: add more verification for skippedPVTracker
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
tests := []struct {
|
||||
name string
|
||||
req *Request
|
||||
@@ -2947,8 +2882,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -2983,8 +2916,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3020,8 +2951,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3057,8 +2986,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3094,8 +3021,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3129,8 +3054,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3147,8 +3070,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3168,8 +3089,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3187,8 +3106,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "default", "default"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3209,8 +3126,6 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
newSnapshotLocation("velero", "another", "another"),
|
||||
},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.PVs(
|
||||
@@ -3258,7 +3173,7 @@ func TestBackupWithSnapshots(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
|
||||
@@ -3329,8 +3244,6 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
tests := []struct {
|
||||
name string
|
||||
req *Request
|
||||
@@ -3343,8 +3256,6 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3375,8 +3286,6 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3407,8 +3316,6 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
req: &Request{
|
||||
Backup: defaultBackup().Result(),
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
},
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(
|
||||
@@ -3425,7 +3332,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
|
||||
@@ -3484,17 +3391,13 @@ func TestBackupWithInvalidHooks(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -3958,17 +3861,13 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
podCommandExecutor = new(test.MockPodCommandExecutor)
|
||||
@@ -4004,7 +3903,7 @@ func TestBackupWithHooks(t *testing.T) {
|
||||
|
||||
type fakePodVolumeBackupperFactory struct{}
|
||||
|
||||
func (f *fakePodVolumeBackupperFactory) NewBackupper(context.Context, logrus.FieldLogger, *velerov1.Backup, string) (podvolume.Backupper, error) {
|
||||
func (f *fakePodVolumeBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) {
|
||||
return &fakePodVolumeBackupper{}, nil
|
||||
}
|
||||
|
||||
@@ -4037,24 +3936,6 @@ func (b *fakePodVolumeBackupper) WaitAllPodVolumesProcessed(log logrus.FieldLogg
|
||||
return b.pvbs
|
||||
}
|
||||
|
||||
func (b *fakePodVolumeBackupper) GetPodVolumeBackupByPodAndVolume(podNamespace, podName, volume string) (*velerov1.PodVolumeBackup, error) {
|
||||
for _, pvb := range b.pvbs {
|
||||
if pvb.Spec.Pod.Namespace == podNamespace && pvb.Spec.Pod.Name == podName && pvb.Spec.Volume == volume {
|
||||
return pvb, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (b *fakePodVolumeBackupper) ListPodVolumeBackupsByPod(podNamespace, podName string) ([]*velerov1.PodVolumeBackup, error) {
|
||||
var pvbs []*velerov1.PodVolumeBackup
|
||||
for _, pvb := range b.pvbs {
|
||||
if pvb.Spec.Pod.Namespace == podNamespace && pvb.Spec.Pod.Name == podName {
|
||||
pvbs = append(pvbs, pvb)
|
||||
}
|
||||
}
|
||||
return pvbs, nil
|
||||
}
|
||||
|
||||
// TestBackupWithPodVolume runs backups of pods that are annotated for PodVolume backup,
|
||||
// and ensures that the pod volume backupper is called, that the returned PodVolumeBackups
|
||||
// are added to the Request object, and that when PVCs are backed up with PodVolume, the
|
||||
@@ -4182,18 +4063,14 @@ func TestBackupWithPodVolume(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{tc.vsl},
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -4278,9 +4155,8 @@ func (a *pluggableIBA) Name() string {
|
||||
|
||||
type harness struct {
|
||||
*test.APIServer
|
||||
backupper *kubernetesBackupper
|
||||
log logrus.FieldLogger
|
||||
itemBlockPool ItemBlockWorkerPool
|
||||
backupper *kubernetesBackupper
|
||||
log logrus.FieldLogger
|
||||
}
|
||||
|
||||
func (h *harness) addItems(t *testing.T, resource *test.APIResource) {
|
||||
@@ -4304,7 +4180,7 @@ func (h *harness) addItems(t *testing.T, resource *test.APIResource) {
|
||||
}
|
||||
}
|
||||
|
||||
func newHarness(t *testing.T, itemBlockPool *ItemBlockWorkerPool) *harness {
|
||||
func newHarness(t *testing.T) *harness {
|
||||
t.Helper()
|
||||
|
||||
apiServer := test.NewAPIServer(t)
|
||||
@@ -4313,9 +4189,6 @@ func newHarness(t *testing.T, itemBlockPool *ItemBlockWorkerPool) *harness {
|
||||
discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log)
|
||||
require.NoError(t, err)
|
||||
|
||||
if itemBlockPool == nil {
|
||||
itemBlockPool = StartItemBlockWorkerPool(context.Background(), 1, log)
|
||||
}
|
||||
return &harness{
|
||||
APIServer: apiServer,
|
||||
backupper: &kubernetesBackupper{
|
||||
@@ -4326,10 +4199,9 @@ func newHarness(t *testing.T, itemBlockPool *ItemBlockWorkerPool) *harness {
|
||||
// unsupported
|
||||
podCommandExecutor: nil,
|
||||
podVolumeBackupperFactory: new(fakePodVolumeBackupperFactory),
|
||||
podVolumeTimeout: 60 * time.Second,
|
||||
podVolumeTimeout: 0,
|
||||
},
|
||||
log: log,
|
||||
itemBlockPool: *itemBlockPool,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4349,7 +4221,7 @@ func defaultBackup() *builder.BackupBuilder {
|
||||
return builder.ForBackup(velerov1.DefaultNamespace, "backup-1").DefaultVolumesToFsBackup(false)
|
||||
}
|
||||
|
||||
func toUnstructuredOrFail(t *testing.T, obj any) map[string]any {
|
||||
func toUnstructuredOrFail(t *testing.T, obj interface{}) map[string]interface{} {
|
||||
t.Helper()
|
||||
|
||||
res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
|
||||
@@ -4385,7 +4257,7 @@ func assertTarballContents(t *testing.T, backupFile io.Reader, items ...string)
|
||||
}
|
||||
|
||||
// unstructuredObject is a type alias to improve readability.
|
||||
type unstructuredObject map[string]any
|
||||
type unstructuredObject map[string]interface{}
|
||||
|
||||
// assertTarballFileContents verifies that the gzipped tarball stored in the provided
|
||||
// backupFile contains the files specified as keys in 'want', and for each of those
|
||||
@@ -5302,17 +5174,13 @@ func TestBackupNewResourceFiltering(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -5467,17 +5335,13 @@ func TestBackupNamespaces(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
itemBlockPool := StartItemBlockWorkerPool(context.Background(), 1, logrus.StandardLogger())
|
||||
defer itemBlockPool.Stop()
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t, itemBlockPool)
|
||||
h = newHarness(t)
|
||||
req = &Request{
|
||||
Backup: tc.backup,
|
||||
SkippedPVTracker: NewSkipPVTracker(),
|
||||
BackedUpItems: NewBackedUpItemsMap(),
|
||||
ItemBlockChannel: itemBlockPool.GetInputChannel(),
|
||||
}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
@@ -5646,36 +5510,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// This is an error case. No crash happen here is good enough.
|
||||
name: "VolumeInfo doesn't have SnapshotDataMovementInfo when there is a matching DataUpload",
|
||||
operations: []*itemoperation.BackupOperation{},
|
||||
dataUpload: builder.ForDataUpload("velero", "du-1").
|
||||
CompletionTimestamp(&now).
|
||||
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
|
||||
SnapshotID("snapshot-id").
|
||||
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
|
||||
Phase(velerov2alpha1.DataUploadPhaseCompleted).
|
||||
SourceNamespace("ns-1").
|
||||
SourcePVC("pvc-1").
|
||||
Result(),
|
||||
volumeInfos: []*volume.BackupVolumeInfo{
|
||||
{
|
||||
PVCName: "pvc-1",
|
||||
PVCNamespace: "ns-1",
|
||||
CompletionTimestamp: &metav1.Time{},
|
||||
SnapshotDataMovementInfo: nil,
|
||||
},
|
||||
},
|
||||
expectedVolumeInfos: []*volume.BackupVolumeInfo{
|
||||
{
|
||||
PVCName: "pvc-1",
|
||||
PVCNamespace: "ns-1",
|
||||
CompletionTimestamp: &metav1.Time{},
|
||||
SnapshotDataMovementInfo: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -5692,10 +5526,8 @@ func TestUpdateVolumeInfos(t *testing.T) {
|
||||
}
|
||||
|
||||
require.NoError(t, updateVolumeInfos(tc.volumeInfos, unstructures, tc.operations, logger))
|
||||
if len(tc.expectedVolumeInfos) > 0 {
|
||||
require.Equal(t, tc.expectedVolumeInfos[0].CompletionTimestamp, tc.volumeInfos[0].CompletionTimestamp)
|
||||
require.Equal(t, tc.expectedVolumeInfos[0].SnapshotDataMovementInfo, tc.volumeInfos[0].SnapshotDataMovementInfo)
|
||||
}
|
||||
require.Equal(t, tc.expectedVolumeInfos[0].CompletionTimestamp, tc.volumeInfos[0].CompletionTimestamp)
|
||||
require.Equal(t, tc.expectedVolumeInfos[0].SnapshotDataMovementInfo, tc.volumeInfos[0].SnapshotDataMovementInfo)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,7 +71,6 @@ type itemBackupper struct {
|
||||
podVolumeBackupper podvolume.Backupper
|
||||
podVolumeSnapshotTracker *podvolume.Tracker
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter
|
||||
kubernetesBackupper *kubernetesBackupper
|
||||
|
||||
itemHookHandler hook.ItemHookHandler
|
||||
snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter
|
||||
@@ -96,8 +95,6 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
|
||||
if !selectedForBackup || err != nil || len(files) == 0 || finalize {
|
||||
return selectedForBackup, files, err
|
||||
}
|
||||
ib.tarWriter.Lock()
|
||||
defer ib.tarWriter.Unlock()
|
||||
for _, file := range files {
|
||||
if err := ib.tarWriter.WriteHeader(file.Header); err != nil {
|
||||
return false, []FileForArchive{}, errors.WithStack(err)
|
||||
@@ -162,7 +159,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
|
||||
namespace := metadata.GetNamespace()
|
||||
name := metadata.GetName()
|
||||
|
||||
log := logger.WithFields(map[string]any{
|
||||
log := logger.WithFields(map[string]interface{}{
|
||||
"name": name,
|
||||
"resource": groupResource.String(),
|
||||
"namespace": namespace,
|
||||
@@ -178,12 +175,12 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
|
||||
name: name,
|
||||
}
|
||||
|
||||
if ib.backupRequest.BackedUpItems.Has(key) {
|
||||
if _, exists := ib.backupRequest.BackedUpItems[key]; exists {
|
||||
log.Info("Skipping item because it's already been backed up.")
|
||||
// returning true since this item *is* in the backup, even though we're not backing it up here
|
||||
return true, itemFiles, nil
|
||||
}
|
||||
ib.backupRequest.BackedUpItems.AddItem(key)
|
||||
ib.backupRequest.BackedUpItems[key] = struct{}{}
|
||||
log.Info("Backing up item")
|
||||
|
||||
var (
|
||||
@@ -221,7 +218,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
|
||||
ib.podVolumeSnapshotTracker.Track(pod, volume.Name)
|
||||
|
||||
if found, pvcName := ib.podVolumeSnapshotTracker.TakenForPodVolume(pod, volume.Name); found {
|
||||
log.WithFields(map[string]any{
|
||||
log.WithFields(map[string]interface{}{
|
||||
"podVolume": volume,
|
||||
"pvcName": pvcName,
|
||||
}).Info("Pod volume uses a persistent volume claim which has already been backed up from another pod, skipping.")
|
||||
@@ -593,18 +590,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
}
|
||||
|
||||
if ib.backupRequest.ResPolicies != nil {
|
||||
pvc := new(corev1api.PersistentVolumeClaim)
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
err = ib.kbClient.Get(context.Background(), kbClient.ObjectKey{
|
||||
Namespace: pv.Spec.ClaimRef.Namespace,
|
||||
Name: pv.Spec.ClaimRef.Name},
|
||||
pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
if action, err := ib.backupRequest.ResPolicies.GetMatchAction(vfd); err != nil {
|
||||
if action, err := ib.backupRequest.ResPolicies.GetMatchAction(pv); err != nil {
|
||||
log.WithError(err).Errorf("Error getting matched resource policies for pv %s", pv.Name)
|
||||
return nil
|
||||
} else if action != nil && action.Type == resourcepolicies.Skip {
|
||||
@@ -707,8 +693,8 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
|
||||
|
||||
func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource schema.GroupResource, backupItemActionName string) (*resourcepolicies.Action, error) {
|
||||
if ib.backupRequest.ResPolicies != nil && groupResource == kuberesource.PersistentVolumeClaims && (backupItemActionName == csiBIAPluginName || backupItemActionName == vsphereBIAPluginName) {
|
||||
pvc := &corev1api.PersistentVolumeClaim{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
|
||||
pvc := corev1api.PersistentVolumeClaim{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pvc); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
@@ -721,8 +707,7 @@ func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource
|
||||
if err := ib.kbClient.Get(context.Background(), kbClient.ObjectKey{Name: pvName}, pv); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
vfd := resourcepolicies.NewVolumeFilterData(pv, nil, pvc)
|
||||
return ib.backupRequest.ResPolicies.GetMatchAction(vfd)
|
||||
return ib.backupRequest.ResPolicies.GetMatchAction(pv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
/*
|
||||
Copyright the Velero Contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
type ItemBlockWorkerPool struct {
|
||||
inputChannel chan ItemBlockInput
|
||||
wg *sync.WaitGroup
|
||||
logger logrus.FieldLogger
|
||||
cancelFunc context.CancelFunc
|
||||
}
|
||||
|
||||
type ItemBlockInput struct {
|
||||
itemBlock *BackupItemBlock
|
||||
returnChan chan ItemBlockReturn
|
||||
}
|
||||
|
||||
type ItemBlockReturn struct {
|
||||
itemBlock *BackupItemBlock
|
||||
resources []schema.GroupResource
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *ItemBlockWorkerPool) GetInputChannel() chan ItemBlockInput {
|
||||
return p.inputChannel
|
||||
}
|
||||
|
||||
func StartItemBlockWorkerPool(ctx context.Context, workers int, log logrus.FieldLogger) *ItemBlockWorkerPool {
|
||||
// Buffer will hold up to 10 ItemBlocks waiting for processing
|
||||
inputChannel := make(chan ItemBlockInput, max(workers, 10))
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(ctx)
|
||||
wg := &sync.WaitGroup{}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
logger := log.WithField("worker", i)
|
||||
wg.Add(1)
|
||||
go processItemBlockWorker(ctx, inputChannel, logger, wg)
|
||||
}
|
||||
|
||||
pool := &ItemBlockWorkerPool{
|
||||
inputChannel: inputChannel,
|
||||
cancelFunc: cancelFunc,
|
||||
logger: log,
|
||||
wg: wg,
|
||||
}
|
||||
return pool
|
||||
}
|
||||
|
||||
func (p *ItemBlockWorkerPool) Stop() {
|
||||
p.cancelFunc()
|
||||
p.logger.Info("ItemBlock worker stopping")
|
||||
p.wg.Wait()
|
||||
p.logger.Info("ItemBlock worker stopped")
|
||||
}
|
||||
|
||||
func processItemBlockWorker(ctx context.Context,
|
||||
inputChannel chan ItemBlockInput,
|
||||
logger logrus.FieldLogger,
|
||||
wg *sync.WaitGroup) {
|
||||
for {
|
||||
select {
|
||||
case m := <-inputChannel:
|
||||
logger.Infof("processing ItemBlock for backup %v", m.itemBlock.itemBackupper.backupRequest.Name)
|
||||
grList := m.itemBlock.itemBackupper.kubernetesBackupper.backupItemBlock(m.itemBlock)
|
||||
logger.Infof("finished processing ItemBlock for backup %v", m.itemBlock.itemBackupper.backupRequest.Name)
|
||||
m.returnChan <- ItemBlockReturn{
|
||||
itemBlock: m.itemBlock,
|
||||
resources: grList,
|
||||
err: nil,
|
||||
}
|
||||
case <-ctx.Done():
|
||||
logger.Info("stopping ItemBlock worker")
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -176,11 +176,7 @@ type kubernetesResource struct {
|
||||
preferredGVR schema.GroupVersionResource
|
||||
namespace, name, path string
|
||||
orderedResource bool
|
||||
// set to true during backup processing when added to an ItemBlock
|
||||
// or if the item is excluded from backup.
|
||||
inItemBlockOrExcluded bool
|
||||
// Kind is added to facilitate creating an itemKey for progress tracking
|
||||
kind string
|
||||
inItemBlock bool // set to true during backup processing when added to an ItemBlock
|
||||
}
|
||||
|
||||
// getItemsFromResourceIdentifiers get the kubernetesResources
|
||||
@@ -409,7 +405,6 @@ func (r *itemCollector) getResourceItems(
|
||||
namespace: resourceID.Namespace,
|
||||
name: resourceID.Name,
|
||||
path: path,
|
||||
kind: resource.Kind,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -483,7 +478,6 @@ func (r *itemCollector) getResourceItems(
|
||||
namespace: item.GetNamespace(),
|
||||
name: item.GetName(),
|
||||
path: path,
|
||||
kind: resource.Kind,
|
||||
})
|
||||
|
||||
if item.GetNamespace() != "" {
|
||||
@@ -810,7 +804,6 @@ func (r *itemCollector) collectNamespaces(
|
||||
preferredGVR: preferredGVR,
|
||||
name: unstructuredList.Items[index].GetName(),
|
||||
path: path,
|
||||
kind: resource.Kind,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/itemblock"
|
||||
@@ -42,12 +41,12 @@ func NewBackupItemBlock(log logrus.FieldLogger, itemBackupper *itemBackupper) *B
|
||||
}
|
||||
|
||||
func (b *BackupItemBlock) addKubernetesResource(item *kubernetesResource, log logrus.FieldLogger) *unstructured.Unstructured {
|
||||
// no-op if item has already been processed (in a block or previously excluded)
|
||||
if item.inItemBlockOrExcluded {
|
||||
// no-op if item is already in a block
|
||||
if item.inItemBlock {
|
||||
return nil
|
||||
}
|
||||
var unstructured unstructured.Unstructured
|
||||
item.inItemBlockOrExcluded = true
|
||||
item.inItemBlock = true
|
||||
|
||||
f, err := os.Open(item.path)
|
||||
if err != nil {
|
||||
@@ -61,18 +60,6 @@ func (b *BackupItemBlock) addKubernetesResource(item *kubernetesResource, log lo
|
||||
log.WithError(errors.WithStack(err)).Error("Error decoding JSON from file")
|
||||
return nil
|
||||
}
|
||||
|
||||
metadata, err := meta.Accessor(&unstructured)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Warn("Error accessing item metadata")
|
||||
return nil
|
||||
}
|
||||
// Don't add to ItemBlock if item is excluded
|
||||
// itemInclusionChecks logs the reason
|
||||
if !b.itemBackupper.itemInclusionChecks(log, false, metadata, &unstructured, item.groupResource) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("adding %s %s/%s to ItemBlock", item.groupResource, item.namespace, item.name)
|
||||
b.AddUnstructured(item.groupResource, &unstructured, item.preferredGVR)
|
||||
return &unstructured
|
||||
|
||||
@@ -17,6 +17,9 @@ limitations under the License.
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/hook"
|
||||
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
|
||||
"github.com/vmware-tanzu/velero/internal/volume"
|
||||
@@ -46,12 +49,11 @@ type Request struct {
|
||||
ResolvedItemBlockActions []framework.ItemBlockResolvedAction
|
||||
VolumeSnapshots []*volume.Snapshot
|
||||
PodVolumeBackups []*velerov1api.PodVolumeBackup
|
||||
BackedUpItems *backedUpItemsMap
|
||||
BackedUpItems map[itemKey]struct{}
|
||||
itemOperationsList *[]*itemoperation.BackupOperation
|
||||
ResPolicies *resourcepolicies.Policies
|
||||
SkippedPVTracker *skipPVTracker
|
||||
VolumesInformation volume.BackupVolumesInformation
|
||||
ItemBlockChannel chan ItemBlockInput
|
||||
}
|
||||
|
||||
// BackupVolumesInformation contains the information needs by generating
|
||||
@@ -69,7 +71,21 @@ func (r *Request) GetItemOperationsList() *[]*itemoperation.BackupOperation {
|
||||
// BackupResourceList returns the list of backed up resources grouped by the API
|
||||
// Version and Kind
|
||||
func (r *Request) BackupResourceList() map[string][]string {
|
||||
return r.BackedUpItems.ResourceMap()
|
||||
resources := map[string][]string{}
|
||||
for i := range r.BackedUpItems {
|
||||
entry := i.name
|
||||
if i.namespace != "" {
|
||||
entry = fmt.Sprintf("%s/%s", i.namespace, i.name)
|
||||
}
|
||||
resources[i.resource] = append(resources[i.resource], entry)
|
||||
}
|
||||
|
||||
// sort namespace/name entries for each GVK
|
||||
for _, v := range resources {
|
||||
sort.Strings(v)
|
||||
}
|
||||
|
||||
return resources
|
||||
}
|
||||
|
||||
func (r *Request) FillVolumesInformation() {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user