Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-20 11:42:53 +00:00)

Compare commits (2 commits): dependabot ... copilot/fi

| Author | SHA1 | Date |
|---|---|---|
|  | 2dea6b7d71 |  |
|  | 128d9427dc |  |
.github/workflows/e2e-test-kind.yaml (vendored, 60 lines changed)

@@ -8,26 +8,16 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build:
runs-on: ubuntu-latest
needs: get-go-version
outputs:
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
steps:
- name: Check out the code
uses: actions/checkout@v5

- name: Set up Go version
uses: actions/setup-go@v6
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ needs.get-go-version.outputs.version }}

go-version-file: 'go.mod'
# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cli-cache

@@ -54,26 +44,6 @@ jobs:
run: |
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
docker save velero:pr-test-linux-amd64 -o ./velero.tar
# Check and build MinIO image once for all e2e tests
- name: Check Bitnami MinIO Dockerfile version
id: minio-version
run: |
DOCKERFILE_SHA=$(curl -s https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile\&per_page=1 | jq -r '.[0].sha')
echo "dockerfile_sha=${DOCKERFILE_SHA}" >> $GITHUB_OUTPUT
- name: Cache MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ steps.minio-version.outputs.dockerfile_sha }}
- name: Build MinIO Image from Bitnami Dockerfile
if: steps.minio-cache.outputs.cache-hit != 'true'
run: |
echo "Building MinIO image from Bitnami Dockerfile..."
git clone --depth 1 https://github.com/bitnami/containers.git /tmp/bitnami-containers
cd /tmp/bitnami-containers/bitnami/minio/2025/debian-12
docker build -t bitnami/minio:local .
docker save bitnami/minio:local > ${{ github.workspace }}/minio-image.tar
# Create json of k8s versions to test
# from guide: https://stackoverflow.com/a/65094398/4590470
setup-test-matrix:

@@ -105,7 +75,6 @@ jobs:
needs:
- build
- setup-test-matrix
- get-go-version
runs-on: ubuntu-latest
strategy:
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}

@@ -113,26 +82,13 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v5

- name: Set up Go version
uses: actions/setup-go@v6
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ needs.get-go-version.outputs.version }}

# Fetch the pre-built MinIO image from the build job
- name: Fetch built MinIO Image
uses: actions/cache@v4
id: minio-cache
with:
path: ./minio-image.tar
key: minio-bitnami-${{ needs.build.outputs.minio-dockerfile-sha }}
- name: Load MinIO Image
run: |
echo "Loading MinIO image..."
docker load < ./minio-image.tar
go-version-file: 'go.mod'
- name: Install MinIO
run: |
docker run -d --rm -p 9000:9000 -e "MINIO_ROOT_USER=minio" -e "MINIO_ROOT_PASSWORD=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:local
run:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
- uses: engineerd/setup-kind@v0.6.2
with:
skipClusterLogsExport: true
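The MinIO steps above key the actions/cache entry on the latest upstream commit that touched Bitnami's MinIO Dockerfile, so the image is only rebuilt when that Dockerfile changes. A minimal Go sketch of the same lookup (not part of the workflow; the URL and key prefix are copied from the steps above):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// latestDockerfileSHA fetches the most recent commit SHA that touched the
// Bitnami MinIO Dockerfile, the same value the workflow's curl | jq step
// writes to its step output.
func latestDockerfileSHA() (string, error) {
	const url = "https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile&per_page=1"
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var commits []struct {
		SHA string `json:"sha"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&commits); err != nil {
		return "", err
	}
	if len(commits) == 0 {
		return "", fmt.Errorf("no commits returned for path")
	}
	return commits[0].SHA, nil
}

func main() {
	sha, err := latestDockerfileSHA()
	if err != nil {
		panic(err)
	}
	// The workflow prefixes the SHA to build its cache key.
	fmt.Println("cache key: minio-bitnami-" + sha)
}
```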
.github/workflows/get-go-version.yaml (vendored, 33 lines changed)

@@ -1,33 +0,0 @@
on:
workflow_call:
inputs:
ref:
description: "The target branch's ref"
required: true
type: string
outputs:
version:
description: "The expected Go version"
value: ${{ jobs.extract.outputs.version }}

jobs:
extract:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.pick-version.outputs.version }}
steps:
- name: Check out the code
uses: actions/checkout@v5

- id: pick-version
run: |
if [ "${{ inputs.ref }}" == "main" ]; then
version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
else
goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
fi

echo "version=$version"
echo "version=$version" >> $GITHUB_OUTPUT
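The pick-version step above uses the go.mod `go` directive directly on main, and otherwise takes the higher of the `go` and `toolchain` directives via `sort -V`. A minimal Go sketch of the same selection, assuming golang.org/x/mod is available (this is an illustration, not the workflow's actual implementation):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/mod/modfile"
	"golang.org/x/mod/semver"
)

func main() {
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}

	version := f.Go.Version // e.g. "1.24.0" from the "go" directive
	if f.Toolchain != nil {
		// A toolchain directive looks like "go1.24.1"; strip the prefix so the
		// two values are comparable.
		tc := strings.TrimPrefix(f.Toolchain.Name, "go")
		// semver.Compare wants a leading "v"; keep whichever version is higher,
		// mirroring the sort -V | tail -n1 step in the workflow.
		if semver.Compare("v"+tc, "v"+version) > 0 {
			version = tc
		}
	}
	fmt.Println(version)
}
```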
.github/workflows/nightly-trivy-scan.yml (vendored, 2 lines changed)

@@ -31,6 +31,6 @@ jobs:
output: 'trivy-results.sarif'

- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v4
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: 'trivy-results.sarif'
.github/workflows/pr-ci-check.yml (vendored, 14 lines changed)

@@ -1,26 +1,18 @@
name: Pull Request CI Check
on: [pull_request]
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

build:
name: Run CI
needs: get-go-version
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v5

- name: Set up Go version
uses: actions/setup-go@v6
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ needs.get-go-version.outputs.version }}

go-version-file: 'go.mod'
- name: Make ci
run: make ci
- name: Upload test coverage
.github/workflows/pr-linter-check.yml (vendored, 14 lines changed)

@@ -7,24 +7,16 @@ on:
- "design/**"
- "**/*.md"
jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.event.pull_request.base.ref }}

build:
name: Run Linter Check
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5

- name: Set up Go version
uses: actions/setup-go@v6
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ needs.get-go-version.outputs.version }}

go-version-file: 'go.mod'
- name: Linter check
uses: golangci/golangci-lint-action@v8
with:
.github/workflows/push.yml (vendored, 13 lines changed)

@@ -9,24 +9,17 @@ on:
- '*'

jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.ref }}

build:
name: Build
runs-on: ubuntu-latest
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v5

- name: Set up Go version
uses: actions/setup-go@v6
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ needs.get-go-version.outputs.version }}

go-version-file: 'go.mod'
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v3
Dockerfile

@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache

# Velero image packing section
FROM paketobuildpacks/run-jammy-tiny:latest
FROM paketobuildpacks/run-jammy-tiny:0.2.73

LABEL maintainer="Xun Jiang <jxun@vmware.com>"
@@ -42,7 +42,7 @@ The following is a list of the supported Kubernetes versions for each Velero version

| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|----------------|-------------------------------------------|-------------------------------------|
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, and 1.33.1 |
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
@@ -1 +0,0 @@
Get pod list once per namespace in pvc IBA

@@ -1 +0,0 @@
Fix issue #9229, don't attach backupPVC to the source node

@@ -1 +0,0 @@
Update AzureAD Microsoft Authentication Library to v1.5.0

@@ -1 +0,0 @@
Protect VolumeSnapshot field from race condition during multi-thread backup

@@ -1 +0,0 @@
Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment

@@ -1 +0,0 @@
Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases

@@ -1 +0,0 @@
Implement concurrency control for cache of native VolumeSnapshotter plugin.

@@ -1 +0,0 @@
Add option for privileged fs-backup pod

@@ -1 +0,0 @@
Fix issue #9267, add events to data mover prepare diagnostic

@@ -1 +0,0 @@
VerifyJSONConfigs verify every elements in Data.

@@ -1 +0,0 @@
Fix typos in documentation
go.mod (58 lines changed)

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero

go 1.24.0
go 1.24

require (
cloud.google.com/go/storage v1.55.0

@@ -17,7 +17,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
github.com/bombsimon/logrusr/v3 v3.0.0
github.com/evanphx/json-patch/v5 v5.9.11
github.com/evanphx/json-patch/v5 v5.9.0
github.com/fatih/color v1.18.0
github.com/gobwas/glob v0.2.3
github.com/google/go-cmp v0.7.0

@@ -27,8 +27,8 @@ require (
github.com/joho/godotenv v1.3.0
github.com/kopia/kopia v0.16.0
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.1
github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0

@@ -49,17 +49,17 @@ require (
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.33.3
k8s.io/apiextensions-apiserver v0.33.3
k8s.io/apimachinery v0.33.3
k8s.io/cli-runtime v0.33.3
k8s.io/client-go v0.33.3
k8s.io/api v0.31.3
k8s.io/apiextensions-apiserver v0.31.3
k8s.io/apimachinery v0.31.3
k8s.io/cli-runtime v0.31.3
k8s.io/client-go v0.31.3
k8s.io/klog/v2 v2.130.1
k8s.io/kube-aggregator v0.33.3
k8s.io/metrics v0.33.3
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.21.0
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
k8s.io/kube-aggregator v0.31.3
k8s.io/metrics v0.31.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.19.3
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
sigs.k8s.io/yaml v1.4.0
)

@@ -72,8 +72,8 @@ require (
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect

@@ -91,7 +91,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect

@@ -102,31 +101,32 @@ require (
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect

@@ -144,7 +144,7 @@ require (
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.94 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect

@@ -181,7 +181,7 @@ require (
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect

@@ -193,9 +193,9 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250722052735-3ea24d208777
go.sum (106 lines changed)

@@ -84,8 +84,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=

@@ -95,8 +95,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI=

@@ -170,8 +170,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bombsimon/logrusr/v3 v3.0.0 h1:tcAoLfuAhKP9npBxWzSdpsvKPQt1XV02nSf2lZA82TQ=
github.com/bombsimon/logrusr/v3 v3.0.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHAnFF5E+g8Ixco=
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=

@@ -241,8 +239,8 @@ github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2T
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=

@@ -284,9 +282,8 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=

@@ -294,8 +291,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=

@@ -321,6 +318,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=

@@ -352,10 +350,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=

@@ -393,8 +389,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=

@@ -417,8 +413,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=

@@ -459,6 +455,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=

@@ -552,8 +550,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -586,13 +584,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=

@@ -806,8 +804,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -1208,6 +1206,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@@ -1218,50 +1217,47 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs=
k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE=
k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA=
k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo=
k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI=
k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-aggregator v0.33.3 h1:Pa6hQpKJMX0p0D2wwcxXJgu02++gYcGWXoW1z1ZJDfo=
k8s.io/kube-aggregator v0.33.3/go.mod h1:hwvkUoQ8q6gv0+SgNnlmQ3eUue1zHhJKTHsX7BwxwSE=
k8s.io/kube-aggregator v0.31.3 h1:DqHPdTglJHgOfB884AaroyxrML/aL82ASYOh65m7MSk=
k8s.io/kube-aggregator v0.31.3/go.mod h1:Kx59Xjnf0SnY47qf9Or++4y3XCHQ3kR0xk1Di6KFiFU=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/metrics v0.33.3 h1:9CcqBz15JZfISqwca33gdHS8I6XfsK1vA8WUdEnG70g=
k8s.io/metrics v0.33.3/go.mod h1:Aw+cdg4AYHw0HvUY+lCyq40FOO84awrqvJRTw0cmXDs=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/metrics v0.31.3 h1:DkT9I3gFlb2/z+/4BMY7WrQ/PnbukuV4Yli82v/KBCM=
k8s.io/metrics v0.31.3/go.mod h1:2w9gpd8z+13oJmaPR6p3kDyrDqnxSyoKpnOw2qLIdhI=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -366,7 +366,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
discoveryHelper: kb.discoveryHelper,
podVolumeBackupper: podVolumeBackupper,
podVolumeSnapshotTracker: podvolume.NewTracker(),
volumeSnapshotterCache: NewVolumeSnapshotterCache(volumeSnapshotterGetter),
volumeSnapshotterGetter: volumeSnapshotterGetter,
itemHookHandler: &hook.DefaultItemHookHandler{
PodCommandExecutor: kb.podCommandExecutor,
},
@@ -3269,7 +3269,7 @@ func TestBackupWithSnapshots(t *testing.T) {
err := h.backupper.Backup(h.log, tc.req, backupFile, nil, nil, tc.snapshotterGetter)
require.NoError(t, err)

assert.Equal(t, tc.want, tc.req.VolumeSnapshots.Get())
assert.Equal(t, tc.want, tc.req.VolumeSnapshots)
})
}
}
@@ -4213,7 +4213,7 @@ func TestBackupWithPodVolume(t *testing.T) {
assert.Equal(t, tc.want, req.PodVolumeBackups)

// this assumes that we don't have any test cases where some PVs should be snapshotted using a VolumeSnapshotter
assert.Nil(t, req.VolumeSnapshots.Get())
assert.Nil(t, req.VolumeSnapshots)
})
}
}
@@ -70,11 +70,13 @@ type itemBackupper struct {
discoveryHelper discovery.Helper
podVolumeBackupper podvolume.Backupper
podVolumeSnapshotTracker *podvolume.Tracker
volumeSnapshotterGetter VolumeSnapshotterGetter
kubernetesBackupper *kubernetesBackupper
volumeSnapshotterCache *VolumeSnapshotterCache
itemHookHandler hook.ItemHookHandler
hookTracker *hook.HookTracker
volumeHelperImpl volumehelper.VolumeHelper

itemHookHandler hook.ItemHookHandler
snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter
hookTracker *hook.HookTracker
volumeHelperImpl volumehelper.VolumeHelper
}

type FileForArchive struct {

@@ -500,6 +502,30 @@ func (ib *itemBackupper) executeActions(
return obj, itemFiles, nil
}

// volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation,
// or returns an existing one if one's already been initialized for the location.
func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok {
return bs, nil
}

bs, err := ib.volumeSnapshotterGetter.GetVolumeSnapshotter(snapshotLocation.Spec.Provider)
if err != nil {
return nil, err
}

if err := bs.Init(snapshotLocation.Spec.Config); err != nil {
return nil, err
}

if ib.snapshotLocationVolumeSnapshotters == nil {
ib.snapshotLocationVolumeSnapshotters = make(map[string]vsv1.VolumeSnapshotter)
}
ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name] = bs

return bs, nil
}

// zoneLabelDeprecated is the label that stores availability-zone info
// on PVs this is deprecated on Kubernetes >= 1.17.0
// zoneLabel is the label that stores availability-zone info

@@ -615,7 +641,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
for _, snapshotLocation := range ib.backupRequest.SnapshotLocations {
log := log.WithField("volumeSnapshotLocation", snapshotLocation.Name)

bs, err := ib.volumeSnapshotterCache.SetNX(snapshotLocation)
bs, err := ib.volumeSnapshotter(snapshotLocation)
if err != nil {
log.WithError(err).Error("Error getting volume snapshotter for volume snapshot location")
continue

@@ -673,7 +699,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
snapshot.Status.Phase = volume.SnapshotPhaseCompleted
snapshot.Status.ProviderSnapshotID = snapshotID
}
ib.backupRequest.VolumeSnapshots.Add(snapshot)
ib.backupRequest.VolumeSnapshots = append(ib.backupRequest.VolumeSnapshots, snapshot)

// nil errors are automatically removed
return kubeerrs.NewAggregate(errs)
@@ -17,8 +17,6 @@ limitations under the License.
package backup

import (
"sync"

"github.com/vmware-tanzu/velero/internal/hook"
"github.com/vmware-tanzu/velero/internal/resourcepolicies"
"github.com/vmware-tanzu/velero/internal/volume"

@@ -34,27 +32,11 @@ type itemKey struct {
name string
}

type SynchronizedVSList struct {
sync.Mutex
VolumeSnapshotList []*volume.Snapshot
}

func (s *SynchronizedVSList) Add(vs *volume.Snapshot) {
s.Lock()
defer s.Unlock()
s.VolumeSnapshotList = append(s.VolumeSnapshotList, vs)
}

func (s *SynchronizedVSList) Get() []*volume.Snapshot {
s.Lock()
defer s.Unlock()
return s.VolumeSnapshotList
}

// Request is a request for a backup, with all references to other objects
// materialized (e.g. backup/snapshot locations, includes/excludes, etc.)
type Request struct {
*velerov1api.Backup

StorageLocation *velerov1api.BackupStorageLocation
SnapshotLocations []*velerov1api.VolumeSnapshotLocation
NamespaceIncludesExcludes *collections.IncludesExcludes

@@ -62,7 +44,7 @@ type Request struct {
ResourceHooks []hook.ResourceHook
ResolvedActions []framework.BackupItemResolvedActionV2
ResolvedItemBlockActions []framework.ItemBlockResolvedAction
VolumeSnapshots SynchronizedVSList
VolumeSnapshots []*volume.Snapshot
PodVolumeBackups []*velerov1api.PodVolumeBackup
BackedUpItems *backedUpItemsMap
itemOperationsList *[]*itemoperation.BackupOperation

@@ -98,7 +80,7 @@ func (r *Request) FillVolumesInformation() {
}

r.VolumesInformation.SkippedPVs = skippedPVMap
r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots.Get()
r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots
r.VolumesInformation.PodVolumeBackups = r.PodVolumeBackups
r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
r.VolumesInformation.BackupName = r.Backup.Name
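One side of this diff stores native snapshots in the mutex-guarded SynchronizedVSList shown above instead of a plain slice, which matches the changelog entry about protecting the VolumeSnapshot field during multi-threaded backup. A standalone sketch (not Velero's code; the snapshot type is a stand-in) of how such a wrapper behaves under concurrent appends:

```go
package main

import (
	"fmt"
	"sync"
)

type snapshot struct{ ID string }

// synchronizedVSList mirrors the shape of the SynchronizedVSList above:
// a mutex embedded next to the slice, with Add/Get taking the lock.
type synchronizedVSList struct {
	sync.Mutex
	list []*snapshot
}

func (s *synchronizedVSList) Add(vs *snapshot) {
	s.Lock()
	defer s.Unlock()
	s.list = append(s.list, vs)
}

func (s *synchronizedVSList) Get() []*snapshot {
	s.Lock()
	defer s.Unlock()
	return s.list
}

func main() {
	var vsList synchronizedVSList
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Without the mutex, concurrent appends to a plain slice would race.
			vsList.Add(&snapshot{ID: fmt.Sprintf("snap-%d", n)})
		}(i)
	}
	wg.Wait()
	fmt.Println("snapshots recorded:", len(vsList.Get()))
}
```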
@@ -1,42 +0,0 @@
package backup

import (
"sync"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
)

type VolumeSnapshotterCache struct {
cache map[string]vsv1.VolumeSnapshotter
mutex sync.Mutex
getter VolumeSnapshotterGetter
}

func NewVolumeSnapshotterCache(getter VolumeSnapshotterGetter) *VolumeSnapshotterCache {
return &VolumeSnapshotterCache{
cache: make(map[string]vsv1.VolumeSnapshotter),
getter: getter,
}
}

func (c *VolumeSnapshotterCache) SetNX(location *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) {
c.mutex.Lock()
defer c.mutex.Unlock()

if snapshotter, exists := c.cache[location.Name]; exists {
return snapshotter, nil
}

snapshotter, err := c.getter.GetVolumeSnapshotter(location.Spec.Provider)
if err != nil {
return nil, err
}

if err := snapshotter.Init(location.Spec.Config); err != nil {
return nil, err
}

c.cache[location.Name] = snapshotter
return snapshotter, nil
}
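The VolumeSnapshotterCache above initializes at most one VolumeSnapshotter per snapshot location and serializes access with a mutex, so concurrent item backups share a single initialized plugin instance. A standalone sketch of the same set-if-not-exists pattern (hypothetical types; not Velero's code):

```go
package main

import (
	"fmt"
	"sync"
)

type snapshotter struct{ provider string }

type cache struct {
	mu    sync.Mutex
	items map[string]*snapshotter
	inits int
}

// SetNX returns the cached snapshotter for a location, creating and
// initializing it only if it is not already present.
func (c *cache) SetNX(location, provider string) *snapshotter {
	c.mu.Lock()
	defer c.mu.Unlock()
	if s, ok := c.items[location]; ok {
		return s
	}
	c.inits++ // the expensive part (plugin lookup + Init) runs once per location
	s := &snapshotter{provider: provider}
	c.items[location] = s
	return s
}

func main() {
	c := &cache{items: map[string]*snapshotter{}}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.SetNX("default", "aws")
		}()
	}
	wg.Wait()
	fmt.Println("initializations:", c.inits) // prints 1
}
```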
@@ -545,22 +545,24 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
return fmt.Errorf("fail to create go-client %w", err)
}

if len(o.NodeAgentConfigMap) > 0 {
// If either Linux or Windows node-agent is installed, and the node-agent-configmap
// is specified, need to validate the ConfigMap.
if (o.UseNodeAgent || o.UseNodeAgentWindows) && len(o.NodeAgentConfigMap) > 0 {
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.NodeAgentConfigMap, &velerotypes.NodeAgentConfigs{}); err != nil {
return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid: %w", o.NodeAgentConfigMap, err)
return fmt.Errorf("--node-agent-configmap specified ConfigMap %s is invalid", o.NodeAgentConfigMap)
}
}

if len(o.RepoMaintenanceJobConfigMap) > 0 {
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.RepoMaintenanceJobConfigMap, &velerotypes.JobConfigs{}); err != nil {
return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid: %w", o.RepoMaintenanceJobConfigMap, err)
return fmt.Errorf("--repo-maintenance-job-configmap specified ConfigMap %s is invalid", o.RepoMaintenanceJobConfigMap)
}
}

if len(o.BackupRepoConfigMap) > 0 {
config := make(map[string]any)
if err := kubeutil.VerifyJSONConfigs(c.Context(), o.Namespace, crClient, o.BackupRepoConfigMap, &config); err != nil {
return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid: %w", o.BackupRepoConfigMap, err)
return fmt.Errorf("--backup-repository-configmap specified ConfigMap %s is invalid", o.BackupRepoConfigMap)
}
}
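The validation above runs kubeutil.VerifyJSONConfigs against each install-time ConfigMap; its implementation is not part of this diff. A hypothetical sketch only, suggested by the changelog line "VerifyJSONConfigs verify every elements in Data": strict-decode each Data value into the expected config type and reject unknown fields (the config type here is a stand-in, not Velero's):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// exampleConfig is a stand-in for the target config type (for instance the
// node-agent or repo-maintenance job configs referenced above).
type exampleConfig struct {
	PodResources map[string]string `json:"podResources,omitempty"`
}

// verifyJSONConfigs strict-decodes every value in a ConfigMap's Data into the
// expected type, so a typo or unknown field in any entry fails validation.
func verifyJSONConfigs(data map[string]string, target any) error {
	for key, val := range data {
		dec := json.NewDecoder(bytes.NewReader([]byte(val)))
		dec.DisallowUnknownFields()
		if err := dec.Decode(target); err != nil {
			return fmt.Errorf("data key %q is not a valid config: %w", key, err)
		}
	}
	return nil
}

func main() {
	data := map[string]string{
		"config": `{"podResources": {"cpuRequest": "100m"}}`,
		"broken": `{"unknownField": true}`,
	}
	fmt.Println(verifyJSONConfigs(data, &exampleConfig{}))
}
```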
@@ -308,8 +308,6 @@ func (s *nodeAgentServer) run() {
s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
}

privilegedFsBackup := s.dataPathConfigs != nil && s.dataPathConfigs.PrivilegedFsBackup

podResources := corev1api.ResourceRequirements{}
if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {

@@ -329,12 +327,12 @@ func (s *nodeAgentServer) run() {
}
}

pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}

pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass)
if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -734,8 +734,8 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error {

// native snapshots phase will either be failed or completed right away
// https://github.com/vmware-tanzu/velero/blob/de3ea52f0cc478e99efa7b9524c7f353514261a4/pkg/backup/item_backupper.go#L632-L639
backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots.Get())
for _, snap := range backup.VolumeSnapshots.Get() {
backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots)
for _, snap := range backup.VolumeSnapshots {
if snap.Status.Phase == volume.SnapshotPhaseCompleted {
backup.Status.VolumeSnapshotsCompleted++
}

@@ -882,7 +882,7 @@ func persistBackup(backup *pkgbackup.Request,
}

// Velero-native volume snapshots (as opposed to CSI ones)
nativeVolumeSnapshots, errs := encode.ToJSONGzip(backup.VolumeSnapshots.Get(), "native volumesnapshots list")
nativeVolumeSnapshots, errs := encode.ToJSONGzip(backup.VolumeSnapshots, "native volumesnapshots list")
if errs != nil {
persistErrs = append(persistErrs, errs...)
}
@@ -1047,7 +1047,7 @@ func TestRecallMaintenance(t *testing.T) {
{
name: "wait completion error",
runtimeScheme: schemeFail,
expectedErr: "error waiting incomplete repo maintenance job for repo repo: error listing maintenance job for repo repo: no kind is registered for the type v1.JobList in scheme",
expectedErr: "error waiting incomplete repo maintenance job for repo repo: error listing maintenance job for repo repo: no kind is registered for the type v1.JobList in scheme \"pkg/runtime/scheme.go:100\"",
},
{
name: "no consolidate result",

@@ -1105,7 +1105,7 @@ func TestRecallMaintenance(t *testing.T) {

err := r.recallMaintenance(t.Context(), backupRepo, velerotest.NewLogger())
if test.expectedErr != "" {
assert.ErrorContains(t, err, test.expectedErr)
assert.EqualError(t, err, test.expectedErr)
} else {
assert.NoError(t, err)
|
||||
@@ -916,13 +916,6 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
|
||||
return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
|
||||
}
|
||||
|
||||
pv := &corev1api.PersistentVolume{}
|
||||
if err := r.client.Get(context.Background(), types.NamespacedName{
|
||||
Name: pvc.Spec.VolumeName,
|
||||
}, pv); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get source PV %s", pvc.Spec.VolumeName)
|
||||
}
|
||||
|
||||
nodeOS := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)
|
||||
|
||||
if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
|
||||
@@ -970,8 +963,6 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
|
||||
return &exposer.CSISnapshotExposeParam{
|
||||
SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
|
||||
SourceNamespace: du.Spec.SourceNamespace,
|
||||
SourcePVCName: pvc.Name,
|
||||
SourcePVName: pv.Name,
|
||||
StorageClass: du.Spec.CSISnapshot.StorageClass,
|
||||
HostingPodLabels: hostingPodLabels,
|
||||
HostingPodAnnotations: hostingPodAnnotation,
|
||||
|
||||
@@ -60,7 +60,7 @@ const (
// NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance
func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler {
metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeBackupReconciler {
return &PodVolumeBackupReconciler{
client: client,
mgr: mgr,
@@ -77,7 +77,6 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub
exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
cancelledPVB: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
}
}

@@ -98,7 +97,6 @@ type PodVolumeBackupReconciler struct {
resourceTimeout time.Duration
cancelledPVB map[string]time.Time
dataMovePriorityClass string
privileged bool
}

// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
@@ -839,7 +837,6 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB
Resources: r.podResources,
// Priority class name for the data mover pod, retrieved from node-agent-configmap
PriorityClassName: r.dataMovePriorityClass,
Privileged: r.privileged,
}
}
@@ -151,8 +151,7 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler,
corev1api.ResourceRequirements{},
metrics.NewServerMetrics(),
velerotest.NewLogger(),
"", // dataMovePriorityClass
false, // privileged
"", // dataMovePriorityClass
), nil
}

@@ -56,7 +56,7 @@ import (

func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler {
logger logrus.FieldLogger, dataMovePriorityClass string) *PodVolumeRestoreReconciler {
return &PodVolumeRestoreReconciler{
client: client,
mgr: mgr,
@@ -72,7 +72,6 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
exposer: exposer.NewPodVolumeExposer(kubeClient, logger),
cancelledPVR: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
}
}

@@ -91,7 +90,6 @@ type PodVolumeRestoreReconciler struct {
resourceTimeout time.Duration
cancelledPVR map[string]time.Time
dataMovePriorityClass string
privileged bool
}

// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -898,7 +896,6 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
Resources: r.podResources,
// Priority class name for the data mover pod, retrieved from node-agent-configmap
PriorityClassName: r.dataMovePriorityClass,
Privileged: r.privileged,
}
}
@@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []

dataPathMgr := datapath.NewManager(1)

return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), ""), nil
}

func TestPodVolumeRestoreReconcile(t *testing.T) {

@@ -229,7 +229,7 @@ func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Sch
}

for _, backup := range backupList.Items {
if backup.Status.Phase == "" || backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress {
if backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress {
log.Debugf("%s/%s still has backups that are in InProgress or New...", schedule.Namespace, schedule.Name)
return true
}

@@ -149,13 +149,6 @@ func TestReconcileOfSchedule(t *testing.T) {
expectedPhase: string(velerov1.SchedulePhaseEnabled),
backup: builder.ForBackup("ns", "name-20220905120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Phase(velerov1.BackupPhaseNew).Result(),
},
{
name: "schedule already has backup with empty phase (not yet reconciled).",
schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(),
fakeClockTime: "2017-01-01 12:00:00",
expectedPhase: string(velerov1.SchedulePhaseEnabled),
backup: builder.ForBackup("ns", "name-20220905120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Phase("").Result(),
},
}

for _, test := range tests {
@@ -222,10 +215,10 @@ func TestReconcileOfSchedule(t *testing.T) {
backups := &velerov1.BackupList{}
require.NoError(t, client.List(ctx, backups))

// If backup associated with schedule's status is in New or InProgress or empty phase,
// If backup associated with schedule's status is in New or InProgress,
// new backup shouldn't be submitted.
if test.backup != nil &&
(test.backup.Status.Phase == "" || test.backup.Status.Phase == velerov1.BackupPhaseNew || test.backup.Status.Phase == velerov1.BackupPhaseInProgress) {
(test.backup.Status.Phase == velerov1.BackupPhaseNew || test.backup.Status.Phase == velerov1.BackupPhaseInProgress) {
assert.Len(t, backups.Items, 1)
require.NoError(t, client.Delete(ctx, test.backup))
}
@@ -486,19 +479,4 @@ func TestCheckIfBackupInNewOrProgress(t *testing.T) {
reconciler = NewScheduleReconciler("namespace", logger, client, metrics.NewServerMetrics(), false)
result = reconciler.checkIfBackupInNewOrProgress(testSchedule)
assert.True(t, result)

// Clean backup in InProgress phase.
err = client.Delete(ctx, inProgressBackup)
require.NoError(t, err, "fail to delete backup in InProgress phase in TestCheckIfBackupInNewOrProgress: %v", err)

// Create backup with empty phase (not yet reconciled).
emptyPhaseBackup := builder.ForBackup("ns", "backup-3").
ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).
Phase("").Result()
err = client.Create(ctx, emptyPhaseBackup)
require.NoError(t, err, "fail to create backup with empty phase in TestCheckIfBackupInNewOrProgress: %v", err)

reconciler = NewScheduleReconciler("namespace", logger, client, metrics.NewServerMetrics(), false)
result = reconciler.checkIfBackupInNewOrProgress(testSchedule)
assert.True(t, result)
}
@@ -35,7 +35,6 @@ import (

"github.com/vmware-tanzu/velero/pkg/nodeagent"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/csi"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -49,12 +48,6 @@ type CSISnapshotExposeParam struct {
// SourceNamespace is the original namespace of the volume that the snapshot is taken for
SourceNamespace string

// SourcePVCName is the original name of the PVC that the snapshot is taken for
SourcePVCName string

// SourcePVName is the name of PV for SourcePVC
SourcePVName string

// AccessMode defines the mode to access the snapshot
AccessMode string

@@ -196,7 +189,6 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
backupPVCReadOnly := false
spcNoRelabeling := false
backupPVCAnnotations := map[string]string{}
intoleratableNodes := []string{}
if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists {
if value.StorageClass != "" {
backupPVCStorageClass = value.StorageClass
@@ -214,15 +206,6 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
if len(value.Annotations) > 0 {
backupPVCAnnotations = value.Annotations
}

if _, found := backupPVCAnnotations[util.VSphereCNSFastCloneAnno]; found {
if n, err := kube.GetPVAttachedNodes(ctx, csiExposeParam.SourcePVName, e.kubeClient.StorageV1()); err != nil {
curLog.WithField("source PV", csiExposeParam.SourcePVName).WithError(err).Warnf("Failed to get attached node for source PV, ignore %s annotation", util.VSphereCNSFastCloneAnno)
delete(backupPVCAnnotations, util.VSphereCNSFastCloneAnno)
} else {
intoleratableNodes = n
}
}
}

backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly, backupPVCAnnotations)
@@ -253,7 +236,6 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.O
spcNoRelabeling,
csiExposeParam.NodeOS,
csiExposeParam.PriorityClassName,
intoleratableNodes,
)
if err != nil {
return errors.Wrap(err, "error to create backup pod")
@@ -381,13 +363,8 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
|
||||
diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
|
||||
}
|
||||
|
||||
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
diag += fmt.Sprintf("error listing events, err: %v\n", err)
|
||||
}
|
||||
|
||||
if pod != nil {
|
||||
diag += kube.DiagnosePod(pod, events)
|
||||
diag += kube.DiagnosePod(pod)
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
|
||||
@@ -397,7 +374,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
|
||||
}
|
||||
|
||||
if pvc != nil {
|
||||
diag += kube.DiagnosePVC(pvc, events)
|
||||
diag += kube.DiagnosePVC(pvc)
|
||||
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
|
||||
@@ -409,7 +386,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
|
||||
}
|
||||
|
||||
if vs != nil {
|
||||
diag += csi.DiagnoseVS(vs, events)
|
||||
diag += csi.DiagnoseVS(vs)
|
||||
|
||||
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
|
||||
if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
|
||||
@@ -587,7 +564,6 @@ func (e *csiSnapshotExposer) createBackupPod(
|
||||
spcNoRelabeling bool,
|
||||
nodeOS string,
|
||||
priorityClassName string,
|
||||
intoleratableNodes []string,
|
||||
) (*corev1api.Pod, error) {
|
||||
podName := ownerObject.Name
|
||||
|
||||
@@ -688,18 +664,6 @@ func (e *csiSnapshotExposer) createBackupPod(
|
||||
}
|
||||
|
||||
var podAffinity *corev1api.Affinity
|
||||
if len(intoleratableNodes) > 0 {
|
||||
if affinity == nil {
|
||||
affinity = &kube.LoadAffinity{}
|
||||
}
|
||||
|
||||
affinity.NodeSelector.MatchExpressions = append(affinity.NodeSelector.MatchExpressions, metav1.LabelSelectorRequirement{
|
||||
Key: "kubernetes.io/hostname",
|
||||
Values: intoleratableNodes,
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
})
|
||||
}
|
||||
|
||||
if affinity != nil {
|
||||
podAffinity = kube.ToSystemAffinity([]*kube.LoadAffinity{affinity})
|
||||
}
|
||||
|
||||
@@ -153,7 +153,6 @@ func TestCreateBackupPodWithPriorityClass(t *testing.T) {
|
||||
false, // spcNoRelabeling
|
||||
kube.NodeOSLinux,
|
||||
tc.expectedPriorityClass,
|
||||
nil,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tc.description)
|
||||
@@ -238,7 +237,6 @@ func TestCreateBackupPodWithMissingConfigMap(t *testing.T) {
|
||||
false, // spcNoRelabeling
|
||||
kube.NodeOSLinux,
|
||||
"", // empty priority class since config map is missing
|
||||
nil,
|
||||
)
|
||||
|
||||
// Should succeed even when config map is missing
|
||||
|
||||
@@ -39,11 +39,8 @@ import (
|
||||
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
"github.com/vmware-tanzu/velero/pkg/util"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
|
||||
storagev1api "k8s.io/api/storage/v1"
|
||||
)
|
||||
|
||||
type reactor struct {
|
||||
@@ -159,31 +156,6 @@ func TestExpose(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
pvName := "pv-1"
|
||||
volumeAttachement1 := &storagev1api.VolumeAttachment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "va1",
|
||||
},
|
||||
Spec: storagev1api.VolumeAttachmentSpec{
|
||||
Source: storagev1api.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
NodeName: "node-1",
|
||||
},
|
||||
}
|
||||
|
||||
volumeAttachement2 := &storagev1api.VolumeAttachment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "va2",
|
||||
},
|
||||
Spec: storagev1api.VolumeAttachmentSpec{
|
||||
Source: storagev1api.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
NodeName: "node-2",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
snapshotClientObj []runtime.Object
|
||||
@@ -197,7 +169,6 @@ func TestExpose(t *testing.T) {
|
||||
expectedReadOnlyPVC bool
|
||||
expectedBackupPVCStorageClass string
|
||||
expectedAffinity *corev1api.Affinity
|
||||
expectedPVCAnnotation map[string]string
|
||||
}{
|
||||
{
|
||||
name: "wait vs ready fail",
|
||||
@@ -653,117 +624,6 @@ func TestExpose(t *testing.T) {
|
||||
expectedBackupPVCStorageClass: "fake-sc-read-only",
|
||||
expectedAffinity: nil,
|
||||
},
|
||||
{
|
||||
name: "IntolerateSourceNode, get source node fail",
|
||||
ownerBackup: backup,
|
||||
exposeParam: CSISnapshotExposeParam{
|
||||
SnapshotName: "fake-vs",
|
||||
SourceNamespace: "fake-ns",
|
||||
SourcePVName: pvName,
|
||||
StorageClass: "fake-sc",
|
||||
AccessMode: AccessModeFileSystem,
|
||||
OperationTimeout: time.Millisecond,
|
||||
ExposeTimeout: time.Millisecond,
|
||||
BackupPVCConfig: map[string]velerotypes.BackupPVC{
|
||||
"fake-sc": {
|
||||
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
|
||||
},
|
||||
},
|
||||
Affinity: nil,
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
vsObject,
|
||||
vscObj,
|
||||
},
|
||||
kubeClientObj: []runtime.Object{
|
||||
daemonSet,
|
||||
},
|
||||
kubeReactors: []reactor{
|
||||
{
|
||||
verb: "list",
|
||||
resource: "volumeattachments",
|
||||
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, errors.New("fake-create-error")
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedAffinity: nil,
|
||||
expectedPVCAnnotation: nil,
|
||||
},
|
||||
{
|
||||
name: "IntolerateSourceNode, get empty source node",
|
||||
ownerBackup: backup,
|
||||
exposeParam: CSISnapshotExposeParam{
|
||||
SnapshotName: "fake-vs",
|
||||
SourceNamespace: "fake-ns",
|
||||
SourcePVName: pvName,
|
||||
StorageClass: "fake-sc",
|
||||
AccessMode: AccessModeFileSystem,
|
||||
OperationTimeout: time.Millisecond,
|
||||
ExposeTimeout: time.Millisecond,
|
||||
BackupPVCConfig: map[string]velerotypes.BackupPVC{
|
||||
"fake-sc": {
|
||||
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
|
||||
},
|
||||
},
|
||||
Affinity: nil,
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
vsObject,
|
||||
vscObj,
|
||||
},
|
||||
kubeClientObj: []runtime.Object{
|
||||
daemonSet,
|
||||
},
|
||||
expectedAffinity: nil,
|
||||
expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
|
||||
},
|
||||
{
|
||||
name: "IntolerateSourceNode, get source nodes",
|
||||
ownerBackup: backup,
|
||||
exposeParam: CSISnapshotExposeParam{
|
||||
SnapshotName: "fake-vs",
|
||||
SourceNamespace: "fake-ns",
|
||||
SourcePVName: pvName,
|
||||
StorageClass: "fake-sc",
|
||||
AccessMode: AccessModeFileSystem,
|
||||
OperationTimeout: time.Millisecond,
|
||||
ExposeTimeout: time.Millisecond,
|
||||
BackupPVCConfig: map[string]velerotypes.BackupPVC{
|
||||
"fake-sc": {
|
||||
Annotations: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
|
||||
},
|
||||
},
|
||||
Affinity: nil,
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
vsObject,
|
||||
vscObj,
|
||||
},
|
||||
kubeClientObj: []runtime.Object{
|
||||
daemonSet,
|
||||
volumeAttachement1,
|
||||
volumeAttachement2,
|
||||
},
|
||||
expectedAffinity: &corev1api.Affinity{
|
||||
NodeAffinity: &corev1api.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &corev1api.NodeSelector{
|
||||
NodeSelectorTerms: []corev1api.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []corev1api.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/hostname",
|
||||
Operator: corev1api.NodeSelectorOpNotIn,
|
||||
Values: []string{"node-1", "node-2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPVCAnnotation: map[string]string{util.VSphereCNSFastCloneAnno: "true"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -845,12 +705,6 @@ func TestExpose(t *testing.T) {
|
||||
if test.expectedAffinity != nil {
|
||||
assert.Equal(t, test.expectedAffinity, backupPod.Spec.Affinity)
|
||||
}
|
||||
|
||||
if test.expectedPVCAnnotation != nil {
|
||||
assert.Equal(t, test.expectedPVCAnnotation, backupPVC.Annotations)
|
||||
} else {
|
||||
assert.Empty(t, backupPVC.Annotations)
|
||||
}
|
||||
} else {
|
||||
assert.EqualError(t, err, test.err)
|
||||
}
|
||||
@@ -1288,7 +1142,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1314,7 +1167,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1343,7 +1195,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1362,7 +1213,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1408,7 +1258,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1424,7 +1273,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1442,7 +1290,6 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1640,74 +1487,6 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&backupPVCWithVolumeName,
|
||||
&backupPV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
&backupVSWithVSC,
|
||||
&backupVSC,
|
||||
},
|
||||
expected: `begin diagnose CSI exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-6, message message-6
|
||||
PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VS event reason reason-4, message message-4
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -287,13 +287,8 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
|
||||
diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
|
||||
}
|
||||
|
||||
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
diag += fmt.Sprintf("error listing events, err: %v\n", err)
|
||||
}
|
||||
|
||||
if pod != nil {
|
||||
diag += kube.DiagnosePod(pod, events)
|
||||
diag += kube.DiagnosePod(pod)
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
|
||||
@@ -303,7 +298,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
|
||||
}
|
||||
|
||||
if pvc != nil {
|
||||
diag += kube.DiagnosePVC(pvc, events)
|
||||
diag += kube.DiagnosePVC(pvc)
|
||||
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
|
||||
|
||||
@@ -549,7 +549,6 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -575,7 +574,6 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -604,7 +602,6 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -623,7 +620,6 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -762,60 +758,6 @@ Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&restorePodWithNodeName,
|
||||
&restorePVCWithVolumeName,
|
||||
&restorePV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose restore exposer
|
||||
Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-5, message message-5
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -73,9 +73,6 @@ type PodVolumeExposeParam struct {
|
||||
|
||||
// PriorityClassName is the priority class name for the data mover pod
|
||||
PriorityClassName string
|
||||
|
||||
// Privileged indicates whether to create the pod with a privileged container
|
||||
Privileged bool
|
||||
}
|
||||
|
||||
// PodVolumeExposer is the interfaces for a pod volume exposer
|
||||
@@ -156,7 +153,7 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
|
||||
|
||||
curLog.WithField("path", path).Infof("Host path is retrieved for pod %s, volume %s", param.ClientPodName, param.ClientPodVolume)
|
||||
|
||||
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged)
|
||||
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error to create hosting pod")
|
||||
}
|
||||
@@ -251,13 +248,8 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
|
||||
diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
|
||||
}
|
||||
|
||||
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
diag += fmt.Sprintf("error listing events, err: %v\n", err)
|
||||
}
|
||||
|
||||
if pod != nil {
|
||||
diag += kube.DiagnosePod(pod, events)
|
||||
diag += kube.DiagnosePod(pod)
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
|
||||
@@ -277,7 +269,7 @@ func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.Ob
|
||||
}
|
||||
|
||||
func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
|
||||
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool) (*corev1api.Pod, error) {
|
||||
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string) (*corev1api.Pod, error) {
|
||||
hostingPodName := ownerObject.Name
|
||||
|
||||
containerName := string(ownerObject.UID)
|
||||
@@ -335,7 +327,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
|
||||
args = append(args, podInfo.logLevelArgs...)
|
||||
|
||||
var securityCtx *corev1api.PodSecurityContext
|
||||
var containerSecurityCtx *corev1api.SecurityContext
|
||||
nodeSelector := map[string]string{}
|
||||
podOS := corev1api.PodOS{}
|
||||
if nodeOS == kube.NodeOSWindows {
|
||||
@@ -368,9 +359,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
|
||||
securityCtx = &corev1api.PodSecurityContext{
|
||||
RunAsUser: &userID,
|
||||
}
|
||||
containerSecurityCtx = &corev1api.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
}
|
||||
|
||||
nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
|
||||
podOS.Name = kube.NodeOSLinux
|
||||
@@ -406,7 +394,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
|
||||
Env: podInfo.env,
|
||||
EnvFrom: podInfo.envFrom,
|
||||
Resources: resources,
|
||||
SecurityContext: containerSecurityCtx,
|
||||
},
|
||||
},
|
||||
PriorityClassName: priorityClassName,
|
||||
|
||||
@@ -190,29 +190,6 @@ func TestPodVolumeExpose(t *testing.T) {
|
||||
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "succeed with privileged pod",
|
||||
ownerBackup: backup,
|
||||
exposeParam: PodVolumeExposeParam{
|
||||
ClientNamespace: "fake-ns",
|
||||
ClientPodName: "fake-client-pod",
|
||||
ClientPodVolume: "fake-client-volume",
|
||||
Privileged: true,
|
||||
},
|
||||
kubeClientObj: []runtime.Object{
|
||||
podWithNode,
|
||||
node,
|
||||
daemonSet,
|
||||
},
|
||||
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
|
||||
return datapath.AccessPoint{
|
||||
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
|
||||
}, nil
|
||||
},
|
||||
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
|
||||
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -466,7 +443,6 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -492,7 +468,6 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -589,48 +564,6 @@ end diagnose pod volume exposer`,
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-4, message message-4
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -39,8 +39,6 @@ import (
|
||||
type PVCAction struct {
|
||||
log logrus.FieldLogger
|
||||
crClient crclient.Client
|
||||
// map[namespace]->[map[pvcVolumes]->[]podName]
|
||||
nsPVCs map[string]map[string][]string
|
||||
}
|
||||
|
||||
func NewPVCAction(f client.Factory) plugincommon.HandlerInitializer {
|
||||
@@ -80,18 +78,31 @@ func (a *PVCAction) GetRelatedItems(item runtime.Unstructured, backup *v1.Backup
|
||||
|
||||
// Adds pods mounting this PVC to ensure that multiple pods mounting the same RWX
|
||||
// volume get backed up together.
|
||||
pvcs, err := a.getPVCList(pvc.Namespace)
|
||||
pods := new(corev1api.PodList)
|
||||
err := a.crClient.List(context.Background(), pods, crclient.InNamespace(pvc.Namespace))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "failed to list pods")
|
||||
}
|
||||
|
||||
for _, pod := range pvcs[pvc.Name] {
|
||||
a.log.Infof("Adding related Pod %s to PVC %s", pod, pvc.Name)
|
||||
relatedItems = append(relatedItems, velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.Pods,
|
||||
Namespace: pvc.Namespace,
|
||||
Name: pod,
|
||||
})
|
||||
for i := range pods.Items {
|
||||
for _, volume := range pods.Items[i].Spec.Volumes {
|
||||
if volume.VolumeSource.PersistentVolumeClaim == nil {
|
||||
continue
|
||||
}
|
||||
if volume.PersistentVolumeClaim.ClaimName == pvc.Name {
|
||||
if kube.IsPodRunning(&pods.Items[i]) != nil {
|
||||
a.log.Infof("Related pod %s is not running, not adding to ItemBlock for PVC %s", pods.Items[i].Name, pvc.Name)
|
||||
} else {
|
||||
a.log.Infof("Adding related Pod %s to PVC %s", pods.Items[i].Name, pvc.Name)
|
||||
relatedItems = append(relatedItems, velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.Pods,
|
||||
Namespace: pods.Items[i].Namespace,
|
||||
Name: pods.Items[i].Name,
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Gather groupedPVCs based on VGS label provided in the backup
|
||||
@@ -106,35 +117,6 @@ func (a *PVCAction) GetRelatedItems(item runtime.Unstructured, backup *v1.Backup
|
||||
return relatedItems, nil
|
||||
}
|
||||
|
||||
func (a *PVCAction) getPVCList(ns string) (map[string][]string, error) {
|
||||
if a.nsPVCs == nil {
|
||||
a.nsPVCs = make(map[string]map[string][]string)
|
||||
}
|
||||
pvcList, ok := a.nsPVCs[ns]
|
||||
if ok {
|
||||
return pvcList, nil
|
||||
}
|
||||
pvcList = make(map[string][]string)
|
||||
pods := new(corev1api.PodList)
|
||||
err := a.crClient.List(context.Background(), pods, crclient.InNamespace(ns))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to list pods")
|
||||
}
|
||||
for i := range pods.Items {
|
||||
if kube.IsPodRunning(&pods.Items[i]) != nil {
|
||||
a.log.Debugf("Pod %s is not running, not adding to Pod list for PVC IBA plugin", pods.Items[i].Name)
|
||||
continue
|
||||
}
|
||||
for _, volume := range pods.Items[i].Spec.Volumes {
|
||||
if volume.VolumeSource.PersistentVolumeClaim != nil {
|
||||
pvcList[volume.VolumeSource.PersistentVolumeClaim.ClaimName] = append(pvcList[volume.VolumeSource.PersistentVolumeClaim.ClaimName], pods.Items[i].Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
a.nsPVCs[ns] = pvcList
|
||||
return pvcList, nil
|
||||
}
|
||||
|
||||
func (a *PVCAction) Name() string {
|
||||
return "PVCItemBlockAction"
|
||||
}
|
||||
|
||||
@@ -143,10 +143,6 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
|
||||
return nil, errors.Errorf("data is not available in config map %s", configName)
|
||||
}
|
||||
|
||||
if len(cm.Data) > 1 {
|
||||
return nil, errors.Errorf("more than one keys are found in ConfigMap %s's data. only expect one", configName)
|
||||
}
|
||||
|
||||
jsonString := ""
|
||||
for _, v := range cm.Data {
|
||||
jsonString = v
|
||||
|
||||
@@ -249,7 +249,6 @@ func TestGetConfigs(t *testing.T) {
|
||||
cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"loadConcurrency\":{\"globalConfig\": 5}}").Result()
|
||||
cmWithPriorityClass := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"high-priority\"}").Result()
|
||||
cmWithPriorityClassAndOther := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"low-priority\", \"loadConcurrency\":{\"globalConfig\": 3}}").Result()
|
||||
cmWithMultipleKeysInData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key-1", "{}", "fake-key-2", "{}").Result()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -332,14 +331,6 @@ func TestGetConfigs(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ConfigMap's Data has more than one key",
|
||||
namespace: "fake-ns",
|
||||
kubeClientObj: []runtime.Object{
|
||||
cmWithMultipleKeysInData,
|
||||
},
|
||||
expectErr: "more than one keys are found in ConfigMap node-agent-config's data. only expect one",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -81,7 +81,7 @@ func TestEnsureRepo(t *testing.T) {
|
||||
namespace: "fake-ns",
|
||||
bsl: "fake-bsl",
|
||||
repositoryType: "fake-repo-type",
|
||||
err: "error getting backup repository list: no kind is registered for the type v1.BackupRepositoryList in scheme",
|
||||
err: "error getting backup repository list: no kind is registered for the type v1.BackupRepositoryList in scheme \"pkg/runtime/scheme.go:100\"",
|
||||
},
|
||||
{
|
||||
name: "success on existing repo",
|
||||
@@ -128,7 +128,7 @@ func TestEnsureRepo(t *testing.T) {
|
||||
|
||||
repo, err := ensurer.EnsureRepo(t.Context(), velerov1.DefaultNamespace, test.namespace, test.bsl, test.repositoryType)
|
||||
if err != nil {
|
||||
require.ErrorContains(t, err, test.err)
|
||||
require.EqualError(t, err, test.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -190,7 +190,7 @@ func TestCreateBackupRepositoryAndWait(t *testing.T) {
|
||||
namespace: "fake-ns",
|
||||
bsl: "fake-bsl",
|
||||
repositoryType: "fake-repo-type",
|
||||
err: "unable to create backup repository resource: no kind is registered for the type v1.BackupRepository in scheme",
|
||||
err: "unable to create backup repository resource: no kind is registered for the type v1.BackupRepository in scheme \"pkg/runtime/scheme.go:100\"",
|
||||
},
|
||||
{
|
||||
name: "get repo fail",
|
||||
@@ -252,7 +252,7 @@ func TestCreateBackupRepositoryAndWait(t *testing.T) {
|
||||
RepositoryType: test.repositoryType,
|
||||
})
|
||||
if err != nil {
|
||||
require.ErrorContains(t, err, test.err)
|
||||
require.EqualError(t, err, test.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -449,35 +449,6 @@ func StartNewJob(
|
||||
return maintenanceJob.Name, nil
|
||||
}
|
||||
|
||||
// buildTolerationsForMaintenanceJob builds the tolerations for maintenance jobs.
|
||||
// It includes the required Windows toleration for backward compatibility and filters
|
||||
// tolerations from the Velero deployment to only include those with keys that are
|
||||
// in the ThirdPartyTolerations allowlist, following the same pattern as labels and annotations.
|
||||
func buildTolerationsForMaintenanceJob(deployment *appsv1api.Deployment) []corev1api.Toleration {
|
||||
// Start with the Windows toleration for backward compatibility
|
||||
windowsToleration := corev1api.Toleration{
|
||||
Key: "os",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "windows",
|
||||
}
|
||||
result := []corev1api.Toleration{windowsToleration}
|
||||
|
||||
// Filter tolerations from the Velero deployment to only include allowed ones
|
||||
// Only tolerations that exist on the deployment AND have keys in the allowlist are inherited
|
||||
deploymentTolerations := veleroutil.GetTolerationsFromVeleroServer(deployment)
|
||||
for _, k := range util.ThirdPartyTolerations {
|
||||
for _, toleration := range deploymentTolerations {
|
||||
if toleration.Key == k {
|
||||
result = append(result, toleration)
|
||||
break // Only add the first matching toleration for each allowed key
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func getPriorityClassName(ctx context.Context, cli client.Client, config *velerotypes.JobConfigs, logger logrus.FieldLogger) string {
|
||||
// Use the priority class name from the global job configuration if available
|
||||
// Note: Priority class is only read from global config, not per-repository
|
||||
@@ -622,8 +593,15 @@ func buildJob(
|
||||
SecurityContext: podSecurityContext,
|
||||
Volumes: volumes,
|
||||
ServiceAccountName: serviceAccount,
|
||||
Tolerations: buildTolerationsForMaintenanceJob(deployment),
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
Tolerations: []corev1api.Toleration{
|
||||
{
|
||||
Key: "os",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "windows",
|
||||
},
|
||||
},
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -698,7 +698,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
{
|
||||
name: "list job error",
|
||||
runtimeScheme: schemeFail,
|
||||
expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
|
||||
expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme \"pkg/runtime/scheme.go:100\"",
|
||||
},
|
||||
{
|
||||
name: "job not exist",
|
||||
@@ -847,7 +847,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
history, err := WaitAllJobsComplete(test.ctx, fakeClient, repo, 3, velerotest.NewLogger())
|
||||
|
||||
if test.expectedError != "" {
|
||||
require.ErrorContains(t, err, test.expectedError)
|
||||
require.EqualError(t, err, test.expectedError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1481,291 +1481,3 @@ func TestBuildJobWithPriorityClassName(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTolerationsForMaintenanceJob(t *testing.T) {
|
||||
windowsToleration := corev1api.Toleration{
|
||||
Key: "os",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "windows",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
deploymentTolerations []corev1api.Toleration
|
||||
expectedTolerations []corev1api.Toleration
|
||||
}{
|
||||
{
|
||||
name: "no tolerations should only include Windows toleration",
|
||||
deploymentTolerations: nil,
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty tolerations should only include Windows toleration",
|
||||
deploymentTolerations: []corev1api.Toleration{},
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non-allowed toleration should not be inherited",
|
||||
deploymentTolerations: []corev1api.Toleration{
|
||||
{
|
||||
Key: "vng-ondemand",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "amd64",
|
||||
},
|
||||
},
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allowed toleration should be inherited",
|
||||
deploymentTolerations: []corev1api.Toleration{
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
},
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed allowed and non-allowed tolerations should only inherit allowed",
|
||||
deploymentTolerations: []corev1api.Toleration{
|
||||
{
|
||||
Key: "vng-ondemand", // not in allowlist
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "amd64",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly", // in allowlist
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
{
|
||||
Key: "custom-key", // not in allowlist
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "custom-value",
|
||||
},
|
||||
},
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple allowed tolerations should all be inherited",
|
||||
deploymentTolerations: []corev1api.Toleration{
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
},
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a deployment with the specified tolerations
|
||||
deployment := &appsv1api.Deployment{
|
||||
Spec: appsv1api.DeploymentSpec{
|
||||
Template: corev1api.PodTemplateSpec{
|
||||
Spec: corev1api.PodSpec{
|
||||
Tolerations: tc.deploymentTolerations,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := buildTolerationsForMaintenanceJob(deployment)
|
||||
assert.Equal(t, tc.expectedTolerations, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildJobWithTolerationsInheritance(t *testing.T) {
|
||||
// Define allowed tolerations that would be set on Velero deployment
|
||||
allowedTolerations := []corev1api.Toleration{
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
}
|
||||
|
||||
// Mixed tolerations (allowed and non-allowed)
|
||||
mixedTolerations := []corev1api.Toleration{
|
||||
{
|
||||
Key: "vng-ondemand", // not in allowlist
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "amd64",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly", // in allowlist
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
}
|
||||
|
||||
// Windows toleration that should always be present
|
||||
windowsToleration := corev1api.Toleration{
|
||||
Key: "os",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "windows",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
deploymentTolerations []corev1api.Toleration
|
||||
expectedTolerations []corev1api.Toleration
|
||||
}{
|
||||
{
|
||||
name: "no tolerations on deployment should only have Windows toleration",
|
||||
deploymentTolerations: nil,
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allowed tolerations should be inherited along with Windows toleration",
|
||||
deploymentTolerations: allowedTolerations,
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
{
|
||||
Key: "kubernetes.azure.com/scalesetpriority",
|
||||
Operator: "Equal",
|
||||
Effect: "NoSchedule",
|
||||
Value: "spot",
|
||||
},
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed tolerations should only inherit allowed ones",
|
||||
deploymentTolerations: mixedTolerations,
|
||||
expectedTolerations: []corev1api.Toleration{
|
||||
windowsToleration,
|
||||
{
|
||||
Key: "CriticalAddonsOnly",
|
||||
Operator: "Exists",
|
||||
Effect: "NoSchedule",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a new scheme and add necessary API types
|
||||
localScheme := runtime.NewScheme()
|
||||
err := velerov1api.AddToScheme(localScheme)
|
||||
require.NoError(t, err)
|
||||
err = appsv1api.AddToScheme(localScheme)
|
||||
require.NoError(t, err)
|
||||
err = batchv1api.AddToScheme(localScheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a deployment with the specified tolerations
|
||||
deployment := &appsv1api.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "velero",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: appsv1api.DeploymentSpec{
|
||||
Template: corev1api.PodTemplateSpec{
|
||||
Spec: corev1api.PodSpec{
|
||||
Containers: []corev1api.Container{
|
||||
{
|
||||
Name: "velero",
|
||||
Image: "velero/velero:latest",
|
||||
},
|
||||
},
|
||||
Tolerations: tc.deploymentTolerations,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a backup repository
|
||||
repo := &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "velero",
|
||||
},
|
||||
Spec: velerov1api.BackupRepositorySpec{
|
||||
VolumeNamespace: "velero",
|
||||
BackupStorageLocation: "default",
|
||||
},
|
||||
}
|
||||
|
||||
// Create fake client and add the deployment
|
||||
client := fake.NewClientBuilder().WithScheme(localScheme).WithObjects(deployment).Build()
|
||||
|
||||
// Create minimal job configs and resources
|
||||
jobConfig := &velerotypes.JobConfigs{}
|
||||
logLevel := logrus.InfoLevel
|
||||
logFormat := logging.NewFormatFlag()
|
||||
logFormat.Set("text")
|
||||
|
||||
// Call buildJob
|
||||
job, err := buildJob(client, t.Context(), repo, "default", jobConfig, logLevel, logFormat, logrus.New())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the tolerations are set correctly
|
||||
assert.Equal(t, tc.expectedTolerations, job.Spec.Template.Spec.Tolerations)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +84,4 @@ type NodeAgentConfigs struct {
|
||||
|
||||
// PriorityClassName is the priority class name for data mover pods created by the node agent
|
||||
PriorityClassName string `json:"priorityClassName,omitempty"`
|
||||
|
||||
// PrivilegedFsBackup determines whether to create fs-backup pods as privileged pods
|
||||
PrivilegedFsBackup bool `json:"privilegedFsBackup,omitempty"`
|
||||
}
|
||||
|
||||
@@ -689,7 +689,7 @@ func WaitUntilVSCHandleIsReady(
    return vsc, nil
}

func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string {
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
    vscName := ""
    readyToUse := false
    errMessage := ""
@@ -710,14 +710,6 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) s

    diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)

    if events != nil {
        for _, e := range events.Items {
            if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning {
                diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message)
            }
        }
    }

    return diag
}

@@ -1699,7 +1699,6 @@ func TestDiagnoseVS(t *testing.T) {
    testCases := []struct {
        name string
        vs *snapshotv1api.VolumeSnapshot
        events *corev1api.EventList
        expected string
    }{
        {
@@ -1782,81 +1781,11 @@ func TestDiagnoseVS(t *testing.T) {
            },
            expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
        },
        {
            name: "VS with VSC and empty event",
            vs: &snapshotv1api.VolumeSnapshot{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-vs",
                    Namespace: "fake-ns",
                },
                Status: &snapshotv1api.VolumeSnapshotStatus{
                    BoundVolumeSnapshotContentName: &vscName,
                    ReadyToUse: &readyToUse,
                    Error: &snapshotv1api.VolumeSnapshotError{},
                },
            },
            events: &corev1api.EventList{},
            expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
        },
        {
            name: "VS with VSC and events",
            vs: &snapshotv1api.VolumeSnapshot{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-vs",
                    Namespace: "fake-ns",
                    UID: "fake-vs-uid",
                },
                Status: &snapshotv1api.VolumeSnapshotStatus{
                    BoundVolumeSnapshotContentName: &vscName,
                    ReadyToUse: &readyToUse,
                    Error: &snapshotv1api.VolumeSnapshotError{},
                },
            },
            events: &corev1api.EventList{Items: []corev1api.Event{
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-1",
                    Message: "message-1",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-2",
                    Message: "message-2",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-3",
                    Message: "message-3",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-4",
                    Message: "message-4",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-5",
                    Message: "message-5",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-6",
                    Message: "message-6",
                },
            }},
            expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            diag := DiagnoseVS(tc.vs, tc.events)
            diag := DiagnoseVS(tc.vs)
            assert.Equal(t, tc.expected, diag)
        })
    }

@@ -16,7 +16,6 @@ limitations under the License.
package kube

import (
    "context"
    "math"
    "sync"
    "time"
@@ -183,13 +182,13 @@ func (es *eventSink) Create(event *corev1api.Event) (*corev1api.Event, error) {
        return event, nil
    }

    return es.sink.CreateWithEventNamespaceWithContext(context.Background(), event)
    return es.sink.CreateWithEventNamespace(event)
}

func (es *eventSink) Update(event *corev1api.Event) (*corev1api.Event, error) {
    return es.sink.UpdateWithEventNamespaceWithContext(context.Background(), event)
    return es.sink.UpdateWithEventNamespace(event)
}

func (es *eventSink) Patch(event *corev1api.Event, data []byte) (*corev1api.Event, error) {
    return es.sink.PatchWithEventNamespaceWithContext(context.Background(), event, data)
    return es.sink.PatchWithEventNamespace(event, data)
}

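The Create, Update and Patch methods above match the shape of client-go's record.EventSink interface; this hunk swaps the WithContext call variants for the plain WithEventNamespace ones, and the import hunk above it shrinks by one line, consistent with dropping the context import. Purely as a hedged illustration of where such a sink plugs in — the package name, namespace, component string and wiring below are assumptions, not necessarily how Velero does it:

```
package events

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/record"
)

// newRecorder shows the usual broadcaster wiring: anything implementing
// record.EventSink (three methods with the same signatures as eventSink above)
// can be handed to StartRecordingToSink.
func newRecorder(events typedcorev1.EventsGetter) record.EventRecorder {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: events.Events("velero")})
    return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "node-agent"})
}
```
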
@@ -268,21 +268,13 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
    return nil
}

func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
func DiagnosePod(pod *corev1api.Pod) string {
    diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)

    for _, condition := range pod.Status.Conditions {
        diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
    }

    if events != nil {
        for _, e := range events.Items {
            if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
                diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
            }
        }
    }

    return diag
}

@@ -896,11 +896,10 @@ func TestDiagnosePod(t *testing.T) {
    testCases := []struct {
        name string
        pod *corev1api.Pod
        events *corev1api.EventList
        expected string
    }{
        {
            name: "pod with all info but event",
            name: "pod with all info",
            pod: &corev1api.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pod",
@@ -929,111 +928,11 @@ func TestDiagnosePod(t *testing.T) {
            },
            expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
        },
        {
            name: "pod with all info and empty event list",
            pod: &corev1api.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pod",
                    Namespace: "fake-ns",
                },
                Spec: corev1api.PodSpec{
                    NodeName: "fake-node",
                },
                Status: corev1api.PodStatus{
                    Phase: corev1api.PodPending,
                    Conditions: []corev1api.PodCondition{
                        {
                            Type: corev1api.PodInitialized,
                            Status: corev1api.ConditionTrue,
                            Reason: "fake-reason-1",
                            Message: "fake-message-1",
                        },
                        {
                            Type: corev1api.PodScheduled,
                            Status: corev1api.ConditionFalse,
                            Reason: "fake-reason-2",
                            Message: "fake-message-2",
                        },
                    },
                },
            },
            events: &corev1api.EventList{},
            expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
        },
        {
            name: "pod with all info and events",
            pod: &corev1api.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pod",
                    Namespace: "fake-ns",
                    UID: "fake-pod-uid",
                },
                Spec: corev1api.PodSpec{
                    NodeName: "fake-node",
                },
                Status: corev1api.PodStatus{
                    Phase: corev1api.PodPending,
                    Conditions: []corev1api.PodCondition{
                        {
                            Type: corev1api.PodInitialized,
                            Status: corev1api.ConditionTrue,
                            Reason: "fake-reason-1",
                            Message: "fake-message-1",
                        },
                        {
                            Type: corev1api.PodScheduled,
                            Status: corev1api.ConditionFalse,
                            Reason: "fake-reason-2",
                            Message: "fake-message-2",
                        },
                    },
                },
            },
            events: &corev1api.EventList{Items: []corev1api.Event{
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-1",
                    Message: "message-1",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-2",
                    Message: "message-2",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-3",
                    Message: "message-3",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-4",
                    Message: "message-4",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-5",
                    Message: "message-5",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-6",
                    Message: "message-6",
                },
            }},
            expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            diag := DiagnosePod(tc.pod, tc.events)
            diag := DiagnosePod(tc.pod)
            assert.Equal(t, tc.expected, diag)
        })
    }

@@ -463,18 +463,8 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
    return pvc, nil
}

func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string {
    diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)

    if events != nil {
        for _, e := range events.Items {
            if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning {
                diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message)
            }
        }
    }

    return diag
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
    return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
}

func DiagnosePV(pv *corev1api.PersistentVolume) string {
@@ -564,19 +554,3 @@ func GetPVAttachedNode(ctx context.Context, pv string, storageClient storagev1.S

    return "", nil
}

func GetPVAttachedNodes(ctx context.Context, pv string, storageClient storagev1.StorageV1Interface) ([]string, error) {
    vaList, err := storageClient.VolumeAttachments().List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "error listing volumeattachment")
    }

    nodes := []string{}
    for _, va := range vaList.Items {
        if va.Spec.Source.PersistentVolumeName != nil && *va.Spec.Source.PersistentVolumeName == pv {
            nodes = append(nodes, va.Spec.NodeName)
        }
    }

    return nodes, nil
}

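After this change the Diagnose helpers take only the object being inspected, with the event lookup removed. Purely as a usage sketch — the logger, the fetched objects, and the `kube` import alias (assumed to point at Velero's kube util package) are placeholders, not code from this diff:

```
// Sketch: emit one diagnostic line per object while troubleshooting a stuck
// data path. pod, pvc and pv are assumed to have been fetched already.
func logDiagnostics(log logrus.FieldLogger, pod *corev1api.Pod, pvc *corev1api.PersistentVolumeClaim, pv *corev1api.PersistentVolume) {
    log.Warn(kube.DiagnosePod(pod))
    log.Warn(kube.DiagnosePVC(pvc))
    log.Warn(kube.DiagnosePV(pv))
}
```
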
@@ -1593,11 +1593,10 @@ func TestDiagnosePVC(t *testing.T) {
    testCases := []struct {
        name string
        pvc *corev1api.PersistentVolumeClaim
        events *corev1api.EventList
        expected string
    }{
        {
            name: "pvc with all info but events",
            name: "pvc with all info",
            pvc: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pvc",
@@ -1612,83 +1611,11 @@ func TestDiagnosePVC(t *testing.T) {
            },
            expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
        },
        {
            name: "pvc with all info and empty events",
            pvc: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pvc",
                    Namespace: "fake-ns",
                },
                Spec: corev1api.PersistentVolumeClaimSpec{
                    VolumeName: "fake-pv",
                },
                Status: corev1api.PersistentVolumeClaimStatus{
                    Phase: corev1api.ClaimPending,
                },
            },
            events: &corev1api.EventList{},
            expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
        },
        {
            name: "pvc with all info and events",
            pvc: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "fake-pvc",
                    Namespace: "fake-ns",
                    UID: "fake-pvc-uid",
                },
                Spec: corev1api.PersistentVolumeClaimSpec{
                    VolumeName: "fake-pv",
                },
                Status: corev1api.PersistentVolumeClaimStatus{
                    Phase: corev1api.ClaimPending,
                },
            },
            events: &corev1api.EventList{Items: []corev1api.Event{
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-1",
                    Message: "message-1",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-2",
                    Message: "message-2",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-3",
                    Message: "message-3",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-4",
                    Message: "message-4",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
                    Type: corev1api.EventTypeNormal,
                    Reason: "reason-5",
                    Message: "message-5",
                },
                {
                    InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
                    Type: corev1api.EventTypeWarning,
                    Reason: "reason-6",
                    Message: "message-6",
                },
            }},
            expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            diag := DiagnosePVC(tc.pvc, tc.events)
            diag := DiagnosePVC(tc.pvc)
            assert.Equal(t, tc.expected, diag)
        })
    }

@@ -371,16 +371,15 @@ func VerifyJSONConfigs(ctx context.Context, namespace string, crClient client.Cl
        return errors.Errorf("data is not available in ConfigMap %s", configName)
    }

    // Verify all the keys in ConfigMap's data.
    jsonString := ""
    for _, v := range cm.Data {
        jsonString = v
    }

    configs := configType
    err = json.Unmarshal([]byte(jsonString), configs)
    if err != nil {
        return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName)
    }
    configs := configType
    err = json.Unmarshal([]byte(jsonString), configs)
    if err != nil {
        return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName)
    }

    return nil

@@ -28,7 +28,3 @@ var ThirdPartyTolerations = []string{
    "kubernetes.azure.com/scalesetpriority",
    "CriticalAddonsOnly",
}

const (
    VSphereCNSFastCloneAnno = "csi.vsphere.volume/fast-provisioning"
)

@@ -23,8 +23,6 @@ By default, `velero install` does not install Velero's [File System Backup][3].

If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add the file system backup to your existing install.

Note that for some use cases (including installation on OpenShift clusters) the fs-backup pods must run in a Privileged security context. This is configured through the node-agent configmap (see below) by setting `privilegedFsBackup` to `true` in the configmap.

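For illustration only, the data of such a configmap could contain the flag below; the key matches the `privilegedFsBackup` JSON tag in the NodeAgentConfigs hunk earlier in this diff, but this fragment is a sketch rather than the documentation's full sample:

```
{
    "privilegedFsBackup": true
}
```
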
## CSI Snapshot Data Movement

Velero node-agent is required by [CSI Snapshot Data Movement][12] when Velero built-in data mover is used. By default, `velero install` does not install Velero's node-agent. To enable it, specify the `--use-node-agent` flag.

@@ -15,7 +15,7 @@ Note: If less resources are assigned to data mover pods, the data movement activ
Refer to [Performance Guidance][3] for a guidance of performance vs. resource usage, and it is highly recommended that you perform your own testing to find the best resource limits for your data.

Velero introduces a new section in the node-agent configMap, called ```podResources```, through which you can set customized resources configurations for data mover pods.
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-config```.
Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.

### Sample
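The sample body itself falls outside this hunk. Purely as an illustration of the ```podResources``` section described above — the field names and values here are assumptions, not the repository's actual sample:

```
{
    "podResources": {
        "cpuRequest": "1000m",
        "cpuLimit": "1000m",
        "memoryRequest": "512Mi",
        "memoryLimit": "1Gi"
    }
}
```

Because node-agent only reads this configMap at startup, restart it after editing, for example with `kubectl rollout restart ds node-agent -n velero` (assuming the default daemonset name).
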
@@ -39,19 +39,19 @@ To create the configMap, save something like the above sample to a json file and
kubectl create cm node-agent-config -n velero --from-file=<json file name>
```

To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
1. Open the node-agent daemonset spec
```
kubectl edit ds node-agent -n velero
```
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
```
spec:
  template:
    spec:
      containers:
      - args:
        - --node-agent-configmap=<configMap name>
        - --node-agent-config=<configMap name>
```

### Priority Class
@@ -126,4 +126,4 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
[1]: csi-snapshot-data-movement.md
[2]: file-system-backup.md
[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
[4]: performance-guidance.md
[4]: performance-guidance.md

@@ -27,22 +27,22 @@ To create the configMap, save something like the above sample to a json file and
kubectl create cm node-agent-config -n velero --from-file=<json file name>
```

To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
1. Open the node-agent daemonset spec
```
kubectl edit ds node-agent -n velero
```
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
```
spec:
  template:
    spec:
      containers:
      - args:
        - --node-agent-configmap=<configMap name>
        - --node-agent-config=<configMap name>
```

[1]: csi-snapshot-data-movement.md
[2]: file-system-backup.md
[3]: node-agent-concurrency.md
[4]: data-movement-node-selection.md
[4]: data-movement-node-selection.md

@@ -184,7 +184,7 @@ ginkgo: ${GOBIN}/ginkgo

# This target does not run if ginkgo is already in $GOBIN
${GOBIN}/ginkgo:
    GOBIN=${GOBIN} go install github.com/onsi/ginkgo/v2/ginkgo@v2.22.0
    GOBIN=${GOBIN} go install github.com/onsi/ginkgo/v2/ginkgo@v2.19.0

.PHONY: run-e2e
run-e2e: ginkgo