Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-31 00:52:07 +00:00

Compare commits: v1.16.1 ... release-1.

22 commits:

32402880b6
c686c59360
c53f3fb4fb
fb0abf8245
6676647706
4c3b7943f3
a60808256d
befd9d4b51
5ae1caef9d
cc2dc02cbc
189a5b2836
0fc7e2f98a
8adfd8d0b1
78fd58fb43
8f51c1c08c
fd9f3fe79f
043005c7a4
1017d7aa6a
6709a8a24b
3415f39a76
8aeb8a2e70
a8ce0fe3a4
.github/workflows/e2e-test-kind.yaml (vendored, 37 lines changed)

@@ -11,6 +11,8 @@ jobs:
   # Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
   build:
     runs-on: ubuntu-latest
+    outputs:
+      minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
     steps:
       - name: Check out the code
         uses: actions/checkout@v4
@@ -44,6 +46,26 @@ jobs:
         run: |
           IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
           docker save velero:pr-test-linux-amd64 -o ./velero.tar
+      # Check and build MinIO image once for all e2e tests
+      - name: Check Bitnami MinIO Dockerfile version
+        id: minio-version
+        run: |
+          DOCKERFILE_SHA=$(curl -s https://api.github.com/repos/bitnami/containers/commits?path=bitnami/minio/2025/debian-12/Dockerfile\&per_page=1 | jq -r '.[0].sha')
+          echo "dockerfile_sha=${DOCKERFILE_SHA}" >> $GITHUB_OUTPUT
+      - name: Cache MinIO Image
+        uses: actions/cache@v4
+        id: minio-cache
+        with:
+          path: ./minio-image.tar
+          key: minio-bitnami-${{ steps.minio-version.outputs.dockerfile_sha }}
+      - name: Build MinIO Image from Bitnami Dockerfile
+        if: steps.minio-cache.outputs.cache-hit != 'true'
+        run: |
+          echo "Building MinIO image from Bitnami Dockerfile..."
+          git clone --depth 1 https://github.com/bitnami/containers.git /tmp/bitnami-containers
+          cd /tmp/bitnami-containers/bitnami/minio/2025/debian-12
+          docker build -t bitnami/minio:local .
+          docker save bitnami/minio:local > ${{ github.workspace }}/minio-image.tar
   # Create json of k8s versions to test
   # from guide: https://stackoverflow.com/a/65094398/4590470
   setup-test-matrix:
@@ -86,9 +108,20 @@ jobs:
         uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
+      # Fetch the pre-built MinIO image from the build job
+      - name: Fetch built MinIO Image
+        uses: actions/cache@v4
+        id: minio-cache
+        with:
+          path: ./minio-image.tar
+          key: minio-bitnami-${{ needs.build.outputs.minio-dockerfile-sha }}
+      - name: Load MinIO Image
+        run: |
+          echo "Loading MinIO image..."
+          docker load < ./minio-image.tar
       - name: Install MinIO
-        run:
-          docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
+        run: |
+          docker run -d --rm -p 9000:9000 -e "MINIO_ROOT_USER=minio" -e "MINIO_ROOT_PASSWORD=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:local
       - uses: engineerd/setup-kind@v0.6.2
         with:
           skipClusterLogsExport: true
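The caching scheme above keys the MinIO image tarball on the SHA of the most recent upstream commit that touched Bitnami's MinIO Dockerfile, so the image is rebuilt only when that Dockerfile actually changes. Below is a minimal Go sketch of the same cache-key derivation the workflow performs with curl and jq; the API endpoint and the minio-bitnami- key prefix come from the workflow itself, everything else is illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Ask the GitHub API for the newest commit that touched the Dockerfile.
	url := "https://api.github.com/repos/bitnami/containers/commits" +
		"?path=bitnami/minio/2025/debian-12/Dockerfile&per_page=1"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var commits []struct {
		SHA string `json:"sha"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&commits); err != nil {
		panic(err)
	}
	if len(commits) == 0 {
		panic("no commits returned")
	}
	// The key changes only when the upstream Dockerfile changes, so the
	// cached minio-image.tar is reused across runs and across the fan-out jobs.
	fmt.Println("cache key: minio-bitnami-" + commits[0].SHA)
}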

@@ -13,7 +13,7 @@
 # limitations under the License.

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.11-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
 	go clean -modcache -cache

 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.11-bookworm AS restic-builder

 ARG GOPROXY
 ARG BIN
@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
 	go clean -modcache -cache

 # Velero image packing section
-FROM paketobuildpacks/run-jammy-tiny:0.2.60
+FROM paketobuildpacks/run-jammy-tiny:0.2.73

 LABEL maintainer="Xun Jiang <jxun@vmware.com>"

@@ -82,4 +82,3 @@ COPY --from=velero-builder /output /
 COPY --from=restic-builder /output /

 USER cnb:cnb
-

@@ -15,7 +15,7 @@
 ARG OS_VERSION=1809

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.10-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
Tiltfile (2 lines changed)

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.23.8 as tilt-helper
+FROM golang:1.23.11 as tilt-helper

 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

@@ -1,3 +1,27 @@
+## v1.16.2
+
+### Download
+https://github.com/vmware-tanzu/velero/releases/tag/v1.16.2
+
+### Container Image
+`velero/velero:v1.16.2`
+
+### Documentation
+https://velero.io/docs/v1.16/
+
+### Upgrading
+https://velero.io/docs/v1.16/upgrade-to-1.16/
+
+### All Changes
+* Update "Default Volumes to Fs Backup" to "File System Backup (Default)" (#9105, @shubham-pampattiwar)
+* Fix missing defaultVolumesToFsBackup flag output in Velero describe backup cmd (#9103, @shubham-pampattiwar)
+* Add imagePullSecrets inheritance for VGDP pod and maintenance job. (#9102, @blackpiglet)
+* Fix issue #9077, don't block backup deletion on list VS error (#9101, @Lyndon-Li)
+* Mounted cloud credentials should not be world-readable (#9094, @sseago)
+* Allow for proper tracking of multiple hooks per container (#9060, @sseago)
+* Add BSL status check for backup/restore operations. (#9010, @blackpiglet)
+
+
 ## v1.16.1

 ### Download
changelogs/unreleased/9244-priyansh17 (new file, 1 line)

@@ -0,0 +1 @@
+Update AzureAD Microsoft Authentication Library to v1.5.0
changelogs/unreleased/9261-priyansh17 (new file, 1 line)

@@ -0,0 +1 @@
+Backport to 1.16 (PR#9244 Update AzureAD Microsoft Authentication Library to v1.5.0)
go.mod (4 lines changed)

@@ -2,7 +2,7 @@ module github.com/vmware-tanzu/velero

 go 1.23.0

-toolchain go1.23.8
+toolchain go1.23.11

 require (
 	cloud.google.com/go/storage v1.50.0
@@ -75,7 +75,7 @@ require (
 	cloud.google.com/go/monitoring v1.21.2 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
go.sum (4 lines changed)

@@ -95,8 +95,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
 github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI=

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM --platform=$TARGETPLATFORM golang:1.23.8-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.23.11-bookworm

 ARG GOPROXY

@@ -1,8 +1,8 @@
diff --git a/go.mod b/go.mod
index 5f939c481..5c5db077f 100644
index 5f939c481..3ff6e6fa1 100644
--- a/go.mod
+++ b/go.mod
@@ -24,32 +24,32 @@ require (
@@ -24,32 +24,31 @@ require (
github.com/restic/chunker v0.4.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
@@ -16,7 +16,7 @@ index 5f939c481..5c5db077f 100644
- google.golang.org/api v0.106.0
+ golang.org/x/crypto v0.36.0
+ golang.org/x/net v0.38.0
+ golang.org/x/oauth2 v0.7.0
+ golang.org/x/oauth2 v0.27.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/term v0.30.0
@@ -27,10 +27,10 @@ index 5f939c481..5c5db077f 100644
require (
- cloud.google.com/go v0.108.0 // indirect
- cloud.google.com/go/compute v1.15.1 // indirect
+ cloud.google.com/go v0.110.0 // indirect
+ cloud.google.com/go/compute v1.19.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/iam v0.10.0 // indirect
+ cloud.google.com/go v0.110.0 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
+ cloud.google.com/go/iam v0.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -49,7 +49,7 @@ index 5f939c481..5c5db077f 100644
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
@@ -63,11 +63,13 @@ require (
@@ -63,11 +62,13 @@ require (
go.opencensus.io v0.24.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
@@ -66,26 +66,27 @@ index 5f939c481..5c5db077f 100644
-go 1.18
+go 1.23.0
+
+toolchain go1.23.7
+toolchain go1.23.11
\ No newline at end of file
diff --git a/go.sum b/go.sum
index 026e1d2fa..836a9b274 100644
index 026e1d2fa..d7857bb2b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,23 +1,26 @@
@@ -1,23 +1,24 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.108.0 h1:xntQwnfn8oHGX0crLVinvHM+AhXvi3QHQIEcX/2hiWk=
-cloud.google.com/go v0.108.0/go.mod h1:lNUfQqusBJp0bgAg6qrHgYFYbTB+dOiob1itwnlD33Q=
-cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
-cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
-cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
-cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
@@ -105,7 +106,7 @@ index 026e1d2fa..836a9b274 100644
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
@@ -54,6 +57,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
@@ -54,6 +55,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
@@ -113,7 +114,7 @@ index 026e1d2fa..836a9b274 100644
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -70,8 +74,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
@@ -70,8 +72,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -124,7 +125,7 @@ index 026e1d2fa..836a9b274 100644
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -82,17 +86,18 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
@@ -82,17 +84,18 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -148,7 +149,7 @@ index 026e1d2fa..836a9b274 100644
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
@@ -114,6 +119,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -114,6 +117,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6 h1:nz7i1au+nDzgExfqW5Zl6q85XNTvYoGnM5DHiQC0yYs=
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -156,7 +157,7 @@ index 026e1d2fa..836a9b274 100644
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk=
@@ -129,6 +135,7 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P
@@ -129,6 +133,7 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
@@ -164,7 +165,7 @@ index 026e1d2fa..836a9b274 100644
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -172,8 +179,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
@@ -172,8 +177,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -175,7 +176,7 @@ index 026e1d2fa..836a9b274 100644
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -189,17 +196,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
@@ -189,17 +194,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -186,8 +187,8 @@ index 026e1d2fa..836a9b274 100644
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -199,7 +200,7 @@ index 026e1d2fa..836a9b274 100644
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -214,17 +221,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
@@ -214,17 +219,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -223,7 +224,7 @@ index 026e1d2fa..836a9b274 100644
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -237,8 +244,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
@@ -237,8 +242,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
@@ -234,7 +235,7 @@ index 026e1d2fa..836a9b274 100644
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
@@ -246,15 +253,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
@@ -246,15 +251,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -254,7 +255,7 @@ index 026e1d2fa..836a9b274 100644
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -266,14 +273,15 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
@@ -266,14 +271,15 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=

@@ -71,7 +71,8 @@ func (n *namespacedFileStore) Path(selector *corev1api.SecretKeySelector) (strin

 	keyFilePath := filepath.Join(n.fsRoot, fmt.Sprintf("%s-%s", selector.Name, selector.Key))

-	file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	// owner RW perms, group R perms, no public perms
+	file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640)
 	if err != nil {
 		return "", errors.Wrap(err, "unable to open credentials file for writing")
 	}
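The permission change above is the fix for "Mounted cloud credentials should not be world-readable" (#9094): 0644 lets any user on the node read the credentials file, while 0640 limits it to owner read/write plus group read. A standalone sketch of the same idea using only the standard library; Velero itself goes through its filesystem abstraction, and the file name here is made up.

package main

import (
	"log"
	"os"
)

func main() {
	// 0o640: owner RW, group R, no permissions for others.
	f, err := os.OpenFile("credentials-sample", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o640)
	if err != nil {
		log.Fatalf("unable to open credentials file for writing: %v", err)
	}
	defer f.Close()

	if _, err := f.WriteString("secret-material\n"); err != nil {
		log.Fatal(err)
	}
	info, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("mode: %v", info.Mode()) // typically -rw-r----- on a fresh file
}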

@@ -46,6 +46,9 @@ type hookKey struct {
 	// Container indicates the container hooks use.
 	// For hooks specified in the backup/restore spec, the container might be the same under different hookName.
 	container string
+	// hookIndex contains the slice index for the specific hook, in order to track multiple hooks
+	// for the same container
+	hookIndex int
 }

 // hookStatus records the execution status of a specific hook.
@@ -83,7 +86,7 @@ func NewHookTracker() *HookTracker {
 // Add adds a hook to the hook tracker
 // Add must precede the Record for each individual hook.
 // In other words, a hook must be added to the tracker before its execution result is recorded.
-func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
+func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int) {
 	ht.lock.Lock()
 	defer ht.lock.Unlock()

@@ -94,6 +97,7 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
 		container: container,
 		hookPhase: hookPhase,
 		hookName:  hookName,
+		hookIndex: hookIndex,
 	}

 	if _, ok := ht.tracker[key]; !ok {
@@ -108,7 +112,7 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
 // Record records the hook's execution status
 // Add must precede the Record for each individual hook.
 // In other words, a hook must be added to the tracker before its execution result is recorded.
-func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
+func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int, hookFailed bool, hookErr error) error {
 	ht.lock.Lock()
 	defer ht.lock.Unlock()

@@ -119,6 +123,7 @@ func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName
 		container: container,
 		hookPhase: hookPhase,
 		hookName:  hookName,
+		hookIndex: hookIndex,
 	}

 	if _, ok := ht.tracker[key]; !ok {
@@ -179,24 +184,24 @@ func NewMultiHookTracker() *MultiHookTracker {
 }

 // Add adds a backup/restore hook to the tracker
-func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
+func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int) {
 	mht.lock.Lock()
 	defer mht.lock.Unlock()

 	if _, ok := mht.trackers[name]; !ok {
 		mht.trackers[name] = NewHookTracker()
 	}
-	mht.trackers[name].Add(podNamespace, podName, container, source, hookName, hookPhase)
+	mht.trackers[name].Add(podNamespace, podName, container, source, hookName, hookPhase, hookIndex)
 }

 // Record records a backup/restore hook execution status
-func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
+func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookIndex int, hookFailed bool, hookErr error) error {
 	mht.lock.RLock()
 	defer mht.lock.RUnlock()

 	var err error
 	if _, ok := mht.trackers[name]; ok {
-		err = mht.trackers[name].Record(podNamespace, podName, container, source, hookName, hookPhase, hookFailed, hookErr)
+		err = mht.trackers[name].Record(podNamespace, podName, container, source, hookName, hookPhase, hookIndex, hookFailed, hookErr)
 	} else {
 		err = fmt.Errorf("the backup/restore not exist in hook tracker, backup/restore name: %s", name)
 	}
|
||||
func TestHookTracker_Add(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -50,8 +50,8 @@ func TestHookTracker_Add(t *testing.T) {
|
||||
|
||||
func TestHookTracker_Record(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -67,10 +67,10 @@ func TestHookTracker_Record(t *testing.T) {
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
|
||||
err = tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", false, nil)
|
||||
err = tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.hookFailed)
|
||||
}
|
||||
@@ -78,29 +78,30 @@ func TestHookTracker_Record(t *testing.T) {
|
||||
func TestHookTracker_Stat(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "")
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0)
|
||||
tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
|
||||
attempted, failed := tracker.Stat()
|
||||
assert.Equal(t, 2, attempted)
|
||||
assert.Equal(t, 3, attempted)
|
||||
assert.Equal(t, 1, failed)
|
||||
}
|
||||
|
||||
func TestHookTracker_IsComplete(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0, true, fmt.Errorf("err"))
|
||||
assert.True(t, tracker.IsComplete())
|
||||
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
assert.False(t, tracker.IsComplete())
|
||||
}
|
||||
|
||||
func TestHookTracker_HookErrs(t *testing.T) {
|
||||
tracker := NewHookTracker()
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
|
||||
hookErrs := tracker.HookErrs()
|
||||
assert.Len(t, hookErrs, 1)
|
||||
@@ -109,7 +110,7 @@ func TestHookTracker_HookErrs(t *testing.T) {
|
||||
func TestMultiHookTracker_Add(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -118,6 +119,7 @@ func TestMultiHookTracker_Add(t *testing.T) {
|
||||
hookPhase: "",
|
||||
hookSource: HookSourceAnnotation,
|
||||
hookName: "h1",
|
||||
hookIndex: 0,
|
||||
}
|
||||
|
||||
_, ok := mht.trackers["restore1"].tracker[key]
|
||||
@@ -126,8 +128,8 @@ func TestMultiHookTracker_Add(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_Record(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
err := mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
err := mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
|
||||
key := hookKey{
|
||||
podNamespace: "ns1",
|
||||
@@ -136,6 +138,7 @@ func TestMultiHookTracker_Record(t *testing.T) {
|
||||
hookPhase: "",
|
||||
hookSource: HookSourceAnnotation,
|
||||
hookName: "h1",
|
||||
hookIndex: 0,
|
||||
}
|
||||
|
||||
info := mht.trackers["restore1"].tracker[key]
|
||||
@@ -143,29 +146,31 @@ func TestMultiHookTracker_Record(t *testing.T) {
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
err = mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
|
||||
err = mht.Record("restore2", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
err = mht.Record("restore2", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMultiHookTracker_Stat(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "")
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", false, nil)
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0)
|
||||
mht.Add("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1)
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 0, false, nil)
|
||||
mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h2", "", 1, false, nil)
|
||||
|
||||
attempted, failed := mht.Stat("restore1")
|
||||
assert.Equal(t, 2, attempted)
|
||||
assert.Equal(t, 3, attempted)
|
||||
assert.Equal(t, 1, failed)
|
||||
}
|
||||
|
||||
func TestMultiHookTracker_Delete(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Delete("restore1")
|
||||
|
||||
_, ok := mht.trackers["restore1"]
|
||||
@@ -174,11 +179,11 @@ func TestMultiHookTracker_Delete(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_IsComplete(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre)
|
||||
mht.Record("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true, fmt.Errorf("err"))
|
||||
mht.Add("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0)
|
||||
mht.Record("backup1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, 0, true, fmt.Errorf("err"))
|
||||
assert.True(t, mht.IsComplete("backup1"))
|
||||
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
assert.False(t, mht.IsComplete("restore1"))
|
||||
|
||||
assert.True(t, mht.IsComplete("restore2"))
|
||||
@@ -186,8 +191,8 @@ func TestMultiHookTracker_IsComplete(t *testing.T) {
|
||||
|
||||
func TestMultiHookTracker_HookErrs(t *testing.T) {
|
||||
mht := NewMultiHookTracker()
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "")
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
mht.Add("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0)
|
||||
mht.Record("restore1", "ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", 0, true, fmt.Errorf("err"))
|
||||
|
||||
hookErrs := mht.HookErrs("restore1")
|
||||
assert.Len(t, hookErrs, 1)
|
||||
|
||||

@@ -223,7 +223,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
 			hookFromAnnotations = getPodExecHookFromAnnotations(metadata.GetAnnotations(), "", log)
 		}
 		if hookFromAnnotations != nil {
-			hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase)
+			hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, 0)

 			hookLog := log.WithFields(
 				logrus.Fields{
@@ -239,7 +239,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
 				hookLog.WithError(errExec).Error("Error executing hook")
 				hookFailed = true
 			}
-			errTracker := hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, hookFailed, errExec)
+			errTracker := hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, 0, hookFailed, errExec)
 			if errTracker != nil {
 				hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 			}
@@ -267,10 +267,10 @@ func (h *DefaultItemHookHandler) HandleHooks(
 			hooks = resourceHook.Post
 		}

-		for _, hook := range hooks {
+		for i, hook := range hooks {
 			if groupResource == kuberesource.Pods {
 				if hook.Exec != nil {
-					hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase)
+					hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, i)
 					// The remaining hooks will only be executed if modeFailError is nil.
 					// Otherwise, execution will stop and only hook collection will occur.
 					if modeFailError == nil {
@@ -291,7 +291,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
 							modeFailError = err
 						}
 					}
-					errTracker := hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, hookFailed, err)
+					errTracker := hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, i, hookFailed, err)
 					if errTracker != nil {
 						hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 					}
@@ -534,6 +534,11 @@ type PodExecRestoreHook struct {
 	HookSource string
 	Hook       velerov1api.ExecRestoreHook
 	executed   bool
+	// hookIndex contains the slice index for the specific hook from the restore spec
+	// in order to track multiple hooks. Stored here because restore hook results are recorded
+	// outside of the original slice iteration
+	// for the same container
+	hookIndex int
 }

 // GroupRestoreExecHooks returns a list of hooks to be executed in a pod grouped by
@@ -561,12 +566,13 @@ func GroupRestoreExecHooks(
 		if hookFromAnnotation.Container == "" {
 			hookFromAnnotation.Container = pod.Spec.Containers[0].Name
 		}
-		hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", HookPhase(""))
+		hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", HookPhase(""), 0)
 		byContainer[hookFromAnnotation.Container] = []PodExecRestoreHook{
 			{
 				HookName:   "<from-annotation>",
 				HookSource: HookSourceAnnotation,
 				Hook:       *hookFromAnnotation,
+				hookIndex:  0,
 			},
 		}
 		return byContainer, nil
@@ -579,7 +585,7 @@ func GroupRestoreExecHooks(
 		if !rrh.Selector.applicableTo(kuberesource.Pods, namespace, labels) {
 			continue
 		}
-		for _, rh := range rrh.RestoreHooks {
+		for i, rh := range rrh.RestoreHooks {
 			if rh.Exec == nil {
 				continue
 			}
@@ -587,6 +593,7 @@ func GroupRestoreExecHooks(
 				HookName:   rrh.Name,
 				Hook:       *rh.Exec,
 				HookSource: HookSourceSpec,
+				hookIndex:  i,
 			}
 			// default to false if attr WaitForReady not set
 			if named.Hook.WaitForReady == nil {
@@ -596,7 +603,7 @@ func GroupRestoreExecHooks(
 			if named.Hook.Container == "" {
 				named.Hook.Container = pod.Spec.Containers[0].Name
 			}
-			hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, HookPhase(""))
+			hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, HookPhase(""), i)
 			byContainer[named.Hook.Container] = append(byContainer[named.Hook.Container], named)
 		}
 	}

@@ -1151,6 +1151,7 @@ func TestGroupRestoreExecHooks(t *testing.T) {
 						WaitTimeout:  metav1.Duration{Duration: time.Minute},
 						WaitForReady: boolptr.False(),
 					},
+					hookIndex: 0,
 				},
 				{
 					HookName: "hook1",
@@ -1163,6 +1164,7 @@ func TestGroupRestoreExecHooks(t *testing.T) {
 						WaitTimeout:  metav1.Duration{Duration: time.Minute * 2},
 						WaitForReady: boolptr.False(),
 					},
+					hookIndex: 2,
 				},
 				{
 					HookName: "hook2",
@@ -1175,6 +1177,7 @@ func TestGroupRestoreExecHooks(t *testing.T) {
 						WaitTimeout:  metav1.Duration{Duration: time.Minute * 4},
 						WaitForReady: boolptr.True(),
 					},
+					hookIndex: 0,
 				},
 			},
 			"container2": {
@@ -1189,6 +1192,7 @@ func TestGroupRestoreExecHooks(t *testing.T) {
 						WaitTimeout:  metav1.Duration{Duration: time.Second * 3},
 						WaitForReady: boolptr.False(),
 					},
+					hookIndex: 1,
 				},
 			},
 		},

@@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 				hookLog.Error(err)
 				errors = append(errors, err)

-				errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
+				errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
 				if errTracker != nil {
 					hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 				}
@@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 					hookFailed = true
 				}

-				errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hookFailed, hookErr)
+				errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, hookFailed, hookErr)
 				if errTracker != nil {
 					hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 				}
@@ -239,7 +239,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 	// containers to become ready.
 	// Each unexecuted hook is logged as an error and this error will be returned from this function.
 	for _, hooks := range byContainer {
-		for _, hook := range hooks {
+		for i, hook := range hooks {
 			if hook.executed {
 				continue
 			}
@@ -252,7 +252,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 				},
 			)

-			errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
+			errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
 			if errTracker != nil {
 				hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 			}

@@ -1007,17 +1007,17 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
 	}

 	hookTracker1 := NewMultiHookTracker()
-	hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", HookPhase(""))
+	hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", HookPhase(""), 0)

 	hookTracker2 := NewMultiHookTracker()
-	hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
+	hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)

 	hookTracker3 := NewMultiHookTracker()
-	hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
-	hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", HookPhase(""))
+	hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)
+	hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", HookPhase(""), 0)

 	hookTracker4 := NewMultiHookTracker()
-	hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
+	hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""), 0)

 	tests1 := []struct {
 		name string

@@ -217,6 +217,9 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {

 	d.Println()
 	d.Printf("Velero-Native Snapshot PVs:\t%s\n", BoolPointerString(spec.SnapshotVolumes, "false", "true", "auto"))
+	if spec.DefaultVolumesToFsBackup != nil {
+		d.Printf("File System Backup (Default):\t%s\n", BoolPointerString(spec.DefaultVolumesToFsBackup, "false", "true", ""))
+	}
 	d.Printf("Snapshot Move Data:\t%s\n", BoolPointerString(spec.SnapshotMoveData, "false", "true", "auto"))
 	if len(spec.DataMover) == 0 {
 		s = defaultDataMover
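For context, BoolPointerString renders a *bool three ways: one string for true, one for false, and a fallback for nil, which is how the describe output tells an unset field apart from an explicit false (hence the nil guard above, which prints the File System Backup line only when the field is set). A sketch of such a helper, with the signature inferred from the call sites in this diff; the real implementation lives elsewhere in Velero.

package main

import "fmt"

// boolPointerString mimics the three-state formatting used by the describer.
func boolPointerString(b *bool, falseStr, trueStr, nilStr string) string {
	switch {
	case b == nil:
		return nilStr
	case *b:
		return trueStr
	default:
		return falseStr
	}
}

func main() {
	t, f := true, false
	fmt.Println(boolPointerString(nil, "false", "true", "auto")) // auto
	fmt.Println(boolPointerString(&t, "false", "true", "auto"))  // true
	fmt.Println(boolPointerString(&f, "false", "true", "auto"))  // false
}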

@@ -281,6 +281,71 @@ Hooks:

 OrderedResources:
   kind1: rs1-1, rs1-2
 `
+	input4 := builder.ForBackup("test-ns", "test-backup-4").
+		DefaultVolumesToFsBackup(true).
+		StorageLocation("backup-location").
+		Result().Spec
+
+	expect4 := `Namespaces:
+  Included: *
+  Excluded: <none>
+
+Resources:
+  Included: *
+  Excluded: <none>
+  Cluster-scoped: auto
+
+Label selector: <none>
+
+Or label selector: <none>
+
+Storage Location: backup-location
+
+Velero-Native Snapshot PVs: auto
+File System Backup (Default): true
+Snapshot Move Data: auto
+Data Mover: velero
+
+TTL: 0s
+
+CSISnapshotTimeout: 0s
+ItemOperationTimeout: 0s
+
+Hooks: <none>
+`
+
+	input5 := builder.ForBackup("test-ns", "test-backup-5").
+		DefaultVolumesToFsBackup(false).
+		StorageLocation("backup-location").
+		Result().Spec
+
+	expect5 := `Namespaces:
+  Included: *
+  Excluded: <none>
+
+Resources:
+  Included: *
+  Excluded: <none>
+  Cluster-scoped: auto
+
+Label selector: <none>
+
+Or label selector: <none>
+
+Storage Location: backup-location
+
+Velero-Native Snapshot PVs: auto
+File System Backup (Default): false
+Snapshot Move Data: auto
+Data Mover: velero
+
+TTL: 0s
+
+CSISnapshotTimeout: 0s
+ItemOperationTimeout: 0s
+
+Hooks: <none>
+`

 	testcases := []struct {
@@ -303,6 +368,16 @@ OrderedResources:
 			input:  input3,
 			expect: expect3,
 		},
+		{
+			name:   "DefaultVolumesToFsBackup is true",
+			input:  input4,
+			expect: expect4,
+		},
+		{
+			name:   "DefaultVolumesToFsBackup is false",
+			input:  input5,
+			expect: expect5,
+		},
 	}

 	for _, tc := range testcases {

@@ -102,6 +102,100 @@ Backup Template:

 Hooks: <none>

 Last Backup: 2023-06-25 15:04:05 +0000 UTC
 `
+
+	input3 := builder.ForSchedule("velero", "schedule-3").
+		Phase(velerov1api.SchedulePhaseEnabled).
+		CronSchedule("0 0 * * *").
+		Template(builder.ForBackup("velero", "backup-1").DefaultVolumesToFsBackup(true).Result().Spec).
+		LastBackupTime("2023-06-25 15:04:05").Result()
+	expect3 := `Name: schedule-3
+Namespace: velero
+Labels: <none>
+Annotations: <none>
+
+Phase: Enabled
+
+Paused: false
+
+Schedule: 0 0 * * *
+
+Backup Template:
+  Namespaces:
+    Included: *
+    Excluded: <none>
+
+  Resources:
+    Included: *
+    Excluded: <none>
+    Cluster-scoped: auto
+
+  Label selector: <none>
+
+  Or label selector: <none>
+
+  Storage Location:
+
+  Velero-Native Snapshot PVs: auto
+  File System Backup (Default): true
+  Snapshot Move Data: auto
+  Data Mover: velero
+
+  TTL: 0s
+
+  CSISnapshotTimeout: 0s
+  ItemOperationTimeout: 0s
+
+  Hooks: <none>
+
+Last Backup: 2023-06-25 15:04:05 +0000 UTC
+`
+
+	input4 := builder.ForSchedule("velero", "schedule-4").
+		Phase(velerov1api.SchedulePhaseEnabled).
+		CronSchedule("0 0 * * *").
+		Template(builder.ForBackup("velero", "backup-1").DefaultVolumesToFsBackup(false).Result().Spec).
+		LastBackupTime("2023-06-25 15:04:05").Result()
+	expect4 := `Name: schedule-4
+Namespace: velero
+Labels: <none>
+Annotations: <none>
+
+Phase: Enabled
+
+Paused: false
+
+Schedule: 0 0 * * *
+
+Backup Template:
+  Namespaces:
+    Included: *
+    Excluded: <none>
+
+  Resources:
+    Included: *
+    Excluded: <none>
+    Cluster-scoped: auto
+
+  Label selector: <none>
+
+  Or label selector: <none>
+
+  Storage Location:
+
+  Velero-Native Snapshot PVs: auto
+  File System Backup (Default): false
+  Snapshot Move Data: auto
+  Data Mover: velero
+
+  TTL: 0s
+
+  CSISnapshotTimeout: 0s
+  ItemOperationTimeout: 0s
+
+  Hooks: <none>
+
+Last Backup: 2023-06-25 15:04:05 +0000 UTC
+`
@@ -120,6 +214,16 @@ Last Backup: 2023-06-25 15:04:05 +0000 UTC
 		{
 			input:  input2,
 			expect: expect2,
 		},
+		{
+			name:   "schedule with DefaultVolumesToFsBackup is true",
+			input:  input3,
+			expect: expect3,
+		},
+		{
+			name:   "schedule with DefaultVolumesToFsBackup is false",
+			input:  input4,
+			expect: expect4,
+		},
 	}

 	for _, tc := range testcases {

@@ -56,6 +56,7 @@ import (
 	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 	"github.com/vmware-tanzu/velero/pkg/util/logging"
 	"github.com/vmware-tanzu/velero/pkg/util/results"
+	veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"
 )

 const (
@@ -417,6 +418,13 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
 			request.Status.ValidationErrors = append(request.Status.ValidationErrors,
 				fmt.Sprintf("backup can't be created because backup storage location %s is currently in read-only mode", request.StorageLocation.Name))
 		}
+
+		if !veleroutil.BSLIsAvailable(*request.StorageLocation) {
+			request.Status.ValidationErrors = append(
+				request.Status.ValidationErrors,
+				fmt.Sprintf("backup can't be created because BackupStorageLocation %s is in Unavailable status. please create a new backup after the BSL becomes available", request.StorageLocation.Name),
+			)
+		}
 	}

 	// add the storage location as a label for easy filtering later.
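The new validation refuses to create a backup when the target BackupStorageLocation is not Available, surfacing the problem immediately instead of letting the backup fail later. The diff only shows the call site of veleroutil.BSLIsAvailable; the sketch below assumes it is a simple status-phase comparison, which may not match the real helper exactly.

package main

import "fmt"

type bslPhase string

const phaseAvailable bslPhase = "Available"

type backupStorageLocation struct {
	Name   string
	Status struct{ Phase bslPhase }
}

// bslIsAvailable is an assumed stand-in for veleroutil.BSLIsAvailable.
func bslIsAvailable(bsl backupStorageLocation) bool {
	return bsl.Status.Phase == phaseAvailable
}

func main() {
	var bsl backupStorageLocation
	bsl.Name = "default"
	bsl.Status.Phase = "Unavailable"

	var validationErrors []string
	if !bslIsAvailable(bsl) {
		validationErrors = append(validationErrors, fmt.Sprintf(
			"backup can't be created because BackupStorageLocation %s is in Unavailable status. please create a new backup after the BSL becomes available",
			bsl.Name))
	}
	fmt.Println(validationErrors)
}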
@@ -156,7 +156,7 @@ func TestProcessBackupNonProcessedItems(t *testing.T) {
}

func TestProcessBackupValidationFailures(t *testing.T) {
defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Result()
defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()

tests := []struct {
name string
@@ -184,7 +184,7 @@ func TestProcessBackupValidationFailures(t *testing.T) {
{
name: "backup for read-only backup location fails validation",
backup: defaultBackup().StorageLocation("read-only").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result(),
expectedErrs: []string{"backup can't be created because backup storage location read-only is currently in read-only mode"},
},
{
@@ -200,6 +200,12 @@ func TestProcessBackupValidationFailures(t *testing.T) {
backupLocation: defaultBackupLocation,
expectedErrs: []string{"include-resources, exclude-resources and include-cluster-resources are old filter parameters.\ninclude-cluster-scoped-resources, exclude-cluster-scoped-resources, include-namespace-scoped-resources and exclude-namespace-scoped-resources are new filter parameters.\nThey cannot be used together"},
},
{
name: "BSL in unavailable state",
backup: defaultBackup().StorageLocation("unavailable").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "unavailable").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result(),
expectedErrs: []string{"backup can't be created because BackupStorageLocation unavailable is in Unavailable status. please create a new backup after the BSL becomes available"},
},
}

for _, test := range tests {
@@ -593,7 +599,7 @@ func TestDefaultVolumesToResticDeprecation(t *testing.T) {
}

func TestProcessBackupCompletions(t *testing.T) {
defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Default(true).Bucket("store-1").Result()
defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Default(true).Bucket("store-1").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()

now, err := time.Parse(time.RFC1123Z, time.RFC1123Z)
require.NoError(t, err)
@@ -653,7 +659,7 @@ func TestProcessBackupCompletions(t *testing.T) {
{
name: "backup with a specific backup location keeps it",
backup: defaultBackup().StorageLocation("alt-loc").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "alt-loc").Bucket("store-1").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "alt-loc").Bucket("store-1").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result(),
defaultVolumesToFsBackup: false,
expectedResult: &velerov1api.Backup{
TypeMeta: metav1.TypeMeta{
@@ -693,6 +699,7 @@ func TestProcessBackupCompletions(t *testing.T) {
backupLocation: builder.ForBackupStorageLocation("velero", "read-write").
Bucket("store-1").
AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite).
Phase(velerov1api.BackupStorageLocationPhaseAvailable).
Result(),
defaultVolumesToFsBackup: true,
expectedResult: &velerov1api.Backup{
@@ -1415,11 +1422,13 @@ func TestProcessBackupCompletions(t *testing.T) {
}
func TestValidateAndGetSnapshotLocations(t *testing.T) {
defaultBSL := builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "bsl").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()
tests := []struct {
name string
backup *velerov1api.Backup
locations []*velerov1api.VolumeSnapshotLocation
defaultLocations map[string]string
bsl velerov1api.BackupStorageLocation
expectedVolumeSnapshotLocationNames []string // adding these in the expected order will allow to test with better msgs in case of a test failure
expectedErrors string
expectedSuccess bool
@@ -1433,6 +1442,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "some-name").Provider("fake-provider").Result(),
},
expectedErrors: "a VolumeSnapshotLocation CRD for the location random-name with the name specified in the backup spec needs to be created before this snapshot can be executed. Error: volumesnapshotlocations.velero.io \"random-name\" not found", expectedSuccess: false,
bsl: *defaultBSL,
},
{
name: "duplicate locationName per provider: should filter out dups",
@@ -1443,6 +1453,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "multiple non-dupe location names per provider should error",
@@ -1454,6 +1465,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedErrors: "more than one VolumeSnapshotLocation name specified for provider aws: aws-us-west-1; unexpected name was aws-us-east-1",
expectedSuccess: false,
bsl: *defaultBSL,
},
{
name: "no location name for the provider exists, only one VSL for the provider: use it",
@@ -1463,6 +1475,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "no location name for the provider exists, no default, more than one VSL for the provider: error",
@@ -1472,6 +1485,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(),
},
expectedErrors: "provider aws has more than one possible volume snapshot location, and none were specified explicitly or as a default",
bsl: *defaultBSL,
},
{
name: "no location name for the provider exists, more than one VSL for the provider: the provider's default should be added",
@@ -1483,11 +1497,13 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "no existing location name and no default location name given",
backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).Result(),
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "multiple location names for a provider, default location name for another provider",
@@ -1499,6 +1515,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1", "some-name"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "location name does not correspond to any existing location and snapshotvolume disabled; should return error",
@@ -1510,6 +1527,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: nil,
expectedErrors: "a VolumeSnapshotLocation CRD for the location random-name with the name specified in the backup spec needs to be created before this snapshot can be executed. Error: volumesnapshotlocations.velero.io \"random-name\" not found", expectedSuccess: false,
bsl: *defaultBSL,
},
{
name: "duplicate locationName per provider and snapshotvolume disabled; should return only one BSL",
@@ -1520,6 +1538,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "no location name for the provider exists, only one VSL created and snapshotvolume disabled; should return the VSL",
@@ -1529,6 +1548,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"},
expectedSuccess: true,
bsl: *defaultBSL,
},
{
name: "multiple location names for a provider, no default location and backup has no location defined, but snapshotvolume disabled, should return error",
@@ -1539,6 +1559,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) {
},
expectedVolumeSnapshotLocationNames: nil,
expectedErrors: "provider aws has more than one possible volume snapshot location, and none were specified explicitly or as a default",
bsl: *defaultBSL,
},
}
@@ -22,9 +22,8 @@ import (
"fmt"
"time"

"github.com/vmware-tanzu/velero/pkg/util/csi"

jsonpatch "github.com/evanphx/json-patch/v5"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
@@ -37,8 +36,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"

"github.com/vmware-tanzu/velero/internal/credentials"
"github.com/vmware-tanzu/velero/internal/delete"
"github.com/vmware-tanzu/velero/internal/volume"
@@ -56,8 +53,10 @@ import (
repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager"
repotypes "github.com/vmware-tanzu/velero/pkg/repository/types"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/csi"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"
)

const (
@@ -202,6 +201,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}

if !veleroutil.BSLIsAvailable(*location) {
err := r.patchDeleteBackupRequestWithError(ctx, dbr, fmt.Errorf("cannot delete backup because backup storage location %s is currently in Unavailable state", location.Name))
return ctrl.Result{}, err
}

// if the request object has no labels defined, initialize an empty map since
// we will be updating labels
if dbr.Labels == nil {
@@ -264,9 +268,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if err != nil {
log.WithError(err).Errorf("Unable to download tarball for backup %s, skipping associated DeleteItemAction plugins", backup.Name)
log.Info("Cleaning up CSI volumesnapshots")
if err := r.deleteCSIVolumeSnapshots(ctx, backup, log); err != nil {
errs = append(errs, err.Error())
}
r.deleteCSIVolumeSnapshotsIfAny(ctx, backup, log)
} else {
defer closeAndRemoveFile(backupFile, r.logger)
deleteCtx := &delete.Context{
@@ -503,22 +505,22 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
return errs
}

// deleteCSIVolumeSnapshots clean up the CSI snapshots created by the backup, this should be called when the backup is failed
// deleteCSIVolumeSnapshotsIfAny clean up the CSI snapshots created by the backup, this should be called when the backup is failed
// when it's running, e.g. due to velero pod restart, and the backup.tar is failed to be downloaded from storage.
func (r *backupDeletionReconciler) deleteCSIVolumeSnapshots(ctx context.Context, backup *velerov1api.Backup, log logrus.FieldLogger) error {
func (r *backupDeletionReconciler) deleteCSIVolumeSnapshotsIfAny(ctx context.Context, backup *velerov1api.Backup, log logrus.FieldLogger) {
vsList := snapshotv1api.VolumeSnapshotList{}
if err := r.Client.List(ctx, &vsList, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
}),
}); err != nil {
return errors.Wrap(err, "error listing volume snapshots")
log.WithError(err).Warnf("Could not list volume snapshots, abort")
return
}
for _, item := range vsList.Items {
vs := item
csi.CleanupVolumeSnapshot(&vs, r.Client, log)
}
return nil
}

func (r *backupDeletionReconciler) deletePodVolumeSnapshots(ctx context.Context, backup *velerov1api.Backup) []error {
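Besides the rename, deleteCSIVolumeSnapshotsIfAny changes the error contract: a listing failure is now logged and swallowed rather than propagated, so backup deletion proceeds even when snapshot cleanup cannot run. A condensed, hedged sketch of the before/after shape of such a refactor (names simplified; listAndDelete stands in for the real list-then-CleanupVolumeSnapshot loop):

// before: the caller must handle the failure, which can block the delete
func cleanupSnapshots(ctx context.Context) error {
	if err := listAndDelete(ctx); err != nil {
		return errors.Wrap(err, "error listing volume snapshots")
	}
	return nil
}

// after: best-effort; the failure is only logged and the delete continues
func cleanupSnapshotsIfAny(ctx context.Context, log logrus.FieldLogger) {
	if err := listAndDelete(ctx); err != nil {
		log.WithError(err).Warn("Could not list volume snapshots, abort")
	}
}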
@@ -126,6 +126,9 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}
dbr := defaultTestDbr()
td := setupBackupDeletionControllerTest(t, dbr, location, backup)
@@ -254,7 +257,7 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {

t.Run("backup storage location is in read-only mode", func(t *testing.T) {
backup := builder.ForBackup(velerov1api.DefaultNamespace, "foo").StorageLocation("default").Result()
location := builder.ForBackupStorageLocation("velero", "default").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result()
location := builder.ForBackupStorageLocation("velero", "default").Phase(velerov1api.BackupStorageLocationPhaseAvailable).AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result()

td := setupBackupDeletionControllerTest(t, defaultTestDbr(), location, backup)

@@ -268,6 +271,24 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
assert.Len(t, res.Status.Errors, 1)
assert.Equal(t, "cannot delete backup because backup storage location default is currently in read-only mode", res.Status.Errors[0])
})

t.Run("backup storage location is in unavailable state", func(t *testing.T) {
backup := builder.ForBackup(velerov1api.DefaultNamespace, "foo").StorageLocation("default").Result()
location := builder.ForBackupStorageLocation("velero", "default").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result()

td := setupBackupDeletionControllerTest(t, defaultTestDbr(), location, backup)

_, err := td.controller.Reconcile(context.TODO(), td.req)
require.NoError(t, err)

res := &velerov1api.DeleteBackupRequest{}
err = td.fakeClient.Get(ctx, td.req.NamespacedName, res)
require.NoError(t, err)
assert.Equal(t, "Processed", string(res.Status.Phase))
assert.Len(t, res.Status.Errors, 1)
assert.Equal(t, "cannot delete backup because backup storage location default is currently in Unavailable state", res.Status.Errors[0])
})

t.Run("full delete, no errors", func(t *testing.T) {
input := defaultTestDbr()

@@ -297,6 +318,9 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}

snapshotLocation := &velerov1api.VolumeSnapshotLocation{
@@ -416,6 +440,9 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}

snapshotLocation := &velerov1api.VolumeSnapshotLocation{
@@ -518,6 +545,9 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}

snapshotLocation := &velerov1api.VolumeSnapshotLocation{
@@ -600,6 +630,9 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}

snapshotLocation := &velerov1api.VolumeSnapshotLocation{
@@ -41,6 +41,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/persistence"
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
"github.com/vmware-tanzu/velero/pkg/util/kube"
veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"

ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -92,6 +93,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
return ctrl.Result{}, errors.Wrapf(err, "error getting BackupStorageLocation %s", req.String())
}
if !veleroutil.BSLIsAvailable(*location) {
log.Errorf("BackupStorageLocation is in unavailable state, skip syncing backup from it.")
return ctrl.Result{}, nil
}

pluginManager := b.newPluginManager(log)
defer pluginManager.CleanupClients()
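The sync reconciler now returns early for an unavailable location. Returning an empty ctrl.Result with a nil error means controller-runtime does not requeue with backoff (that would require returning the error); the location is simply skipped until a later periodic sync finds it Available again. The guard in isolation, as a sketch with the surrounding types simplified:

if !veleroutil.BSLIsAvailable(*location) {
	// skip this location for now; the next scheduled sync re-evaluates it
	log.Errorf("BackupStorageLocation is in unavailable state, skip syncing backup from it.")
	return ctrl.Result{}, nil
}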
@@ -62,6 +62,9 @@ func defaultLocation(namespace string) *velerov1api.BackupStorageLocation {
},
Default: true,
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}
}

@@ -141,6 +144,9 @@ func defaultLocationWithLongerLocationName(namespace string) *velerov1api.Backup
},
},
},
Status: velerov1api.BackupStorageLocationStatus{
Phase: velerov1api.BackupStorageLocationPhaseAvailable,
},
}
}

@@ -177,6 +183,21 @@ var _ = Describe("Backup Sync Reconciler", func() {
namespace: "ns-1",
location: defaultLocation("ns-1"),
},
{
name: "unavailable BSL",
namespace: "ns-1",
location: builder.ForBackupStorageLocation("ns-1", "default").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result(),
cloudBackups: []*cloudBackupData{
{
backup: builder.ForBackup("ns-1", "backup-1").Result(),
backupShouldSkipSync: true,
},
{
backup: builder.ForBackup("ns-1", "backup-2").Result(),
backupShouldSkipSync: true,
},
},
},
{
name: "normal case",
namespace: "ns-1",
@@ -18,6 +18,7 @@ package controller

import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
@@ -36,6 +37,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/constant"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/util/kube"
veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"
)

const (
@@ -44,6 +46,7 @@ const (
gcFailureBSLNotFound = "BSLNotFound"
gcFailureBSLCannotGet = "BSLCannotGet"
gcFailureBSLReadOnly = "BSLReadOnly"
gcFailureBSLUnavailable = "BSLUnavailable"
)

// gcReconciler creates DeleteBackupRequests for expired backups.
@@ -144,12 +147,18 @@ func (c *gcReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re
} else {
backup.Labels[garbageCollectionFailure] = gcFailureBSLCannotGet
}

if err := c.Update(ctx, backup); err != nil {
log.WithError(err).Error("error updating backup labels")
}
return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location")
}

if !veleroutil.BSLIsAvailable(*loc) {
log.Infof("BSL %s is unavailable, cannot gc backup", loc.Name)
return ctrl.Result{}, fmt.Errorf("bsl %s is unavailable, cannot gc backup", loc.Name)
}

if loc.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly {
log.Infof("Backup cannot be garbage-collected because backup storage location %s is currently in read-only mode", loc.Name)
backup.Labels[garbageCollectionFailure] = gcFailureBSLReadOnly
@@ -46,7 +46,7 @@ func mockGCReconciler(fakeClient kbclient.Client, fakeClock *testclocks.FakeCloc

func TestGCReconcile(t *testing.T) {
fakeClock := testclocks.NewFakeClock(time.Now())
defaultBackupLocation := builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "default").Result()
defaultBackupLocation := builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "default").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()

tests := []struct {
name string
@@ -66,12 +66,12 @@ func TestGCReconcile(t *testing.T) {
{
name: "expired backup in read-only storage location is not deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-only").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result(),
},
{
name: "expired backup in read-write storage location is deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-write").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-write").AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite).Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-write").AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite).Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result(),
},
{
name: "expired backup with no pending deletion requests is deleted",
@@ -118,6 +118,12 @@ func TestGCReconcile(t *testing.T) {
},
},
},
{
name: "BSL is unavailable",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(),
backupLocation: builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "default").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result(),
expectError: true,
},
}

for _, test := range tests {
@@ -58,6 +58,7 @@ import (
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
"github.com/vmware-tanzu/velero/pkg/util/results"
veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"
pkgrestoreUtil "github.com/vmware-tanzu/velero/pkg/util/velero/restore"
)

@@ -393,6 +394,11 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) (backupInf
return backupInfo{}, nil
}

if !veleroutil.BSLIsAvailable(*info.location) {
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("the BSL %s is unavailable, cannot retrieve the backup. please retry the restore after the BSL becomes available", info.location.Name))
return backupInfo{}, nil
}

// Fill in the ScheduleName so it's easier to consume for metrics.
if restore.Spec.ScheduleName == "" {
restore.Spec.ScheduleName = info.backup.GetLabels()[api.ScheduleNameLabel]
@@ -728,6 +734,10 @@ func (r *restoreReconciler) deleteExternalResources(restore *api.Restore) error
return errors.Wrap(err, fmt.Sprintf("can't get backup info, backup: %s", restore.Spec.BackupName))
}

if !veleroutil.BSLIsAvailable(*backupInfo.location) {
return fmt.Errorf("bsl %s is unavailable, cannot get the backup info", backupInfo.location.Name)
}

// delete restore files in object storage
pluginManager := r.newPluginManager(r.logger)
defer pluginManager.CleanupClients()

@@ -66,7 +66,7 @@ func TestFetchBackupInfo(t *testing.T) {
{
name: "lister has backup",
backupName: "backup-1",
informerLocations: []*velerov1api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()},
informerLocations: []*velerov1api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()},
informerBackups: []*velerov1api.Backup{defaultBackup().StorageLocation("default").Result()},
expectedRes: defaultBackup().StorageLocation("default").Result(),
},
@@ -74,7 +74,7 @@ func TestFetchBackupInfo(t *testing.T) {
name: "lister does not have a backup, but backupSvc does",
backupName: "backup-1",
backupStoreBackup: defaultBackup().StorageLocation("default").Result(),
informerLocations: []*velerov1api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()},
informerLocations: []*velerov1api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()},
informerBackups: []*velerov1api.Backup{defaultBackup().StorageLocation("default").Result()},
expectedRes: defaultBackup().StorageLocation("default").Result(),
},
@@ -211,7 +211,7 @@ func TestProcessQueueItemSkips(t *testing.T) {
}

func TestRestoreReconcile(t *testing.T) {
defaultStorageLocation := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()
defaultStorageLocation := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()

now, err := time.Parse(time.RFC1123Z, time.RFC1123Z)
require.NoError(t, err)
@@ -464,6 +464,22 @@ func TestRestoreReconcile(t *testing.T) {
expectedCompletedTime: &timestamp,
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseInProgress).Result(),
},
{
name: "Restore creation is rejected when BSL is unavailable",
location: builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result(),
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseNew).Result(),
backup: defaultBackup().StorageLocation("default").Result(),
expectedErr: false,
expectedPhase: string(velerov1api.RestorePhaseNew),
expectedValidationErrors: []string{"the BSL %s is unavailable, cannot retrieve the backup. please retry the restore after the BSL becomes available"},
},
{
name: "Restore deletion is rejected when BSL is unavailable.",
location: builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result(),
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseCompleted).ObjectMeta(builder.WithFinalizers(ExternalResourcesFinalizer), builder.WithDeletionTimestamp(timestamp.Time)).Result(),
backup: defaultBackup().StorageLocation("default").Result(),
expectedErr: true,
},
}

formatFlag := logging.FormatText
@@ -738,7 +754,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) {
Result(),
))

location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()
location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()
require.NoError(t, r.kbClient.Create(context.Background(), location))

restore = &velerov1api.Restore{
@@ -797,7 +813,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
},
}

location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()
location := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()
require.NoError(t, r.kbClient.Create(context.Background(), location))

require.NoError(t, r.kbClient.Create(
@@ -471,14 +471,14 @@ func TestWaitRestoreExecHook(t *testing.T) {

hookTracker2 := hook.NewMultiHookTracker()
restoreName2 := "restore2"
hookTracker2.Add(restoreName2, "ns", "pod", "con1", "s1", "h1", "")
hookTracker2.Record(restoreName2, "ns", "pod", "con1", "s1", "h1", "", false, nil)
hookTracker2.Add(restoreName2, "ns", "pod", "con1", "s1", "h1", "", 0)
hookTracker2.Record(restoreName2, "ns", "pod", "con1", "s1", "h1", "", 0, false, nil)

hookTracker3 := hook.NewMultiHookTracker()
restoreName3 := "restore3"
podNs, podName, container, source, hookName := "ns", "pod", "con1", "s1", "h1"
hookFailed, hookErr := true, fmt.Errorf("hook failed")
hookTracker3.Add(restoreName3, podNs, podName, container, source, hookName, hook.PhasePre)
hookTracker3.Add(restoreName3, podNs, podName, container, source, hookName, hook.PhasePre, 0)

tests := []struct {
name string
@@ -546,7 +546,7 @@ func TestWaitRestoreExecHook(t *testing.T) {
if tc.waitSec > 0 {
go func() {
time.Sleep(time.Second * time.Duration(tc.waitSec))
tc.hookTracker.Record(tc.restore.Name, tc.podNs, tc.podName, tc.Container, tc.Source, tc.hookName, hook.PhasePre, tc.hookFailed, tc.hookErr)
tc.hookTracker.Record(tc.restore.Name, tc.podNs, tc.podName, tc.Container, tc.Source, tc.hookName, hook.PhasePre, 0, tc.hookFailed, tc.hookErr)
}()
}
@@ -681,6 +681,7 @@ func (e *csiSnapshotExposer) createBackupPod(
RestartPolicy: corev1.RestartPolicyNever,
SecurityContext: securityCtx,
Tolerations: toleration,
ImagePullSecrets: podInfo.imagePullSecrets,
},
}

@@ -510,6 +510,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
RestartPolicy: corev1.RestartPolicyNever,
SecurityContext: securityCtx,
Tolerations: toleration,
ImagePullSecrets: podInfo.imagePullSecrets,
},
}
@@ -28,14 +28,15 @@ import (
)

type inheritedPodInfo struct {
image string
serviceAccount string
env []v1.EnvVar
envFrom []v1.EnvFromSource
volumeMounts []v1.VolumeMount
volumes []v1.Volume
logLevelArgs []string
logFormatArgs []string
image string
serviceAccount string
env []v1.EnvVar
envFrom []v1.EnvFromSource
volumeMounts []v1.VolumeMount
volumes []v1.Volume
logLevelArgs []string
logFormatArgs []string
imagePullSecrets []v1.LocalObjectReference
}

func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string, osType string) (inheritedPodInfo, error) {
@@ -71,5 +72,7 @@ func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veler
}
}

podInfo.imagePullSecrets = podSpec.ImagePullSecrets

return podInfo, nil
}
@@ -26,11 +26,11 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"

"github.com/vmware-tanzu/velero/pkg/util/kube"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"

"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func TestGetInheritedPodInfo(t *testing.T) {
@@ -177,6 +177,11 @@ func TestGetInheritedPodInfo(t *testing.T) {
},
},
ServiceAccountName: "sa-1",
ImagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
},
},
@@ -317,6 +322,11 @@ func TestGetInheritedPodInfo(t *testing.T) {
"--log-level",
"debug",
},
imagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
},
}
@@ -23,6 +23,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"

"github.com/vmware-tanzu/velero/internal/velero"
)
@@ -177,7 +178,9 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
Name: "cloud-credentials",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "cloud-credentials",
// read-only for Owner, Group, Public
DefaultMode: ptr.To(int32(0444)),
SecretName: "cloud-credentials",
},
},
},
@@ -24,6 +24,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"

"github.com/vmware-tanzu/velero/internal/velero"
"github.com/vmware-tanzu/velero/pkg/builder"
@@ -404,7 +405,9 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
Name: "cloud-credentials",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "cloud-credentials",
// read-only for Owner, Group, Public
DefaultMode: ptr.To(int32(0444)),
SecretName: "cloud-credentials",
},
},
},
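Both manifests now pin the mounted credentials to an explicit file mode instead of relying on the Kubernetes default for secret volumes (0644). 0444 is an octal literal, r--r--r--: read for owner, group, and world, and no write bits, so the credentials file cannot be modified in place inside the pod. The relevant fragment, shown in isolation:

Secret: &corev1.SecretVolumeSource{
	SecretName: "cloud-credentials",
	// 0444 => read-only for Owner, Group, Public (r--r--r--)
	DefaultMode: ptr.To(int32(0444)),
},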
@@ -407,8 +407,16 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu
return maintenanceJob.Name, nil
}

func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs,
podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1.Job, error) {
func buildJob(
cli client.Client,
ctx context.Context,
repo *velerov1api.BackupRepository,
bslName string,
config *JobConfigs,
podResources kube.PodResources,
logLevel logrus.Level,
logFormat *logging.FormatFlag,
) (*batchv1.Job, error) {
// Get the Velero server deployment
deployment := &appsv1.Deployment{}
err := cli.Get(ctx, types.NamespacedName{Name: "velero", Namespace: repo.Namespace}, deployment)
@@ -431,6 +439,8 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
// Get the service account from the Velero server deployment
serviceAccount := veleroutil.GetServiceAccountFromVeleroServer(deployment)

imagePullSecrets := veleroutil.GetImagePullSecretsFromVeleroServer(deployment)

// Get image
image := veleroutil.GetVeleroServerImage(deployment)

@@ -520,6 +530,7 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
Value: "windows",
},
},
ImagePullSecrets: imagePullSecrets,
},
},
},
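buildJob now copies the server deployment's pull secrets onto the maintenance job, so repository maintenance keeps working when the Velero image lives in a private registry. The propagation chain, condensed into a sketch (error handling elided; GetImagePullSecretsFromVeleroServer is added to pkg/util/velero later in this change):

// read the running Velero server deployment
deployment := &appsv1.Deployment{}
if err := cli.Get(ctx, types.NamespacedName{Name: "velero", Namespace: repo.Namespace}, deployment); err != nil {
	return nil, err
}
// reuse its pull secrets for the maintenance job's pod template
imagePullSecrets := veleroutil.GetImagePullSecretsFromVeleroServer(deployment)
job.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets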
@@ -903,6 +903,11 @@ func TestBuildJob(t *testing.T) {
},
},
},
ImagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
},
},
@@ -912,17 +917,18 @@ func TestBuildJob(t *testing.T) {
deploy2.Spec.Template.Labels = map[string]string{"azure.workload.identity/use": "fake-label-value"}

testCases := []struct {
name string
m *JobConfigs
deploy *appsv1.Deployment
logLevel logrus.Level
logFormat *logging.FormatFlag
thirdPartyLabel map[string]string
expectedJobName string
expectedError bool
expectedEnv []v1.EnvVar
expectedEnvFrom []v1.EnvFromSource
expectedPodLabel map[string]string
name string
m *JobConfigs
deploy *appsv1.Deployment
logLevel logrus.Level
logFormat *logging.FormatFlag
thirdPartyLabel map[string]string
expectedJobName string
expectedError bool
expectedEnv []v1.EnvVar
expectedEnvFrom []v1.EnvFromSource
expectedPodLabel map[string]string
expectedImagePullSecrets []v1.LocalObjectReference
}{
{
name: "Valid maintenance job without third party labels",
@@ -964,6 +970,11 @@ func TestBuildJob(t *testing.T) {
expectedPodLabel: map[string]string{
RepositoryNameLabel: "test-123",
},
expectedImagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
{
name: "Valid maintenance job with third party labels",
@@ -1006,6 +1017,11 @@ func TestBuildJob(t *testing.T) {
RepositoryNameLabel: "test-123",
"azure.workload.identity/use": "fake-label-value",
},
expectedImagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
{
name: "Error getting Velero server deployment",
@@ -1057,7 +1073,16 @@ func TestBuildJob(t *testing.T) {
cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build()

// Call the function to test
job, err := buildJob(cli, context.TODO(), param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat)
job, err := buildJob(
cli,
context.TODO(),
param.BackupRepo,
param.BackupLocation.Name,
tc.m,
*tc.m.PodResources,
tc.logLevel,
tc.logFormat,
)

// Check the error
if tc.expectedError {
@@ -1108,6 +1133,8 @@ func TestBuildJob(t *testing.T) {
assert.Equal(t, expectedArgs, container.Args)

assert.Equal(t, tc.expectedPodLabel, job.Spec.Template.Labels)

assert.Equal(t, tc.expectedImagePullSecrets, job.Spec.Template.Spec.ImagePullSecrets)
}
})
}
@@ -63,14 +63,6 @@ type FakeRestoreProgressUpdater struct {
func (f *FakeRestoreProgressUpdater) UpdateProgress(p *uploader.Progress) {}

func TestRunBackup(t *testing.T) {
mockBRepo := udmrepomocks.NewBackupRepo(t)
mockBRepo.On("GetAdvancedFeatures").Return(udmrepo.AdvancedFeatureInfo{})

var kp kopiaProvider
kp.log = logrus.New()
kp.bkRepo = mockBRepo
updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}

testCases := []struct {
name string
hookBackupFunc func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error)
@@ -102,6 +94,14 @@ func TestRunBackup(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
mockBRepo := udmrepomocks.NewBackupRepo(t)
mockBRepo.On("GetAdvancedFeatures").Return(udmrepo.AdvancedFeatureInfo{})

var kp kopiaProvider
kp.log = logrus.New()
kp.bkRepo = mockBRepo
updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}

if tc.volMode == "" {
tc.volMode = uploader.PersistentVolumeFilesystem
}
@@ -117,10 +117,6 @@ func TestRunBackup(t *testing.T) {
}

func TestRunRestore(t *testing.T) {
var kp kopiaProvider
kp.log = logrus.New()
updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}

testCases := []struct {
name string
hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error)
@@ -153,6 +149,10 @@ func TestRunRestore(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var kp kopiaProvider
kp.log = logrus.New()
updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}

if tc.volMode == "" {
tc.volMode = uploader.PersistentVolumeFilesystem
}
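Moving the mock construction inside t.Run gives every subtest its own BackupRepo mock, so expectations registered by one case cannot leak into the next; mockery-generated constructors such as NewBackupRepo(t) also assert their expectations via t.Cleanup, which this change scopes to the individual subtest rather than the whole table. The general pattern:

for _, tc := range testCases {
	t.Run(tc.name, func(t *testing.T) {
		// fresh mock per subtest; its expectations are checked when this
		// subtest finishes, not at the end of the whole test function
		mockBRepo := udmrepomocks.NewBackupRepo(t)
		mockBRepo.On("GetAdvancedFeatures").Return(udmrepo.AdvancedFeatureInfo{})
		// ... drive kopiaProvider against mockBRepo within this subtest only
	})
}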
@@ -19,6 +19,8 @@ package velero
import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// GetNodeSelectorFromVeleroServer get the node selector from the Velero server deployment
@@ -73,6 +75,11 @@ func GetServiceAccountFromVeleroServer(deployment *appsv1.Deployment) string {
return deployment.Spec.Template.Spec.ServiceAccountName
}

// GetImagePullSecretsFromVeleroServer get the image pull secrets from the Velero server deployment
func GetImagePullSecretsFromVeleroServer(deployment *appsv1.Deployment) []v1.LocalObjectReference {
return deployment.Spec.Template.Spec.ImagePullSecrets
}

// getVeleroServerImage get the image of the Velero server deployment
func GetVeleroServerImage(deployment *appsv1.Deployment) string {
return deployment.Spec.Template.Spec.Containers[0].Image
@@ -105,3 +112,7 @@ func GetVeleroServerAnnotationValue(deployment *appsv1.Deployment, key string) s

return deployment.Spec.Template.Annotations[key]
}

func BSLIsAvailable(bsl velerov1api.BackupStorageLocation) bool {
return bsl.Status.Phase == velerov1api.BackupStorageLocationPhaseAvailable
}
@@ -21,9 +21,13 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
)

func TestGetNodeSelectorFromVeleroServer(t *testing.T) {
@@ -579,6 +583,63 @@ func TestGetServiceAccountFromVeleroServer(t *testing.T) {
}
}

func TestGetImagePullSecretsFromVeleroServer(t *testing.T) {
tests := []struct {
name string
deploy *appsv1.Deployment
want []v1.LocalObjectReference
}{
{
name: "no image pull secrets",
deploy: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
ServiceAccountName: "",
},
},
},
},
want: nil,
},
{
name: "image pull secrets",
deploy: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
ImagePullSecrets: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
{
Name: "imagePullSecret2",
},
},
},
},
},
},
want: []v1.LocalObjectReference{
{
Name: "imagePullSecret1",
},
{
Name: "imagePullSecret2",
},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got := GetImagePullSecretsFromVeleroServer(test.deploy)

require.Equal(t, test.want, got)
})
}
}

func TestGetVeleroServerImage(t *testing.T) {
tests := []struct {
name string
@@ -759,3 +820,11 @@ func TestGetVeleroServerLabelValue(t *testing.T) {
})
}
}

func TestBSLIsAvailable(t *testing.T) {
availableBSL := builder.ForBackupStorageLocation("velero", "available").Phase(velerov1api.BackupStorageLocationPhaseAvailable).Result()
unavailableBSL := builder.ForBackupStorageLocation("velero", "unavailable").Phase(velerov1api.BackupStorageLocationPhaseUnavailable).Result()

assert.True(t, BSLIsAvailable(*availableBSL))
assert.False(t, BSLIsAvailable(*unavailableBSL))
}
@@ -87,13 +87,13 @@ var ImagesMatrix = map[string]map[string][]string{
"velero-restore-helper": {"velero/velero-restore-helper:v1.15.2"},
},
"v1.16": {
"aws": {"velero/velero-plugin-for-aws:v1.12.0"},
"azure": {"velero/velero-plugin-for-microsoft-azure:v1.12.0"},
"aws": {"velero/velero-plugin-for-aws:v1.12.1"},
"azure": {"velero/velero-plugin-for-microsoft-azure:v1.12.1"},
"vsphere": {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
"gcp": {"velero/velero-plugin-for-gcp:v1.12.0"},
"datamover": {"velero/velero-plugin-for-aws:v1.12.0"},
"velero": {"velero/velero:v1.15.0"},
"velero-restore-helper": {"velero/velero:v1.16.0"},
"gcp": {"velero/velero-plugin-for-gcp:v1.12.1"},
"datamover": {"velero/velero-plugin-for-aws:v1.12.1"},
"velero": {"velero/velero:v1.16.1"},
"velero-restore-helper": {"velero/velero:v1.16.1"},
},
"main": {
"aws": {"velero/velero-plugin-for-aws:main"},