Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-25 06:02:06 +00:00)

Compare commits: dependabot... → v1.16.1 (29 commits)
| SHA1 |
|---|
| 2eb97fa8b1 |
| f64fb36508 |
| 4bd86f1275 |
| 18ef5e61ad |
| 01aa5385b5 |
| 361717296b |
| 82dce51004 |
| 659a352ed1 |
| 9eeea4f211 |
| e1068d6062 |
| bcd3d513c4 |
| 5e87c3d48e |
| ed68b43acd |
| acc8cc41c3 |
| f1271372e8 |
| 4b39481776 |
| 80837ee2ac |
| 8de844b8d3 |
| 2809de9ead |
| ea9b4f37f3 |
| 7bad9df51d |
| 0c36cc82c1 |
| 0d4fb1fd5e |
| 8f31599fe4 |
| f8ae1495ac |
| b469d9f427 |
| 87084ce3c7 |
| 3df026ffdb |
| 406a730c2a |
.github/workflows/e2e-test-kind.yaml — 4 changed lines (vendored)

```diff
@@ -121,6 +121,8 @@ jobs:
           curl -LO https://dl.k8s.io/release/v${{ matrix.k8s }}/bin/linux/amd64/kubectl
           sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+          git clone https://github.com/vmware-tanzu-experiments/distributed-data-generator.git -b main /tmp/kibishii
           GOPATH=~/go \
           CLOUD_PROVIDER=kind \
           OBJECT_STORE_PROVIDER=aws \
@@ -132,7 +134,9 @@ jobs:
           ADDITIONAL_CREDS_FILE=/tmp/credential \
           ADDITIONAL_BSL_BUCKET=additional-bucket \
           VELERO_IMAGE=velero:pr-test-linux-amd64 \
           PLUGINS=velero/velero-plugin-for-aws:latest \
           GINKGO_LABELS="${{ matrix.labels }}" \
+          KIBISHII_DIRECTORY=/tmp/kibishii/kubernetes/yaml/ \
           make -C test/ run-e2e
         timeout-minutes: 30
       - name: Upload debug bundle
```
.github/workflows/push.yml — 15 changed lines (vendored)

```diff
@@ -20,15 +20,6 @@ jobs:
         uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
-      - id: 'auth'
-        uses: google-github-actions/auth@v2
-        with:
-          credentials_json: '${{ secrets.GCS_SA_KEY }}'
-      - name: 'set up GCloud SDK'
-        uses: google-github-actions/setup-gcloud@v2
-      - name: 'use gcloud CLI'
-        run: |
-          gcloud info
       - name: Set up QEMU
         id: qemu
         uses: docker/setup-qemu-action@v3
@@ -52,12 +43,6 @@ jobs:
           token: ${{ secrets.CODECOV_TOKEN }}
           files: coverage.out
           verbose: true
-      # Use the JSON key in secret to login gcr.io
-      - uses: 'docker/login-action@v3'
-        with:
-          registry: 'gcr.io' # or REGION.docker.pkg.dev
-          username: '_json_key'
-          password: '${{ secrets.GCR_SA_KEY }}'
       # Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
       - name: Publish container image
         if: github.repository == 'vmware-tanzu/velero'
```
```diff
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache

 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS restic-builder

 ARG GOPROXY
 ARG BIN
@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache

 # Velero image packing section
-FROM paketobuildpacks/run-jammy-tiny:latest
+FROM paketobuildpacks/run-jammy-tiny:0.2.60

 LABEL maintainer="Xun Jiang <jxun@vmware.com>"
```
```diff
@@ -15,7 +15,7 @@
 ARG OS_VERSION=1809

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.23.8-bookworm AS velero-builder

 ARG GOPROXY
 ARG BIN
```
Makefile — 8 changed lines

```diff
@@ -34,11 +34,9 @@ REGISTRY ?= velero
 # docker buildx create --name=velero-builder --driver=docker-container --bootstrap --use --config ./buildkitd.toml
 # Refer to https://github.com/docker/buildx/issues/1370#issuecomment-1288516840 for more details
 INSECURE_REGISTRY ?= false
-GCR_REGISTRY ?= gcr.io/velero-gcp

 # Image name
 IMAGE ?= $(REGISTRY)/$(BIN)
-GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)

 # We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
 # that pull base images from different registries.
@@ -81,10 +79,8 @@ TAG_LATEST ?= false

 ifeq ($(TAG_LATEST), true)
 	IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
-	GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
 else
 	IMAGE_TAGS ?= $(IMAGE):$(VERSION)
-	GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
 endif

 # check buildx is enabled only if docker is in path
@@ -116,7 +112,6 @@ CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 win
 BUILD_OUTPUT_TYPE ?= docker
 BUILD_OS ?= linux
 BUILD_ARCH ?= amd64
-BUILD_TAG_GCR ?= false
 BUILD_WINDOWS_VERSION ?= ltsc2022

 ifeq ($(BUILD_OUTPUT_TYPE), docker)
@@ -134,9 +129,6 @@ ALL_OS_ARCH.windows = $(foreach os, $(filter windows,$(ALL_OS)), $(foreach arch,
 ALL_OS_ARCH = $(ALL_OS_ARCH.linux)$(ALL_OS_ARCH.windows)

 ALL_IMAGE_TAGS = $(IMAGE_TAGS)
-ifeq ($(BUILD_TAG_GCR), true)
-	ALL_IMAGE_TAGS += $(GCR_IMAGE_TAGS)
-endif

 # set git sha and tree state
 GIT_SHA = $(shell git rev-parse HEAD)
```
Tiltfile — 2 changed lines

```diff
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.23 as tilt-helper
+FROM golang:1.23.8 as tilt-helper

 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
```
```diff
@@ -1,3 +1,24 @@
+## v1.16.1
+
+### Download
+https://github.com/vmware-tanzu/velero/releases/tag/v1.16.1
+
+### Container Image
+`velero/velero:v1.16.1`
+
+### Documentation
+https://velero.io/docs/v1.16/
+
+### Upgrading
+https://velero.io/docs/v1.16/upgrade-to-1.16/
+
+### All Changes
+* Call WaitGroup.Done() once only when PVB changes to final status the first time to avoid panic (#8940, @ywk253100)
+* Add VolumeSnapshotContent into the RIA and the mustHave resource list. (#8926, @blackpiglet)
+* Warn for not found error in patching managed fields (#8916, @sseago)
+* Fix issue 8878, relief node os deduction error checks (#8911, @Lyndon-Li)
+
 ## v1.16

 ### Download
```
go.mod — 14 changed lines

```diff
@@ -2,7 +2,7 @@ module github.com/vmware-tanzu/velero

 go 1.23.0

-toolchain go1.23.6
+toolchain go1.23.8

 require (
 	cloud.google.com/go/storage v1.50.0
@@ -44,9 +44,9 @@ require (
 	go.uber.org/zap v1.27.0
 	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
 	golang.org/x/mod v0.22.0
-	golang.org/x/net v0.36.0
+	golang.org/x/net v0.38.0
 	golang.org/x/oauth2 v0.27.0
-	golang.org/x/text v0.22.0
+	golang.org/x/text v0.23.0
 	google.golang.org/api v0.218.0
 	google.golang.org/grpc v1.69.4
 	google.golang.org/protobuf v1.36.3
@@ -179,10 +179,10 @@ require (
 	go.opentelemetry.io/otel/trace v1.34.0 // indirect
 	go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.35.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
+	golang.org/x/crypto v0.36.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
```
go.sum — 24 changed lines

```diff
@@ -780,8 +780,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -866,8 +866,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -894,8 +894,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -959,14 +959,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -976,8 +976,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
```
```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM --platform=$TARGETPLATFORM golang:1.23-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.23.8-bookworm

 ARG GOPROXY
```
```diff
@@ -113,5 +113,4 @@ TAG_LATEST="$TAG_LATEST" \
 BUILD_OS="$BUILD_OS" \
 BUILD_ARCH="$BUILD_ARCH" \
 BUILD_OUTPUT_TYPE=$OUTPUT_TYPE \
-BUILD_TAG_GCR=true \
 make all-containers
```
```diff
@@ -1,5 +1,5 @@
 diff --git a/go.mod b/go.mod
-index 5f939c481..0a0a353a7 100644
+index 5f939c481..5c5db077f 100644
 --- a/go.mod
 +++ b/go.mod
 @@ -24,32 +24,32 @@ require (
@@ -14,13 +14,13 @@ index 5f939c481..0a0a353a7 100644
 - golang.org/x/term v0.4.0
 - golang.org/x/text v0.6.0
 - google.golang.org/api v0.106.0
-+ golang.org/x/crypto v0.35.0
-+ golang.org/x/net v0.36.0
++ golang.org/x/crypto v0.36.0
++ golang.org/x/net v0.38.0
 + golang.org/x/oauth2 v0.7.0
-+ golang.org/x/sync v0.11.0
-+ golang.org/x/sys v0.30.0
-+ golang.org/x/term v0.29.0
-+ golang.org/x/text v0.22.0
++ golang.org/x/sync v0.12.0
++ golang.org/x/sys v0.31.0
++ golang.org/x/term v0.30.0
++ golang.org/x/text v0.23.0
 + google.golang.org/api v0.114.0
  )

@@ -68,7 +68,7 @@ index 5f939c481..0a0a353a7 100644
 +
 +toolchain go1.23.7
 diff --git a/go.sum b/go.sum
-index 026e1d2fa..5ebc8e609 100644
+index 026e1d2fa..836a9b274 100644
 --- a/go.sum
 +++ b/go.sum
 @@ -1,23 +1,26 @@
@@ -170,8 +170,8 @@ index 026e1d2fa..5ebc8e609 100644
  golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
  -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
  -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
-+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
++golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
++golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
  golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
  golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
  golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -181,8 +181,8 @@ index 026e1d2fa..5ebc8e609 100644
  golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
  -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
  -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
-+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
++golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
++golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
  golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
  -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
  -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
@@ -194,8 +194,8 @@ index 026e1d2fa..5ebc8e609 100644
  golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
  -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
++golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
++golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
  golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
  golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
  golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -205,21 +205,21 @@ index 026e1d2fa..5ebc8e609 100644
  golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
  -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
++golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
++golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
  golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
  -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
  -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
-+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
++golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
++golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
  golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
  golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
  golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
  golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
  -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
  -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
++golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
++golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
  golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
  golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
  golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
```
```diff
@@ -810,10 +810,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 		return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
 	}

-	nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get attaching node OS for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
-	}
+	nodeOS := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)

 	if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
 		return nil, errors.Wrapf(err, "no appropriate node to run data upload for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
```
@@ -408,16 +408,6 @@ func TestReconcile(t *testing.T) {
|
||||
expectedRequeue: ctrl.Result{},
|
||||
expectedErrMsg: "failed to get PVC",
|
||||
},
|
||||
{
|
||||
name: "Dataupload should fail to get PVC attaching node",
|
||||
du: dataUploadBuilder().Result(),
|
||||
pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
|
||||
pvc: builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").StorageClass("fake-sc").Result(),
|
||||
expectedProcessed: true,
|
||||
expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
|
||||
expectedRequeue: ctrl.Result{},
|
||||
expectedErrMsg: "error to get storage class",
|
||||
},
|
||||
{
|
||||
name: "Dataupload should fail because expected node doesn't exist",
|
||||
du: dataUploadBuilder().Result(),
|
||||
|
||||
```diff
@@ -377,14 +377,14 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
 }

 func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim,
-	operationTimeout time.Duration, label map[string]string, annotation map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeType string) (*corev1.Pod, error) {
+	operationTimeout time.Duration, label map[string]string, annotation map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeOS string) (*corev1.Pod, error) {
 	restorePodName := ownerObject.Name
 	restorePVCName := ownerObject.Name

 	containerName := string(ownerObject.UID)
 	volumeName := string(ownerObject.UID)

-	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, kube.NodeOSLinux)
+	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, nodeOS)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
 	}
@@ -427,7 +427,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
 	nodeSelector := map[string]string{}
 	podOS := corev1.PodOS{}
 	toleration := []corev1.Toleration{}
-	if nodeType == kube.NodeOSWindows {
+	if nodeOS == kube.NodeOSWindows {
 		userID := "ContainerAdministrator"
 		securityCtx = &corev1.PodSecurityContext{
 			WindowsOptions: &corev1.WindowsSecurityContextOptions{
```
```diff
@@ -173,11 +173,28 @@ func newBackupper(
 				return
 			}

+			statusChangedToFinal := true
+			existObj, exist, err := b.pvbIndexer.Get(pvb)
+			if err == nil && exist {
+				existPVB, ok := existObj.(*velerov1api.PodVolumeBackup)
+				// the PVB in the indexer is already in final status, no need to call WaitGroup.Done()
+				if ok && (existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
+					existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed) {
+					statusChangedToFinal = false
+				}
+			}
+
 			// the Indexer inserts PVB directly if the PVB to be updated doesn't exist
 			if err := b.pvbIndexer.Update(pvb); err != nil {
 				log.WithError(err).Errorf("failed to update PVB %s/%s in indexer", pvb.Namespace, pvb.Name)
 			}
-			b.wg.Done()
+
+			// call WaitGroup.Done() once only when the PVB changes to final status the first time.
+			// This avoid the cases that the handler gets multiple update events whose PVBs are all in final status
+			// which causes panic with "negative WaitGroup counter" error
+			if statusChangedToFinal {
+				b.wg.Done()
+			}
 		},
 	},
 )
```
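The guard matters because sync.WaitGroup panics if Done() drives its counter below zero, which is exactly what duplicate final-status events used to cause. A minimal, standalone sketch of the failure mode and the fix (illustrative only, not Velero code):

```go
package main

import (
	"fmt"
	"sync"
)

// finalOnce mimics the PVB handler: it may receive several "final status"
// events for the same object, but must call wg.Done() exactly once per object.
type finalOnce struct {
	wg   *sync.WaitGroup
	mu   sync.Mutex
	done map[string]bool // object key -> already counted as final
}

func (f *finalOnce) handleFinalEvent(key string) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.done[key] {
		// Duplicate final event: skip Done() to avoid a negative counter.
		return
	}
	f.done[key] = true
	f.wg.Done()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	h := &finalOnce{wg: &wg, done: map[string]bool{}}
	h.handleFinalEvent("pvb-1")
	h.handleFinalEvent("pvb-1") // without the guard: panic "sync: negative WaitGroup counter"
	wg.Wait()
	fmt.Println("completed exactly once")
}
```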
```diff
@@ -17,6 +17,8 @@ limitations under the License.
 package csi

 import (
+	"fmt"
+
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -26,6 +28,7 @@ import (

 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/client"
+	"github.com/vmware-tanzu/velero/pkg/kuberesource"
 	plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
 	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
 	"github.com/vmware-tanzu/velero/pkg/util"
@@ -106,12 +109,23 @@ func (p *volumeSnapshotRestoreItemAction) Execute(
 		return nil, errors.WithStack(err)
 	}

+	if vsFromBackup.Status == nil ||
+		vsFromBackup.Status.BoundVolumeSnapshotContentName == nil {
+		p.log.Errorf("VS %s doesn't have bound VSC", vsFromBackup.Name)
+		return nil, fmt.Errorf("VS %s doesn't have bound VSC", vsFromBackup.Name)
+	}
+
+	vsc := velero.ResourceIdentifier{
+		GroupResource: kuberesource.VolumeSnapshotContents,
+		Name:          *vsFromBackup.Status.BoundVolumeSnapshotContentName,
+	}
+
 	p.log.Infof(`Returning from VolumeSnapshotRestoreItemAction with
 no additionalItems`)

 	return &velero.RestoreItemActionExecuteOutput{
 		UpdatedItem:     &unstructured.Unstructured{Object: vsMap},
-		AdditionalItems: []velero.ResourceIdentifier{},
+		AdditionalItems: []velero.ResourceIdentifier{vsc},
 	}, nil
 }
```
```diff
@@ -83,6 +83,7 @@ const ObjectStatusRestoreAnnotationKey = "velero.io/restore-status"

 var resourceMustHave = []string{
 	"datauploads.velero.io",
+	"volumesnapshotcontents.snapshot.storage.k8s.io",
 }

 type VolumeSnapshotterGetter interface {
@@ -1704,11 +1705,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	}
 	if patchBytes != nil {
 		if _, err = resourceClient.Patch(obj.GetName(), patchBytes); err != nil {
-			restoreLogger.Errorf("error patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
+			if !apierrors.IsNotFound(err) {
+				restoreLogger.Errorf("error patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
 				errs.Add(namespace, err)
 				return warnings, errs, itemExists
+			}
+			restoreLogger.Warnf("item not found when patching managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
+			warnings.Add(namespace, err)
 		} else {
 			restoreLogger.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
 		}
```
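The pattern here, demoting a NotFound failure to a warning via the apimachinery helper rather than failing the restore, is easy to isolate. A small self-contained sketch (the collectors are simplified; the apierrors calls are the real k8s.io/apimachinery API):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// classifyPatchErr mirrors the decision above: a missing item is only worth a
// warning; anything else is a hard error that should fail the restore.
func classifyPatchErr(err error) (warning, hard error) {
	if err == nil {
		return nil, nil
	}
	if apierrors.IsNotFound(err) {
		return err, nil
	}
	return nil, err
}

func main() {
	notFound := apierrors.NewNotFound(
		schema.GroupResource{Group: "apps", Resource: "deployments"}, "web")
	w, h := classifyPatchErr(notFound)
	fmt.Printf("warning=%v hard=%v\n", w != nil, h != nil) // warning=true hard=false
}
```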
```diff
@@ -467,13 +467,13 @@ func DiagnosePV(pv *corev1api.PersistentVolume) string {
 }

 func GetPVCAttachingNodeOS(pvc *corev1api.PersistentVolumeClaim, nodeClient corev1client.CoreV1Interface,
-	storageClient storagev1.StorageV1Interface, log logrus.FieldLogger) (string, error) {
+	storageClient storagev1.StorageV1Interface, log logrus.FieldLogger) string {
 	var nodeOS string
 	var scFsType string

 	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1api.PersistentVolumeBlock {
 		log.Infof("Use linux node for block mode PVC %s/%s", pvc.Namespace, pvc.Name)
-		return NodeOSLinux, nil
+		return NodeOSLinux
 	}

 	if pvc.Spec.VolumeName == "" {
@@ -485,53 +485,53 @@ func GetPVCAttachingNodeOS(pvc *corev1api.PersistentVolumeClaim, nodeClient core
 	}

 	nodeName := ""
-	if value := pvc.Annotations[KubeAnnSelectedNode]; value != "" {
-		nodeName = value
-	}
-
-	if nodeName == "" {
-		if pvc.Spec.VolumeName != "" {
-			n, err := GetPVAttachedNode(context.Background(), pvc.Spec.VolumeName, storageClient)
-			if err != nil {
-				return "", errors.Wrapf(err, "error to get attached node for PVC %s/%s", pvc.Namespace, pvc.Name)
-			}
-			nodeName = n
-		}
-	}
+	if pvc.Spec.VolumeName != "" {
+		if n, err := GetPVAttachedNode(context.Background(), pvc.Spec.VolumeName, storageClient); err != nil {
+			log.WithError(err).Warnf("Failed to get attached node for PVC %s/%s", pvc.Namespace, pvc.Name)
+		} else {
+			nodeName = n
+		}
+	}

-	if nodeName != "" {
-		os, err := GetNodeOS(context.Background(), nodeName, nodeClient)
-		if err != nil {
-			return "", errors.Wrapf(err, "error to get os from node %s for PVC %s/%s", nodeName, pvc.Namespace, pvc.Name)
-		}
-
-		nodeOS = os
-	}
+	if nodeName == "" {
+		if value := pvc.Annotations[KubeAnnSelectedNode]; value != "" {
+			nodeName = value
+		}
+	}
+
+	if nodeName != "" {
+		if os, err := GetNodeOS(context.Background(), nodeName, nodeClient); err != nil {
+			log.WithError(err).Warnf("Failed to get os from node %s for PVC %s/%s", nodeName, pvc.Namespace, pvc.Name)
+		} else {
+			nodeOS = os
+		}
+	}

 	if pvc.Spec.StorageClassName != nil {
-		sc, err := storageClient.StorageClasses().Get(context.Background(), *pvc.Spec.StorageClassName, metav1.GetOptions{})
-		if err != nil {
-			return "", errors.Wrapf(err, "error to get storage class %s", *pvc.Spec.StorageClassName)
-		}
-
-		if sc.Parameters != nil {
+		if sc, err := storageClient.StorageClasses().Get(context.Background(), *pvc.Spec.StorageClassName, metav1.GetOptions{}); err != nil {
+			log.WithError(err).Warnf("Failed to get storage class %s for PVC %s/%s", *pvc.Spec.StorageClassName, pvc.Namespace, pvc.Name)
+		} else if sc.Parameters != nil {
 			scFsType = strings.ToLower(sc.Parameters["csi.storage.k8s.io/fstype"])
 		}
 	}

 	if nodeOS != "" {
-		log.Infof("Deduced node os %s from selected node for PVC %s/%s (fsType %s)", nodeOS, pvc.Namespace, pvc.Name, scFsType)
-		return nodeOS, nil
+		log.Infof("Deduced node os %s from selected/attached node for PVC %s/%s (fsType %s)", nodeOS, pvc.Namespace, pvc.Name, scFsType)
+		return nodeOS
 	}

 	if scFsType == "ntfs" {
-		log.Infof("Deduced Windows node os from fsType for PVC %s/%s", pvc.Namespace, pvc.Name)
-		return NodeOSWindows, nil
+		log.Infof("Deduced Windows node os from fsType %s for PVC %s/%s", scFsType, pvc.Namespace, pvc.Name)
+		return NodeOSWindows
 	}

+	if scFsType != "" {
+		log.Infof("Deduced linux node os from fsType %s for PVC %s/%s", scFsType, pvc.Namespace, pvc.Name)
+		return NodeOSLinux
+	}
+
 	log.Warnf("Cannot deduce node os for PVC %s/%s, default to linux", pvc.Namespace, pvc.Name)
-	return NodeOSLinux, nil
+	return NodeOSLinux
 }

 func GetPVAttachedNode(ctx context.Context, pv string, storageClient storagev1.StorageV1Interface) (string, error) {
```
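Since the function now never returns an error, every signal is tried in turn and the worst case is a logged warning plus the Linux default. A condensed, dependency-free paraphrase of that precedence (my summary, not the Velero function itself):

```go
// deduceNodeOS condenses the precedence used above:
// block mode -> linux; OS label of the attached node (then the selected-node
// annotation) -> that OS; StorageClass fstype "ntfs" -> windows; any other
// non-empty fstype -> linux; otherwise default to linux instead of failing.
func deduceNodeOS(blockMode bool, nodeOSLabel, scFsType string) string {
	const (
		linux   = "linux"
		windows = "windows"
	)
	if blockMode {
		return linux // block-mode volumes are only supported on linux nodes
	}
	if nodeOSLabel != "" {
		return nodeOSLabel // strongest signal: the node actually using the PVC
	}
	if scFsType == "ntfs" {
		return windows
	}
	if scFsType != "" {
		return linux
	}
	return linux // nothing to go on: default rather than abort the operation
}
```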
@@ -1674,34 +1674,6 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
pvcObjWithNode := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "fake-namespace",
|
||||
Name: "fake-pvc",
|
||||
Annotations: map[string]string{KubeAnnSelectedNode: "fake-node"},
|
||||
},
|
||||
}
|
||||
|
||||
pvcObjWithVolume := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "fake-namespace",
|
||||
Name: "fake-pvc",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "fake-volume-name",
|
||||
},
|
||||
}
|
||||
|
||||
pvcObjWithStorageClass := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "fake-namespace",
|
||||
Name: "fake-pvc",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
StorageClassName: &storageClass,
|
||||
},
|
||||
}
|
||||
|
||||
pvName := "fake-volume-name"
|
||||
pvcObjWithAll := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -1715,17 +1687,6 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
pvcObjWithVolumeSC := &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "fake-namespace",
|
||||
Name: "fake-pvc",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: pvName,
|
||||
StorageClassName: &storageClass,
|
||||
},
|
||||
}
|
||||
|
||||
scObjWithoutFSType := &storagev1api.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-storage-class",
|
||||
@@ -1739,6 +1700,13 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
Parameters: map[string]string{"csi.storage.k8s.io/fstype": "ntfs"},
|
||||
}
|
||||
|
||||
scObjWithFSTypeExt := &storagev1api.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-storage-class",
|
||||
},
|
||||
Parameters: map[string]string{"csi.storage.k8s.io/fstype": "ext4"},
|
||||
}
|
||||
|
||||
volAttachEmpty := &storagev1api.VolumeAttachment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-volume-attach-1",
|
||||
@@ -1753,6 +1721,7 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
Source: storagev1api.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
NodeName: "fake-node",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1773,7 +1742,6 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
kubeClientObj []runtime.Object
|
||||
expectedNodeOS string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "no selected node, volume name and storage class",
|
||||
@@ -1781,53 +1749,51 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
},
|
||||
{
|
||||
name: "node doesn't exist",
|
||||
pvc: pvcObjWithNode,
|
||||
err: "error to get os from node fake-node for PVC fake-namespace/fake-pvc: error getting node fake-node: nodes \"fake-node\" not found",
|
||||
name: "fallback",
|
||||
pvc: pvcObjWithAll,
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
},
|
||||
{
|
||||
name: "node without os label",
|
||||
pvc: pvcObjWithNode,
|
||||
name: "with selected node, but node without label",
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
nodeNoOSLabel,
|
||||
},
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
},
|
||||
{
|
||||
name: "no attach volume",
|
||||
pvc: pvcObjWithVolume,
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
},
|
||||
{
|
||||
name: "sc doesn't exist",
|
||||
pvc: pvcObjWithStorageClass,
|
||||
err: "error to get storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
|
||||
},
|
||||
{
|
||||
name: "volume attachment not exist",
|
||||
pvc: pvcObjWithVolume,
|
||||
name: "volume attachment exist, but get node os fails",
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
nodeWindows,
|
||||
scObjWithFSType,
|
||||
volAttachEmpty,
|
||||
volAttachWithOtherVolume,
|
||||
volAttachWithVolume,
|
||||
},
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
expectedNodeOS: NodeOSWindows,
|
||||
},
|
||||
{
|
||||
name: "volume attachment exist, node without label",
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
nodeNoOSLabel,
|
||||
scObjWithFSType,
|
||||
volAttachWithVolume,
|
||||
},
|
||||
expectedNodeOS: NodeOSWindows,
|
||||
},
|
||||
{
|
||||
name: "sc without fsType",
|
||||
pvc: pvcObjWithStorageClass,
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
scObjWithoutFSType,
|
||||
},
|
||||
expectedNodeOS: NodeOSLinux,
|
||||
},
|
||||
{
|
||||
name: "deduce from node os",
|
||||
name: "deduce from node os by selected node",
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
nodeWindows,
|
||||
scObjWithFSType,
|
||||
scObjWithFSTypeExt,
|
||||
},
|
||||
expectedNodeOS: NodeOSWindows,
|
||||
},
|
||||
@@ -1842,13 +1808,13 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "deduce from attached node os",
|
||||
pvc: pvcObjWithVolumeSC,
|
||||
pvc: pvcObjWithAll,
|
||||
kubeClientObj: []runtime.Object{
|
||||
nodeWindows,
|
||||
scObjWithFSType,
|
||||
scObjWithFSTypeExt,
|
||||
volAttachEmpty,
|
||||
volAttachWithOtherVolume,
|
||||
volAttachWithVolume,
|
||||
volAttachWithOtherVolume,
|
||||
},
|
||||
expectedNodeOS: NodeOSWindows,
|
||||
},
|
||||
@@ -1864,13 +1830,7 @@ func TestGetPVCAttachingNodeOS(t *testing.T) {
|
||||
|
||||
var kubeClient kubernetes.Interface = fakeKubeClient
|
||||
|
||||
nodeOS, err := GetPVCAttachingNodeOS(test.pvc, kubeClient.CoreV1(), kubeClient.StorageV1(), velerotest.NewLogger())
|
||||
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, test.err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
nodeOS := GetPVCAttachingNodeOS(test.pvc, kubeClient.CoreV1(), kubeClient.StorageV1(), velerotest.NewLogger())
|
||||
|
||||
assert.Equal(t, test.expectedNodeOS, nodeOS)
|
||||
})
|
||||
|
||||
```diff
@@ -102,6 +102,7 @@ OBJECT_STORE_PROVIDER ?=
 INSTALL_VELERO ?= true
 REGISTRY_CREDENTIAL_FILE ?=
 KIBISHII_DIRECTORY ?= github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/
+IMAGE_REGISTRY_PROXY ?=

 # Flags to create an additional BSL for multiple credentials tests
@@ -216,7 +217,8 @@ run-e2e: ginkgo
 		--default-cls-service-account-name=$(DEFAULT_CLS_SERVICE_ACCOUNT_NAME) \
 		--standby-cls-service-account-name=$(STANDBY_CLS_SERVICE_ACCOUNT_NAME) \
 		--kibishii-directory=$(KIBISHII_DIRECTORY) \
-		--disable-informer-cache=$(DISABLE_INFORMER_CACHE)
+		--disable-informer-cache=$(DISABLE_INFORMER_CACHE) \
+		--image-registry-proxy=$(IMAGE_REGISTRY_PROXY)

 .PHONY: run-perf
 run-perf: ginkgo
```
```diff
@@ -328,26 +328,26 @@ STANDBY_CLUSTER=wl-antreav1311 \
 DEFAULT_CLUSTER_NAME=192.168.0.4 \
 STANDBY_CLUSTER_NAME=192.168.0.3 \
 FEATURES=EnableCSI \
-PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
+PLUGINS=velero/velero-plugin-for-aws:main \
 HAS_VSPHERE_PLUGIN=false \
 OBJECT_STORE_PROVIDER=aws \
 CREDS_FILE=$HOME/aws-credential \
 BSL_CONFIG=region=us-east-1 \
 BSL_BUCKET=nightly-normal-account4-test \
 BSL_PREFIX=nightly \
-ADDITIONAL_BSL_PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
+ADDITIONAL_BSL_PLUGINS=velero/velero-plugin-for-aws:main \
 ADDITIONAL_OBJECT_STORE_PROVIDER=aws \
 ADDITIONAL_BSL_CONFIG=region=us-east-1 \
 ADDITIONAL_BSL_BUCKET=nightly-restrict-account-test \
 ADDITIONAL_BSL_PREFIX=nightly \
 ADDITIONAL_CREDS_FILE=$HOME/aws-credential \
-VELERO_IMAGE=gcr.io/velero-gcp/velero:main \
-RESTORE_HELPER_IMAGE=gcr.io/velero-gcp/velero-restore-helper:main \
+VELERO_IMAGE=velero/velero:main \
+RESTORE_HELPER_IMAGE=velero/velero:main \
 VERSION=main \
 SNAPSHOT_MOVE_DATA=true \
 STANDBY_CLUSTER_CLOUD_PROVIDER=vsphere \
 STANDBY_CLUSTER_OBJECT_STORE_PROVIDER=aws \
-STANDBY_CLUSTER_PLUGINS=gcr.io/velero-gcp/velero-plugin-for-aws:main \
+STANDBY_CLUSTER_PLUGINS=velero/velero-plugin-for-aws:main \
 DISABLE_INFORMER_CACHE=true \
 REGISTRY_CREDENTIAL_FILE=$HOME/.docker/config.json \
 GINKGO_LABELS=Migration \
```
```diff
@@ -172,7 +172,9 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {

 			Expect(VeleroInstall(context.Background(), &veleroCfg, false)).To(Succeed())
 		}
-		Expect(VeleroAddPluginsForProvider(context.TODO(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, veleroCfg.AdditionalBSLProvider, veleroCfg.AddBSLPlugins)).To(Succeed())
+		plugins, err := GetPlugins(context.TODO(), veleroCfg, false)
+		Expect(err).To(Succeed())
+		Expect(AddPlugins(plugins, veleroCfg)).To(Succeed())

 		// Create Secret for additional BSL
 		secretName := fmt.Sprintf("bsl-credentials-%s", UUIDgen)
```
```diff
@@ -115,8 +115,17 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
 		}()
 	}

-	if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, ns,
-		registryCredentialFile, veleroFeatures, kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil {
+	if err := KibishiiPrepareBeforeBackup(
+		oneHourTimeout,
+		client,
+		providerName,
+		ns,
+		registryCredentialFile,
+		veleroFeatures,
+		kibishiiDirectory,
+		DefaultKibishiiData,
+		veleroCfg.ImageRegistryProxy,
+	); err != nil {
 		return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", ns)
 	}
 	err := ObjectsShouldNotBeInBucket(veleroCfg.ObjectStoreProvider, veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig, backupName, BackupObjectsPrefix, 1)
```
```diff
@@ -100,9 +100,17 @@ func TTLTest() {
 	})

 	By("Deploy sample workload of Kibishii", func() {
-		Expect(KibishiiPrepareBeforeBackup(ctx, client, veleroCfg.CloudProvider,
-			test.testNS, veleroCfg.RegistryCredentialFile, veleroCfg.Features,
-			veleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
+		Expect(KibishiiPrepareBeforeBackup(
+			ctx,
+			client,
+			veleroCfg.CloudProvider,
+			test.testNS,
+			veleroCfg.RegistryCredentialFile,
+			veleroCfg.Features,
+			veleroCfg.KibishiiDirectory,
+			DefaultKibishiiData,
+			veleroCfg.ImageRegistryProxy,
+		)).To(Succeed())
 	})

 	var BackupCfg BackupConfig
```
```diff
@@ -121,7 +121,7 @@ func (v *BackupVolumeInfo) CreateResources() error {
 			volumeName := fmt.Sprintf("volume-info-pv-%d", i)
 			vols = append(vols, CreateVolumes(pvc.Name, []string{volumeName})...)
 		}
-		deployment := NewDeployment(v.CaseBaseName, createNSName, 1, labels, nil).WithVolume(vols).Result()
+		deployment := NewDeployment(v.CaseBaseName, createNSName, 1, labels, v.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result()
 		deployment, err := CreateDeployment(v.Client.ClientGo, createNSName, deployment)
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", createNSName))
```
```diff
@@ -91,9 +91,17 @@ func (n *NamespaceMapping) CreateResources() error {
 			Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
 		})
 		By("Deploy sample workload of Kibishii", func() {
-			Expect(KibishiiPrepareBeforeBackup(n.Ctx, n.Client, n.VeleroCfg.CloudProvider,
-				ns, n.VeleroCfg.RegistryCredentialFile, n.VeleroCfg.Features,
-				n.VeleroCfg.KibishiiDirectory, false, n.kibishiiData)).To(Succeed())
+			Expect(KibishiiPrepareBeforeBackup(
+				n.Ctx,
+				n.Client,
+				n.VeleroCfg.CloudProvider,
+				ns,
+				n.VeleroCfg.RegistryCredentialFile,
+				n.VeleroCfg.Features,
+				n.VeleroCfg.KibishiiDirectory,
+				n.kibishiiData,
+				n.VeleroCfg.ImageRegistryProxy,
+			)).To(Succeed())
 		})
 	}
 	return nil
```
```diff
@@ -75,7 +75,8 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
 		p.oldNodeName = nodeName
 		fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
 		pvcAnn := map[string]string{p.ann: nodeName}
-		_, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, []string{p.volume}, pvcAnn, nil)
+		_, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, []string{p.volume},
+			pvcAnn, nil, p.VeleroCfg.ImageRegistryProxy)
 		Expect(err).To(Succeed())
 		err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.podName})
 		Expect(err).To(Succeed())
```
```diff
@@ -82,7 +82,7 @@ func (s *StorageClasssChanging) CreateResources() error {
 		Expect(err).To(Succeed())
 		vols := CreateVolumes(pvc.Name, []string{s.volume})

-		deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, nil).WithVolume(vols).Result()
+		deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, s.VeleroCfg.ImageRegistryProxy).WithVolume(vols).Result()
 		deployment, err = CreateDeployment(s.Client.ClientGo, s.namespace, deployment)
 		Expect(err).To(Succeed())
 		s.deploymentName = deployment.Name
```
```diff
@@ -106,7 +106,9 @@ func BslDeletionTest(useVolumeSnapshots bool) {
 	}

 	By(fmt.Sprintf("Add an additional plugin for provider %s", veleroCfg.AdditionalBSLProvider), func() {
-		Expect(VeleroAddPluginsForProvider(context.TODO(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, veleroCfg.AdditionalBSLProvider, veleroCfg.AddBSLPlugins)).To(Succeed())
+		plugins, err := GetPlugins(context.TODO(), veleroCfg, false)
+		Expect(err).To(Succeed())
+		Expect(AddPlugins(plugins, veleroCfg)).To(Succeed())
 	})

 	additionalBsl := fmt.Sprintf("bsl-%s", UUIDgen)
```
```diff
@@ -150,9 +152,17 @@ func BslDeletionTest(useVolumeSnapshots bool) {
 	})

 	By("Deploy sample workload of Kibishii", func() {
-		Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *veleroCfg.ClientToInstallVelero, veleroCfg.CloudProvider,
-			bslDeletionTestNs, veleroCfg.RegistryCredentialFile, veleroCfg.Features,
-			veleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
+		Expect(KibishiiPrepareBeforeBackup(
+			oneHourTimeout,
+			*veleroCfg.ClientToInstallVelero,
+			veleroCfg.CloudProvider,
+			bslDeletionTestNs,
+			veleroCfg.RegistryCredentialFile,
+			veleroCfg.Features,
+			veleroCfg.KibishiiDirectory,
+			DefaultKibishiiData,
+			veleroCfg.ImageRegistryProxy,
+		)).To(Succeed())
 	})

 	// Restic can not backup PV only, so pod need to be labeled also
```
```diff
@@ -55,6 +55,7 @@ import (

 func init() {
 	test.VeleroCfg.Options = install.Options{}
+	test.VeleroCfg.BackupRepoConfigMap = test.BackupRepositoryConfigName // Set to the default value
 	flag.StringVar(
 		&test.VeleroCfg.CloudProvider,
 		"cloud-provider",
@@ -343,6 +344,18 @@ func init() {
 		false,
 		"a switch for installing vSphere plugin.",
 	)
+	flag.IntVar(
+		&test.VeleroCfg.ItemBlockWorkerCount,
+		"item-block-worker-count",
+		1,
+		"Velero backup's item block worker count.",
+	)
+	flag.StringVar(
+		&test.VeleroCfg.ImageRegistryProxy,
+		"image-registry-proxy",
+		"",
+		"The image registry proxy, e.g. when the DockerHub access limitation is reached, can use available proxy to replace. Default is nil.",
+	)
 }

 // Add label [SkipVanillaZfs]:
@@ -687,6 +700,8 @@ func TestE2e(t *testing.T) {
 		t.FailNow()
 	}

+	veleroutil.UpdateImagesMatrixByProxy(test.VeleroCfg.ImageRegistryProxy)
+
 	RegisterFailHandler(Fail)
 	testSuitePassed = RunSpecs(t, "E2e Suite")
 }
```
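UpdateImagesMatrixByProxy is referenced here but its body is not part of this compare view. Conceptually, an image-registry proxy substitution is a host-prefix swap on image references; the helper below is a hypothetical illustration of that idea (the name rewriteImage and its rule are my assumptions, not Velero's actual implementation):

```go
// rewriteImage is a hypothetical sketch of an image-registry proxy rewrite:
// prefix Docker Hub-style references with the proxy host, e.g.
// "velero/velero:main" -> "proxy.example.com/velero/velero:main".
// The real helper may normalize references differently.
func rewriteImage(image, proxy string) string {
	if proxy == "" {
		// No proxy configured (the flag defaults to ""): leave the reference untouched.
		return image
	}
	return proxy + "/" + image
}
```

This matches how the flag is used above: when `--image-registry-proxy` is empty, the image matrix is left as-is; otherwise every test image is routed through the proxy to dodge DockerHub rate limits.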
```diff
@@ -195,8 +195,8 @@ func (m *migrationE2E) Backup() error {
 			OriginVeleroCfg.RegistryCredentialFile,
 			OriginVeleroCfg.Features,
 			OriginVeleroCfg.KibishiiDirectory,
-			OriginVeleroCfg.UseVolumeSnapshots,
 			&m.kibishiiData,
+			OriginVeleroCfg.ImageRegistryProxy,
 		)).To(Succeed())
 	})
```
```diff
@@ -95,7 +95,8 @@ func (p *ParallelFilesDownload) CreateResources() error {
 	})

 	By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() {
-		_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume}, nil, nil)
+		_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume},
+			nil, nil, p.VeleroCfg.ImageRegistryProxy)
 		Expect(err).To(Succeed())
 		err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod})
 		Expect(err).To(Succeed())
```
```diff
@@ -86,7 +86,8 @@ func (p *ParallelFilesUpload) CreateResources() error {
 	})

 	By(fmt.Sprintf("Create pod %s in namespace %s", p.pod, p.namespace), func() {
-		_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc, []string{p.volume}, nil, nil)
+		_, err := CreatePod(p.Client, p.namespace, p.pod, StorageClassName, p.pvc,
+			[]string{p.volume}, nil, nil, p.VeleroCfg.ImageRegistryProxy)
 		Expect(err).To(Succeed())
 		err = WaitForPods(p.Ctx, p.Client, p.namespace, []string{p.pod})
 		Expect(err).To(Succeed())
```
```diff
@@ -87,7 +87,8 @@ func (p *PVBackupFiltering) CreateResources() error {
 		podName := fmt.Sprintf("pod-%d", i)
 		pods = append(pods, podName)
 		By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
-			pod, err := CreatePod(p.Client, ns, podName, StorageClassName, "", volumes, nil, nil)
+			pod, err := CreatePod(p.Client, ns, podName, StorageClassName, "",
+				volumes, nil, nil, p.VeleroCfg.ImageRegistryProxy)
 			Expect(err).To(Succeed())
 			ann := map[string]string{
 				p.annotation: volumesToAnnotation,
```
```diff
@@ -68,7 +68,7 @@ func (f *FilteringCase) CreateResources() error {
 		}
 		//Create deployment
 		fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
-		deployment := NewDeployment(f.CaseBaseName, namespace, f.replica, f.labels, nil).Result()
+		deployment := NewDeployment(f.CaseBaseName, namespace, f.replica, f.labels, f.VeleroCfg.ImageRegistryProxy).Result()
 		deployment, err := CreateDeployment(f.Client.ClientGo, namespace, deployment)
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))
```
```diff
@@ -88,7 +88,7 @@ func (e *ExcludeFromBackup) CreateResources() error {
 	}
 	//Create deployment: to be included
 	fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
-	deployment := NewDeployment(e.CaseBaseName, namespace, e.replica, label2, nil).Result()
+	deployment := NewDeployment(e.CaseBaseName, namespace, e.replica, label2, e.VeleroCfg.ImageRegistryProxy).Result()
 	deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))
```
```diff
@@ -88,7 +88,7 @@ func (l *LabelSelector) CreateResources() error {
 	//Create deployment
 	fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)

-	deployment := NewDeployment(l.CaseBaseName, namespace, l.replica, labels, nil).Result()
+	deployment := NewDeployment(l.CaseBaseName, namespace, l.replica, labels, l.VeleroCfg.ImageRegistryProxy).Result()
 	deployment, err := CreateDeployment(l.Client.ClientGo, namespace, deployment)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))
```
```diff
@@ -145,7 +145,7 @@ func (r *ResourceModifiersCase) Clean() error {
 }

 func (r *ResourceModifiersCase) createDeployment(namespace string) error {
-	deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"app": "test"}, nil).Result()
+	deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"app": "test"}, r.VeleroCfg.ImageRegistryProxy).Result()
 	deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace))
```
```diff
@@ -23,7 +23,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"

 	. "github.com/vmware-tanzu/velero/test"
```
```diff
@@ -186,7 +186,7 @@ func (r *ResourcePoliciesCase) Clean() error {
 	return nil
 }

-func (r *ResourcePoliciesCase) createPVC(index int, namespace string, volList []*v1.Volume) error {
+func (r *ResourcePoliciesCase) createPVC(index int, namespace string, volList []*corev1api.Volume) error {
 	var err error
 	for i := range volList {
 		pvcName := fmt.Sprintf("pvc-%d", i)
@@ -208,8 +208,8 @@ func (r *ResourcePoliciesCase) createPVC(index int, namespace string, volList []
 	return nil
 }

-func (r *ResourcePoliciesCase) createDeploymentWithVolume(namespace string, volList []*v1.Volume) error {
-	deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"resource-policies": "resource-policies"}, nil).WithVolume(volList).Result()
+func (r *ResourcePoliciesCase) createDeploymentWithVolume(namespace string, volList []*corev1api.Volume) error {
+	deployment := NewDeployment(r.CaseBaseName, namespace, 1, map[string]string{"resource-policies": "resource-policies"}, r.VeleroCfg.ImageRegistryProxy).WithVolume(volList).Result()
 	deployment, err := CreateDeployment(r.Client.ClientGo, namespace, deployment)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to create deloyment %s the namespace %q", deployment.Name, namespace))
```
```diff
@@ -84,6 +84,7 @@ func (s *InProgressCase) CreateResources() error {
 			[]string{s.volume},
 			nil,
 			s.podAnn,
+			s.VeleroCfg.ImageRegistryProxy,
 		)
 		Expect(err).To(Succeed())
```
```diff
@@ -99,7 +99,7 @@ func (o *OrderedResources) CreateResources() error {
 	//Create deployment
 	deploymentName := fmt.Sprintf("deploy-%s", o.CaseBaseName)
 	fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace)
-	deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result()
+	deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, o.VeleroCfg.ImageRegistryProxy).Result()
 	_, err := k8sutil.CreateDeployment(o.Client.ClientGo, o.Namespace, deployment)
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err))
```
@@ -126,6 +126,15 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
	tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion = veleroCLI2Version.VeleroVersion
	tmpCfgForOldVeleroInstall.VeleroCLI = veleroCLI2Version.VeleroCLI

+	// CLI under version v1.14.x
+	if veleroCLI2Version.VeleroVersion < "v1.15" {
+		tmpCfgForOldVeleroInstall.BackupRepoConfigMap = ""
+		fmt.Printf(
+			"CLI version %s is lower than v1.15. Set BackupRepoConfigMap to empty, because it's not supported",
+			veleroCLI2Version.VeleroVersion,
+		)
+	}
+
	tmpCfgForOldVeleroInstall, err = SetImagesToDefaultValues(
		tmpCfgForOldVeleroInstall,
		veleroCLI2Version.VeleroVersion,
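A note for readers reusing the version gate above: `veleroCLI2Version.VeleroVersion < "v1.15"` is a plain lexicographic string comparison. It happens to hold for the tags this test compares, but it mis-orders tags such as v1.9 versus v1.15, so a numeric comparison is safer in new code. A minimal sketch under that assumption; the parseMinor and olderThan helpers below are hypothetical and not part of the Velero test suite:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMinor extracts the major/minor numbers from a "vX.Y[.Z]" tag (hypothetical helper).
func parseMinor(v string) (major, minor int) {
	parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
	major, _ = strconv.Atoi(parts[0])
	if len(parts) > 1 {
		minor, _ = strconv.Atoi(parts[1])
	}
	return major, minor
}

// olderThan compares two tags numerically instead of lexicographically.
func olderThan(a, b string) bool {
	am, an := parseMinor(a)
	bm, bn := parseMinor(b)
	return am < bm || (am == bm && an < bn)
}

func main() {
	fmt.Println("v1.9" < "v1.15")           // false: byte-wise, '9' sorts after '1'
	fmt.Println(olderThan("v1.9", "v1.15")) // true: 9 < 15 numerically
}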
@@ -157,9 +166,17 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
	})

	By("Deploy sample workload of Kibishii", func() {
-		Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *veleroCfg.ClientToInstallVelero, tmpCfg.CloudProvider,
-			upgradeNamespace, tmpCfg.RegistryCredentialFile, tmpCfg.Features,
-			tmpCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
+		Expect(KibishiiPrepareBeforeBackup(
+			oneHourTimeout,
+			*veleroCfg.ClientToInstallVelero,
+			tmpCfg.CloudProvider,
+			upgradeNamespace,
+			tmpCfg.RegistryCredentialFile,
+			tmpCfg.Features,
+			tmpCfg.KibishiiDirectory,
+			DefaultKibishiiData,
+			tmpCfg.ImageRegistryProxy,
+		)).To(Succeed())
	})

By(fmt.Sprintf("Backup namespace %s", upgradeNamespace), func() {
|
||||
@@ -239,6 +256,9 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
|
||||
}
|
||||
})
|
||||
|
||||
// Wait for 70s to make sure the backups are synced after Velero reinstall
|
||||
time.Sleep(70 * time.Second)
|
||||
|
||||
By(fmt.Sprintf("Restore %s", upgradeNamespace), func() {
|
||||
Expect(VeleroRestore(oneHourTimeout, tmpCfg.VeleroCLI,
|
||||
tmpCfg.VeleroNamespace, restoreName, backupName, "")).To(Succeed(), func() string {
|
||||
|
@@ -44,13 +44,17 @@ const CSI = "csi"
const Velero = "velero"
const VeleroRestoreHelper = "velero-restore-helper"

-const UploaderTypeRestic = "restic"
+const (
+	UploaderTypeRestic = "restic"
+	UploaderTypeKopia  = "kopia"
+)

const (
	KubeSystemNamespace           = "kube-system"
	VSphereCSIControllerNamespace = "vmware-system-csi"
	VeleroVSphereSecretName       = "velero-vsphere-config-secret"
	VeleroVSphereConfigMapName    = "velero-vsphere-plugin-config"
+	BackupRepositoryConfigName    = "backup-repository-config"
)

var PublicCloudProviders = []string{AWS, Azure, GCP, Vsphere}
@@ -124,6 +128,7 @@ type VeleroConfig struct {
	EKSPolicyARN       string
	FailFast           bool
	HasVspherePlugin   bool
+	ImageRegistryProxy string
}

type VeleroCfgInPerf struct {
@@ -18,11 +18,12 @@ package k8s

import (
	"fmt"
+	"path"
	"time"

	"golang.org/x/net/context"
	apps "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
@@ -36,6 +37,7 @@ const (
	PollInterval         = 2 * time.Second
	PollTimeout          = 15 * time.Minute
	DefaultContainerName = "container-busybox"
+	TestImage            = "busybox:1.37.0"
)

// DeploymentBuilder builds Deployment objects.
@@ -48,29 +50,33 @@ func (d *DeploymentBuilder) Result() *apps.Deployment {
}

// newDeployment returns a RollingUpdate Deployment with a fake container image
-func NewDeployment(name, ns string, replicas int32, labels map[string]string, containers []v1.Container) *DeploymentBuilder {
-	if containers == nil {
-		containers = []v1.Container{
-			{
-				Name:    DefaultContainerName,
-				Image:   "gcr.io/velero-gcp/busybox:latest",
-				Command: []string{"sleep", "1000000"},
-				// Make the pod obey the restricted pod security standards.
-				SecurityContext: &v1.SecurityContext{
-					AllowPrivilegeEscalation: boolptr.False(),
-					Capabilities: &v1.Capabilities{
-						Drop: []v1.Capability{"ALL"},
-					},
-					RunAsNonRoot: boolptr.True(),
-					RunAsUser:    func(i int64) *int64 { return &i }(65534),
-					RunAsGroup:   func(i int64) *int64 { return &i }(65534),
-					SeccompProfile: &v1.SeccompProfile{
-						Type: v1.SeccompProfileTypeRuntimeDefault,
-					},
-				},
-			},
-		}
-	}
+func NewDeployment(name, ns string, replicas int32, labels map[string]string, imageRegistryProxy string) *DeploymentBuilder {
+	imageAddress := TestImage
+	if imageRegistryProxy != "" {
+		imageAddress = path.Join(imageRegistryProxy, TestImage)
+	}
+
+	containers := []corev1api.Container{
+		{
+			Name:    DefaultContainerName,
+			Image:   imageAddress,
+			Command: []string{"sleep", "1000000"},
+			// Make the pod obey the restricted pod security standards.
+			SecurityContext: &corev1api.SecurityContext{
+				AllowPrivilegeEscalation: boolptr.False(),
+				Capabilities: &corev1api.Capabilities{
+					Drop: []corev1api.Capability{"ALL"},
+				},
+				RunAsNonRoot: boolptr.True(),
+				RunAsUser:    func(i int64) *int64 { return &i }(65534),
+				RunAsGroup:   func(i int64) *int64 { return &i }(65534),
+				SeccompProfile: &corev1api.SeccompProfile{
+					Type: corev1api.SeccompProfileTypeRuntimeDefault,
+				},
+			},
+		},
+	}

	return &DeploymentBuilder{
		&apps.Deployment{
			TypeMeta: metav1.TypeMeta{
@@ -89,14 +95,14 @@ func NewDeployment(name, ns string, replicas int32, labels map[string]string, co
				Type:          apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: new(apps.RollingUpdateDeployment),
			},
-			Template: v1.PodTemplateSpec{
+			Template: corev1api.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
-				Spec: v1.PodSpec{
-					SecurityContext: &v1.PodSecurityContext{
+				Spec: corev1api.PodSpec{
+					SecurityContext: &corev1api.PodSecurityContext{
						FSGroup: func(i int64) *int64 { return &i }(65534),
-						FSGroupChangePolicy: func(policy v1.PodFSGroupChangePolicy) *v1.PodFSGroupChangePolicy { return &policy }(v1.FSGroupChangeAlways),
+						FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
					},
					Containers: containers,
				},
@@ -106,10 +112,10 @@ func NewDeployment(name, ns string, replicas int32, labels map[string]string, co
	}
}

-func (d *DeploymentBuilder) WithVolume(volumes []*v1.Volume) *DeploymentBuilder {
-	vmList := []v1.VolumeMount{}
+func (d *DeploymentBuilder) WithVolume(volumes []*corev1api.Volume) *DeploymentBuilder {
+	vmList := []corev1api.VolumeMount{}
	for _, v := range volumes {
-		vmList = append(vmList, v1.VolumeMount{
+		vmList = append(vmList, corev1api.VolumeMount{
			Name:      v.Name,
			MountPath: "/" + v.Name,
		})

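For orientation, here is a minimal sketch of driving the updated builder; the import path is assumed from the test tree layout, and my-proxy.example.com is a made-up mirror host:

package main

import (
	"fmt"

	// Assumed import path for the e2e helpers shown above.
	k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
)

func main() {
	// "my-proxy.example.com" is a hypothetical pull-through mirror host.
	deployment := k8sutil.NewDeployment(
		"demo", "demo-ns", 1,
		map[string]string{"app": "demo"},
		"my-proxy.example.com",
	).Result()

	// Prints "my-proxy.example.com/busybox:1.37.0"; with an empty proxy
	// argument it would print "busybox:1.37.0" unchanged.
	fmt.Println(deployment.Spec.Template.Spec.Containers[0].Image)
}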
@@ -19,20 +19,32 @@ package k8s

import (
	"context"
	"fmt"
+	"path"

	"github.com/pkg/errors"
-	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)

-func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) {
+func CreatePod(
+	client TestClient,
+	ns, name, sc, pvcName string,
+	volumeNameList []string,
+	pvcAnn, ann map[string]string,
+	imageRegistryProxy string,
+) (*corev1api.Pod, error) {
	if pvcName != "" && len(volumeNameList) != 1 {
		return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
	}
-	volumes := []corev1.Volume{}
+
+	imageAddress := TestImage
+	if imageRegistryProxy != "" {
+		imageAddress = path.Join(imageRegistryProxy, TestImage)
+	}
+
+	volumes := []corev1api.Volume{}
	for _, volume := range volumeNameList {
		var _pvcName string
		if pvcName == "" {
@@ -45,10 +57,10 @@ func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList [
			return nil, err
		}

-		volumes = append(volumes, corev1.Volume{
+		volumes = append(volumes, corev1api.Volume{
			Name: volume,
-			VolumeSource: corev1.VolumeSource{
-				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+			VolumeSource: corev1api.VolumeSource{
+				PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
					ReadOnly:  false,
				},
@@ -56,41 +68,41 @@ func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList [
		})
	}

-	vmList := []corev1.VolumeMount{}
+	vmList := []corev1api.VolumeMount{}
	for _, v := range volumes {
-		vmList = append(vmList, corev1.VolumeMount{
+		vmList = append(vmList, corev1api.VolumeMount{
			Name:      v.Name,
			MountPath: "/" + v.Name,
		})
	}

-	p := &corev1.Pod{
+	p := &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: ann,
		},
-		Spec: corev1.PodSpec{
-			SecurityContext: &v1.PodSecurityContext{
+		Spec: corev1api.PodSpec{
+			SecurityContext: &corev1api.PodSecurityContext{
				FSGroup: func(i int64) *int64 { return &i }(65534),
-				FSGroupChangePolicy: func(policy v1.PodFSGroupChangePolicy) *v1.PodFSGroupChangePolicy { return &policy }(v1.FSGroupChangeAlways),
+				FSGroupChangePolicy: func(policy corev1api.PodFSGroupChangePolicy) *corev1api.PodFSGroupChangePolicy { return &policy }(corev1api.FSGroupChangeAlways),
			},
-			Containers: []corev1.Container{
+			Containers: []corev1api.Container{
				{
					Name:         name,
-					Image:        "gcr.io/velero-gcp/busybox",
+					Image:        imageAddress,
					Command:      []string{"sleep", "3600"},
					VolumeMounts: vmList,
					// Make the pod obey the restricted pod security standards.
-					SecurityContext: &v1.SecurityContext{
+					SecurityContext: &corev1api.SecurityContext{
						AllowPrivilegeEscalation: boolptr.False(),
-						Capabilities: &v1.Capabilities{
-							Drop: []v1.Capability{"ALL"},
+						Capabilities: &corev1api.Capabilities{
+							Drop: []corev1api.Capability{"ALL"},
						},
						RunAsNonRoot: boolptr.True(),
						RunAsUser:    func(i int64) *int64 { return &i }(65534),
						RunAsGroup:   func(i int64) *int64 { return &i }(65534),
-						SeccompProfile: &v1.SeccompProfile{
-							Type: v1.SeccompProfileTypeRuntimeDefault,
+						SeccompProfile: &corev1api.SeccompProfile{
+							Type: corev1api.SeccompProfileTypeRuntimeDefault,
						},
					},
				},
@@ -102,11 +114,11 @@ func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList [
	return client.ClientGo.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{})
}

-func GetPod(ctx context.Context, client TestClient, namespace string, pod string) (*corev1.Pod, error) {
+func GetPod(ctx context.Context, client TestClient, namespace string, pod string) (*corev1api.Pod, error) {
	return client.ClientGo.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{})
}

-func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podName string, ann map[string]string) (*corev1.Pod, error) {
+func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podName string, ann map[string]string) (*corev1api.Pod, error) {
	newPod, err := GetPod(ctx, client, namespace, podName)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("Fail to get pod %s in namespace %s", podName, namespace))
@@ -125,6 +137,6 @@ func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podNa
	return client.ClientGo.CoreV1().Pods(namespace).Update(ctx, newPod, metav1.UpdateOptions{})
}

-func ListPods(ctx context.Context, client TestClient, namespace string) (*corev1.PodList, error) {
+func ListPods(ctx context.Context, client TestClient, namespace string) (*corev1api.PodList, error) {
	return client.ClientGo.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
}

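A hypothetical call against the widened CreatePod signature might look as follows; every literal below is illustrative, and an empty proxy argument preserves the old unproxied busybox image:

// All values are illustrative; client is a TestClient wired to the cluster under test.
pod, err := CreatePod(
	client,
	"demo-ns",         // namespace
	"demo-pod",        // pod name
	"standard",        // storage class for generated PVCs
	"",                // pvcName: empty, so one PVC is created per volume name
	[]string{"vol-0"}, // volume names, each mounted at /<name>
	nil,               // PVC annotations
	nil,               // pod annotations
	"",                // imageRegistryProxy: empty keeps busybox:1.37.0 unproxied
)
if err != nil {
	return err
}
fmt.Printf("created pod %s\n", pod.Name)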
@@ -18,7 +18,10 @@ package kibishii

import (
	"fmt"
+	"html/template"
+	"os"
	"os/exec"
+	"path"
	"strconv"
	"strings"
	"time"
@@ -26,7 +29,10 @@ import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
+	appsv1api "k8s.io/api/apps/v1"
+	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/yaml"

	veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
	. "github.com/vmware-tanzu/velero/test"
@@ -101,9 +107,17 @@ func RunKibishiiTests(
		}
	}()
	fmt.Printf("KibishiiPrepareBeforeBackup %s\n", time.Now().Format("2006-01-02 15:04:05"))
-	if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName,
-		kibishiiNamespace, registryCredentialFile, veleroFeatures,
-		kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil {
+	if err := KibishiiPrepareBeforeBackup(
+		oneHourTimeout,
+		client,
+		providerName,
+		kibishiiNamespace,
+		registryCredentialFile,
+		veleroFeatures,
+		kibishiiDirectory,
+		DefaultKibishiiData,
+		veleroCfg.ImageRegistryProxy,
+	); err != nil {
		return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", kibishiiNamespace)
	}
	fmt.Printf("KibishiiPrepareBeforeBackup done %s\n", time.Now().Format("2006-01-02 15:04:05"))
@@ -263,8 +277,15 @@ func RunKibishiiTests(
	return nil
}

-func installKibishii(ctx context.Context, namespace string, cloudPlatform, veleroFeatures,
-	kibishiiDirectory string, useVolumeSnapshots bool, workerReplicas int) error {
+func installKibishii(
+	ctx context.Context,
+	namespace string,
+	cloudPlatform,
+	veleroFeatures,
+	kibishiiDirectory string,
+	workerReplicas int,
+	imageRegistryProxy string,
+) error {
	if strings.EqualFold(cloudPlatform, Azure) &&
		strings.EqualFold(veleroFeatures, FeatureCSI) {
		cloudPlatform = AzureCSI
@@ -273,9 +294,32 @@ func installKibishii(
		strings.EqualFold(veleroFeatures, FeatureCSI) {
		cloudPlatform = AwsCSI
	}

+	if strings.EqualFold(cloudPlatform, Vsphere) {
+		if strings.HasPrefix(kibishiiDirectory, "https://") {
+			return errors.New("vSphere needs to download the Kibishii repository first, because it needs to inject an image patch file to work")
+		}
+
+		kibishiiImage := readBaseKibishiiImage(path.Join(kibishiiDirectory, "base", "kibishii.yaml"))
+		if err := generateKibishiiImagePatch(
+			path.Join(imageRegistryProxy, kibishiiImage),
+			path.Join(kibishiiDirectory, cloudPlatform, "worker-image-patch.yaml"),
+		); err != nil {
+			return err
+		}
+
+		jumpPadImage := readBaseJumpPadImage(path.Join(kibishiiDirectory, "base", "jump-pad.yaml"))
+		if err := generateJumpPadPatch(
+			path.Join(imageRegistryProxy, jumpPadImage),
+			path.Join(kibishiiDirectory, cloudPlatform, "jump-pad-image-patch.yaml"),
+		); err != nil {
+			return err
+		}
+	}
+
	// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
	kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
-		kibishiiDirectory+cloudPlatform, "--timeout=90s")
+		path.Join(kibishiiDirectory, cloudPlatform), "--timeout=90s")
	_, stderr, err := veleroexec.RunCommand(kibishiiInstallCmd)
	fmt.Printf("Install Kibishii cmd: %s\n", kibishiiInstallCmd)
	if err != nil {
@@ -312,16 +356,134 @@ func installKibishii(
	return err
}

+func readBaseKibishiiImage(kibishiiFilePath string) string {
+	bytes, err := os.ReadFile(kibishiiFilePath)
+	if err != nil {
+		return ""
+	}
+
+	sts := &appsv1api.StatefulSet{}
+	if err := yaml.UnmarshalStrict(bytes, sts); err != nil {
+		return ""
+	}
+
+	kibishiiImage := ""
+	if len(sts.Spec.Template.Spec.Containers) > 0 {
+		kibishiiImage = sts.Spec.Template.Spec.Containers[0].Image
+	}
+
+	return kibishiiImage
+}
+
+func readBaseJumpPadImage(jumpPadFilePath string) string {
+	bytes, err := os.ReadFile(jumpPadFilePath)
+	if err != nil {
+		return ""
+	}
+
+	pod := &corev1api.Pod{}
+	if err := yaml.UnmarshalStrict(bytes, pod); err != nil {
+		return ""
+	}
+
+	jumpPadImage := ""
+	if len(pod.Spec.Containers) > 0 {
+		jumpPadImage = pod.Spec.Containers[0].Image
+	}
+
+	return jumpPadImage
+}
+
+type patchImageData struct {
+	Image string
+}
+
+func generateKibishiiImagePatch(kibishiiImage string, patchDirectory string) error {
+	patchString := `
+apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
+kind: StatefulSet
+metadata:
+  name: kibishii-deployment
+spec:
+  template:
+    spec:
+      containers:
+      - name: kibishii
+        image: {{.Image}}
+`
+
+	file, err := os.OpenFile(patchDirectory, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	patchTemplate, err := template.New("imagePatch").Parse(patchString)
+	if err != nil {
+		return err
+	}
+
+	if err := patchTemplate.Execute(file, patchImageData{Image: kibishiiImage}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func generateJumpPadPatch(jumpPadImage string, patchDirectory string) error {
+	patchString := `
+apiVersion: v1
+kind: Pod
+metadata:
+  name: jump-pad
+spec:
+  containers:
+  - name: jump-pad
+    image: {{.Image}}
+`
+	file, err := os.OpenFile(patchDirectory, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	patchTemplate, err := template.New("imagePatch").Parse(patchString)
+	if err != nil {
+		return err
+	}
+
+	if err := patchTemplate.Execute(file, patchImageData{Image: jumpPadImage}); err != nil {
+		return err
+	}
+
+	return nil
+}

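To see what the generated strategic-merge patch actually contains, a standalone sketch that renders the same template to stdout can help; the image reference is a made-up proxied value, and html/template is kept only to mirror the import above (text/template would behave the same for these strings):

package main

import (
	"html/template"
	"os"
)

type patchImageData struct {
	Image string
}

func main() {
	const patchString = `
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kibishii-deployment
spec:
  template:
    spec:
      containers:
      - name: kibishii
        image: {{.Image}}
`
	t := template.Must(template.New("imagePatch").Parse(patchString))
	// Render to stdout instead of the worker-image-patch.yaml file.
	if err := t.Execute(os.Stdout, patchImageData{Image: "my-proxy.example.com/kibishii/kibishii:latest"}); err != nil {
		panic(err)
	}
}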
func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
	timeout := 30 * time.Minute
	interval := 1 * time.Second
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20)
		defer ctxCancel()
-		kibishiiGenerateCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
-			"/usr/local/bin/generate.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
-			strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),
-			strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum), strconv.Itoa(kibishiiData.ExpectedNodes))
+		kibishiiGenerateCmd := exec.CommandContext(
+			timeout,
+			"kubectl",
+			"exec",
+			"-n",
+			namespace,
+			"jump-pad",
+			"--",
+			"/usr/local/bin/generate.sh",
+			strconv.Itoa(kibishiiData.Levels),
+			strconv.Itoa(kibishiiData.DirsPerLevel),
+			strconv.Itoa(kibishiiData.FilesPerLevel),
+			strconv.Itoa(kibishiiData.FileLength),
+			strconv.Itoa(kibishiiData.BlockSize),
+			strconv.Itoa(kibishiiData.PassNum),
+			strconv.Itoa(kibishiiData.ExpectedNodes),
+		)
		fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)

		stdout, stderr, err := veleroexec.RunCommand(kibishiiGenerateCmd)
@@ -341,26 +503,44 @@ func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiD
func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
	timeout := 10 * time.Minute
	interval := 5 * time.Second
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20)
-		defer ctxCancel()
-		kibishiiVerifyCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
-			"/usr/local/bin/verify.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
-			strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),
-			strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum),
-			strconv.Itoa(kibishiiData.ExpectedNodes))
-		fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)
-
-		stdout, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd)
-		if strings.Contains(stderr, "Timeout occurred") {
-			return false, nil
-		}
-		if err != nil {
-			fmt.Printf("Kibishii verify stdout: %s stderr: %s err: %s\n", stdout, stderr, err)
-			return false, nil
-		}
-		return true, nil
-	})
+	err := wait.PollUntilContextTimeout(
+		ctx,
+		interval,
+		timeout,
+		true,
+		func(ctx context.Context) (bool, error) {
+			timeout, ctxCancel := context.WithTimeout(context.Background(), time.Minute*20)
+			defer ctxCancel()
+			kibishiiVerifyCmd := exec.CommandContext(
+				timeout,
+				"kubectl",
+				"exec",
+				"-n",
+				namespace,
+				"jump-pad",
+				"--",
+				"/usr/local/bin/verify.sh",
+				strconv.Itoa(kibishiiData.Levels),
+				strconv.Itoa(kibishiiData.DirsPerLevel),
+				strconv.Itoa(kibishiiData.FilesPerLevel),
+				strconv.Itoa(kibishiiData.FileLength),
+				strconv.Itoa(kibishiiData.BlockSize),
+				strconv.Itoa(kibishiiData.PassNum),
+				strconv.Itoa(kibishiiData.ExpectedNodes),
+			)
+			fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)
+
+			stdout, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd)
+			if strings.Contains(stderr, "Timeout occurred") {
+				return false, nil
+			}
+			if err != nil {
+				fmt.Printf("Kibishii verify stdout: %s stderr: %s err: %s\n", stdout, stderr, err)
+				return false, nil
+			}
+			return true, nil
+		},
+	)

	if err != nil {
		return errors.Wrapf(err, "Failed to verify kibishii data in namespace %s\n", namespace)
@@ -370,7 +550,12 @@ func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiDat
}

func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespace string) error {
-	return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
+	return WaitForPods(
+		ctx,
+		client,
+		kibishiiNamespace,
+		[]string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"},
+	)
}

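The change from wait.PollImmediate to wait.PollUntilContextTimeout follows the deprecation of the context-free pollers in k8s.io/apimachinery. A self-contained sketch of the new signature with a toy condition:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Polls every 100ms for up to 2s; the `true` argument means the condition
	// runs immediately instead of after one interval, matching the behavior of
	// the old wait.PollImmediate.
	err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // toy condition: succeed on the third try
		})
	fmt.Println(attempts, err) // 3 <nil>
}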
func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error {
@@ -382,9 +567,17 @@ func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace stri
	return nil
}

-func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient,
-	providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures,
-	kibishiiDirectory string, useVolumeSnapshots bool, kibishiiData *KibishiiData) error {
+func KibishiiPrepareBeforeBackup(
+	oneHourTimeout context.Context,
+	client TestClient,
+	providerName,
+	kibishiiNamespace,
+	registryCredentialFile,
+	veleroFeatures,
+	kibishiiDirectory string,
+	kibishiiData *KibishiiData,
+	imageRegistryProxy string,
+) error {
	fmt.Printf("installKibishii %s\n", time.Now().Format("2006-01-02 15:04:05"))
	serviceAccountName := "default"

@@ -398,8 +591,15 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie
		return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, kibishiiNamespace)
	}

-	if err := installKibishii(oneHourTimeout, kibishiiNamespace, providerName, veleroFeatures,
-		kibishiiDirectory, useVolumeSnapshots, kibishiiData.ExpectedNodes); err != nil {
+	if err := installKibishii(
+		oneHourTimeout,
+		kibishiiNamespace,
+		providerName,
+		veleroFeatures,
+		kibishiiDirectory,
+		kibishiiData.ExpectedNodes,
+		imageRegistryProxy,
+	); err != nil {
		return errors.Wrap(err, "Failed to install Kibishii workload")
	}
	// wait for kibishii pod startup
@@ -84,7 +84,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyC
		}
	}

-	pluginsTmp, err := getPlugins(ctx, *veleroCfg)
+	pluginsTmp, err := GetPlugins(ctx, *veleroCfg, true)
	if err != nil {
		return errors.WithMessage(err, "Failed to get provider plugins")
	}
@@ -120,29 +120,49 @@ func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyC
		return errors.WithMessagef(err, "Failed to get Velero InstallOptions for plugin provider %s", veleroCfg.ObjectStoreProvider)
	}

+	_, err = k8s.GetNamespace(ctx, *veleroCfg.ClientToInstallVelero, veleroCfg.VeleroNamespace)
+	// We should uninstall Velero for a new installation.
+	if !apierrors.IsNotFound(err) {
+		if err := VeleroUninstall(context.Background(), *veleroCfg); err != nil {
+			return errors.Wrapf(err, "Failed to uninstall velero %s", veleroCfg.VeleroNamespace)
+		}
+	}
+
+	// If the velero namespace does not exist, create it for service account creation.
+	if err := k8s.KubectlCreateNamespace(ctx, veleroCfg.VeleroNamespace); err != nil {
+		return errors.Wrapf(err, "Failed to create namespace %s to install Velero", veleroCfg.VeleroNamespace)
+	}
+
+	// Create the Backup Repository ConfigMap.
+	if _, err := k8s.CreateConfigMap(
+		veleroCfg.ClientToInstallVelero.ClientGo,
+		veleroCfg.VeleroNamespace,
+		test.BackupRepositoryConfigName,
+		nil,
+		map[string]string{
+			test.UploaderTypeKopia: "{\"cacheLimitMB\": 2048, \"fullMaintenanceInterval\": \"normalGC\"}",
+		},
+	); err != nil {
+		return errors.WithMessagef(err,
+			"Failed to create %s ConfigMap in %s namespace",
+			test.BackupRepositoryConfigName,
+			veleroCfg.VeleroNamespace,
+		)
+	}
+
	// For AWS IRSA credential test, AWS IAM service account is required, so if ServiceAccountName and EKSPolicyARN
	// are both provided, we assume IRSA test is running, otherwise skip this IAM service account creation part.
	if veleroCfg.CloudProvider == test.AWS && veleroInstallOptions.ServiceAccountName != "" {
		if veleroCfg.EKSPolicyARN == "" {
			return errors.New("Please provide EKSPolicyARN for IRSA test.")
		}
-		_, err = k8s.GetNamespace(ctx, *veleroCfg.ClientToInstallVelero, veleroCfg.VeleroNamespace)
-		// We should uninstall Velero for a new service account creation.
-		if !apierrors.IsNotFound(err) {
-			if err := VeleroUninstall(context.Background(), *veleroCfg); err != nil {
-				return errors.Wrapf(err, "Failed to uninstall velero %s", veleroCfg.VeleroNamespace)
-			}
-		}
-		// If velero namespace does not exist, we should create it for service account creation
-		if err := k8s.KubectlCreateNamespace(ctx, veleroCfg.VeleroNamespace); err != nil {
-			return errors.Wrapf(err, "Failed to create namespace %s to install Velero", veleroCfg.VeleroNamespace)
-		}
		if err := k8s.KubectlDeleteClusterRoleBinding(ctx, "velero-cluster-role"); err != nil {
			return errors.Wrapf(err, "Failed to delete clusterrolebinding %s to %s namespace", "velero-cluster-role", veleroCfg.VeleroNamespace)
		}
		if err := k8s.KubectlCreateClusterRoleBinding(ctx, "velero-cluster-role", "cluster-admin", veleroCfg.VeleroNamespace, veleroInstallOptions.ServiceAccountName); err != nil {
			return errors.Wrapf(err, "Failed to create clusterrolebinding %s to %s namespace", "velero-cluster-role", veleroCfg.VeleroNamespace)
		}

		if err := eksutil.KubectlDeleteIAMServiceAcount(ctx, veleroInstallOptions.ServiceAccountName, veleroCfg.VeleroNamespace, veleroCfg.ClusterToInstallVelero); err != nil {
			return errors.Wrapf(err, "Failed to delete service account %s to %s namespace", veleroInstallOptions.ServiceAccountName, veleroCfg.VeleroNamespace)
		}
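For readers unfamiliar with the backup repository ConfigMap, the helper above keys a JSON settings blob by uploader type. A minimal sketch of the equivalent object built with the client-go types; the namespace is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "backup-repository-config",
			Namespace: "velero",
		},
		Data: map[string]string{
			// Keyed by uploader type; Velero is pointed at this ConfigMap via
			// the --backup-repository-configmap server flag added below.
			"kopia": `{"cacheLimitMB": 2048, "fullMaintenanceInterval": "normalGC"}`,
		},
	}
	fmt.Println(cm.Data["kopia"])
}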
@@ -367,6 +387,14 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options
		args = append(args, fmt.Sprintf("--uploader-type=%v", options.UploaderType))
	}

+	if options.ItemBlockWorkerCount > 1 {
+		args = append(args, fmt.Sprintf("--item-block-worker-count=%d", options.ItemBlockWorkerCount))
+	}
+
+	if options.BackupRepoConfigMap != "" {
+		args = append(args, fmt.Sprintf("--backup-repository-configmap=%s", options.BackupRepoConfigMap))
+	}
+
	if err := createVeleroResources(ctx, cli, namespace, args, options); err != nil {
		return err
	}
@@ -27,6 +27,7 @@ import (
	"os"
	"os/exec"
	"os/user"
+	"path"
	"path/filepath"
	"reflect"
	"regexp"
@@ -57,81 +58,65 @@ const RestoreObjectsPrefix = "restores"
const PluginsObjectsPrefix = "plugins"
var ImagesMatrix = map[string]map[string][]string{
	"v1.10": {
		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.6.0"},
		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.6.0"},
		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.1"},
		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.6.0"},
		"csi":                   {"gcr.io/velero-gcp/velero-plugin-for-csi:v0.4.0"},
		"velero":                {"gcr.io/velero-gcp/velero:v1.10.2"},
		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.10.2"},
	},
	"v1.11": {
		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.7.0"},
		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.7.0"},
		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.1"},
		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.7.0"},
		"csi":                   {"gcr.io/velero-gcp/velero-plugin-for-csi:v0.5.0"},
		"velero":                {"gcr.io/velero-gcp/velero:v1.11.1"},
		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.11.1"},
	},
	"v1.12": {
		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.8.0"},
		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.8.0"},
		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.1"},
		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.8.0"},
		"csi":                   {"gcr.io/velero-gcp/velero-plugin-for-csi:v0.6.0"},
		"velero":                {"gcr.io/velero-gcp/velero:v1.12.4"},
		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.12.4"},
	},
	"v1.13": {
-		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.9.2"},
-		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.9.2"},
-		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.2"},
-		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.9.2"},
-		"csi":                   {"gcr.io/velero-gcp/velero-plugin-for-csi:v0.7.1"},
-		"datamover":             {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.9.2"},
-		"velero":                {"gcr.io/velero-gcp/velero:v1.13.2"},
-		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.13.2"},
+		"aws":                   {"velero/velero-plugin-for-aws:v1.9.2"},
+		"azure":                 {"velero/velero-plugin-for-microsoft-azure:v1.9.2"},
+		"vsphere":               {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
+		"gcp":                   {"velero/velero-plugin-for-gcp:v1.9.2"},
+		"csi":                   {"velero/velero-plugin-for-csi:v0.7.1"},
+		"datamover":             {"velero/velero-plugin-for-aws:v1.9.2"},
+		"velero":                {"velero/velero:v1.13.2"},
+		"velero-restore-helper": {"velero/velero-restore-helper:v1.13.2"},
	},
	"v1.14": {
-		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.10.1"},
-		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.10.1"},
-		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.2"},
-		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.10.1"},
-		"datamover":             {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.10.1"},
-		"velero":                {"gcr.io/velero-gcp/velero:v1.14.1"},
-		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.14.1"},
+		"aws":                   {"velero/velero-plugin-for-aws:v1.10.1"},
+		"azure":                 {"velero/velero-plugin-for-microsoft-azure:v1.10.1"},
+		"vsphere":               {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
+		"gcp":                   {"velero/velero-plugin-for-gcp:v1.10.1"},
+		"datamover":             {"velero/velero-plugin-for-aws:v1.10.1"},
+		"velero":                {"velero/velero:v1.14.1"},
+		"velero-restore-helper": {"velero/velero-restore-helper:v1.14.1"},
	},
	"v1.15": {
-		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.11.0"},
-		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.11.0"},
-		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.2"},
-		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.11.0"},
-		"datamover":             {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.11.0"},
-		"velero":                {"gcr.io/velero-gcp/velero:v1.15.2"},
-		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:v1.15.2"},
+		"aws":                   {"velero/velero-plugin-for-aws:v1.11.0"},
+		"azure":                 {"velero/velero-plugin-for-microsoft-azure:v1.11.0"},
+		"vsphere":               {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
+		"gcp":                   {"velero/velero-plugin-for-gcp:v1.11.0"},
+		"datamover":             {"velero/velero-plugin-for-aws:v1.11.0"},
+		"velero":                {"velero/velero:v1.15.2"},
+		"velero-restore-helper": {"velero/velero-restore-helper:v1.15.2"},
	},
	"v1.16": {
-		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.12.0"},
-		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:v1.12.0"},
-		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.2"},
-		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:v1.12.0"},
-		"datamover":             {"gcr.io/velero-gcp/velero-plugin-for-aws:v1.12.0"},
-		"velero":                {"gcr.io/velero-gcp/velero:v1.15.0"},
-		"velero-restore-helper": {"gcr.io/velero-gcp/velero:v1.16.0"},
+		"aws":                   {"velero/velero-plugin-for-aws:v1.12.0"},
+		"azure":                 {"velero/velero-plugin-for-microsoft-azure:v1.12.0"},
+		"vsphere":               {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
+		"gcp":                   {"velero/velero-plugin-for-gcp:v1.12.0"},
+		"datamover":             {"velero/velero-plugin-for-aws:v1.12.0"},
+		"velero":                {"velero/velero:v1.15.0"},
+		"velero-restore-helper": {"velero/velero:v1.16.0"},
	},
	"main": {
-		"aws":                   {"gcr.io/velero-gcp/velero-plugin-for-aws:main"},
-		"azure":                 {"gcr.io/velero-gcp/velero-plugin-for-microsoft-azure:main"},
-		"vsphere":               {"gcr.io/velero-gcp/velero-plugin-for-vsphere:v1.5.2"},
-		"gcp":                   {"gcr.io/velero-gcp/velero-plugin-for-gcp:main"},
-		"datamover":             {"gcr.io/velero-gcp/velero-plugin-for-aws:main"},
-		"velero":                {"gcr.io/velero-gcp/velero:main"},
-		"velero-restore-helper": {"gcr.io/velero-gcp/velero-restore-helper:main"},
+		"aws":                   {"velero/velero-plugin-for-aws:main"},
+		"azure":                 {"velero/velero-plugin-for-microsoft-azure:main"},
+		"vsphere":               {"vsphereveleroplugin/velero-plugin-for-vsphere:v1.5.2"},
+		"gcp":                   {"velero/velero-plugin-for-gcp:main"},
+		"datamover":             {"velero/velero-plugin-for-aws:main"},
+		"velero":                {"velero/velero:main"},
+		"velero-restore-helper": {"velero/velero-restore-helper:main"},
	},
}

// UpdateImagesMatrixByProxy appends the proxy host to every image in the matrix.
func UpdateImagesMatrixByProxy(imageRegistryProxy string) {
	if imageRegistryProxy != "" {
		for i := range ImagesMatrix {
			for j := range ImagesMatrix[i] {
				ImagesMatrix[i][j][0] = path.Join(imageRegistryProxy, ImagesMatrix[i][j][0])
			}
		}
	}
}

func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig, error) {
	fmt.Printf("Get the images for version %s\n", version)
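One subtlety in this rewrite is that path.Join treats the image reference as a plain path. That composes correctly for a bare registry host but silently corrupts a scheme-prefixed value, so the proxy should be passed as a host, not a URL. A small sketch with made-up hosts:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Bare host prefix: composes the expected proxied reference.
	fmt.Println(path.Join("my-proxy.example.com", "velero/velero:main"))
	// Output: my-proxy.example.com/velero/velero:main

	// Scheme-prefixed proxy: path.Join cleans the "//" and corrupts the value.
	fmt.Println(path.Join("https://my-proxy.example.com", "velero/velero:main"))
	// Output: https:/my-proxy.example.com/velero/velero:main
}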
@@ -672,37 +657,44 @@ func VeleroVersion(ctx context.Context, veleroCLI, veleroNamespace string) error
	return nil
}

-// getProviderPlugins only provides plugins for a specific cloud provider
-func getProviderPlugins(ctx context.Context, veleroCLI string, cloudProvider string) ([]string, error) {
-	if cloudProvider == "" {
-		return []string{}, errors.New("CloudProvider should be provided")
-	}
+// GetPlugins collects all kinds of plugins for VeleroInstall, such as provider
+// plugins (cloud provider/object store provider; if the object store provider is not
+// provided, it is set to the cloud provider's value) and feature plugins (CSI/Datamover).
+func GetPlugins(ctx context.Context, veleroCfg VeleroConfig, defaultBSL bool) ([]string, error) {
+	veleroCLI := veleroCfg.VeleroCLI
+	cloudProvider := veleroCfg.CloudProvider
+	objectStoreProvider := veleroCfg.ObjectStoreProvider
+	providerPlugins := veleroCfg.Plugins
+	needDataMoverPlugin := false
+	var plugins []string

	version, err := GetVeleroVersion(ctx, veleroCLI, true)
	if err != nil {
		return nil, errors.WithMessage(err, "failed to get velero version")
	}

-	plugins, err := getPluginsByVersion(version, cloudProvider, false)
-	if err != nil {
-		return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", cloudProvider, version)
+	// Read the plugins for the additional BSL here.
+	if !defaultBSL {
+		fmt.Printf("Additional BSL provider = %s\n", veleroCfg.AdditionalBSLProvider)
+		fmt.Printf("Additional BSL plugins = %v\n", veleroCfg.AddBSLPlugins)
+
+		if veleroCfg.AddBSLPlugins == "" {
+			if veleroCfg.AdditionalBSLProvider == "" {
+				return []string{}, errors.New("AdditionalBSLProvider should be provided.")
+			}
+
+			plugins, err = getPluginsByVersion(version, veleroCfg.AdditionalBSLProvider, false)
+			if err != nil {
+				return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", veleroCfg.AdditionalBSLProvider, version)
+			}
+		} else {
+			plugins = append(plugins, veleroCfg.AddBSLPlugins)
+		}
+
+		return plugins, nil
	}

	return plugins, nil
}

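A sketch of the two call sites this split implies; ctx and veleroCfg are assumed to be in scope, and the comments mark which branch of GetPlugins each call exercises:

// Default BSL: resolve plugins for the main cloud provider.
plugins, err := GetPlugins(ctx, veleroCfg, true)
if err != nil {
	return err
}

// Additional BSL: resolve plugins for veleroCfg.AdditionalBSLProvider, or take
// the explicit veleroCfg.AddBSLPlugins override verbatim.
addBSLPlugins, err := GetPlugins(ctx, veleroCfg, false)
if err != nil {
	return err
}
fmt.Println(plugins, addBSLPlugins)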
-// getPlugins will collect all kinds plugins for VeleroInstall, such as provider
-// plugins(cloud provider/object store provider, if object store provider is not
-// provided, it should be set to value as cloud provider's), feature plugins (CSI/Datamover)
-func getPlugins(ctx context.Context, veleroCfg VeleroConfig) ([]string, error) {
-	veleroCLI := veleroCfg.VeleroCLI
-	cloudProvider := veleroCfg.CloudProvider
-	objectStoreProvider := veleroCfg.ObjectStoreProvider
-	providerPlugins := veleroCfg.Plugins
-	needDataMoverPlugin := false

	// Fetch the plugins for the provider before checking for the object store provider below.
	var plugins []string
	if len(providerPlugins) > 0 {
		plugins = strings.Split(providerPlugins, ",")
	} else {
@@ -713,47 +705,30 @@ func getPlugins(ctx context.Context, veleroCfg VeleroConfig) ([]string, error) {
			objectStoreProvider = cloudProvider
		}

		var version string
		var err error
		if veleroCfg.VeleroVersion != "" {
			version = veleroCfg.VeleroVersion
		} else {
			version, err = GetVeleroVersion(ctx, veleroCLI, true)
			if err != nil {
				return nil, errors.WithMessage(err, "failed to get velero version")
			}
		}

		if veleroCfg.SnapshotMoveData && veleroCfg.DataMoverPlugin == "" && !veleroCfg.IsUpgradeTest {
			needDataMoverPlugin = true
		}

		plugins, err = getPluginsByVersion(version, cloudProvider, needDataMoverPlugin)
		if err != nil {
			return nil, errors.WithMessagef(err, "Fail to get plugin by provider %s and version %s", objectStoreProvider, version)
		}
	}

	return plugins, nil
}

-// VeleroAddPluginsForProvider determines which plugins need to be installed for a provider and
-// installs them in the current Velero installation, skipping over those that are already installed.
-func VeleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNamespace string, provider string, plugin string) error {
-	var err error
-	var plugins []string
-	if plugin == "" {
-		plugins, err = getProviderPlugins(ctx, veleroCLI, provider)
-	} else {
-		plugins = append(plugins, plugin)
-	}
-	fmt.Printf("provider cmd = %v\n", provider)
-	fmt.Printf("plugins cmd = %v\n", plugins)
-	if err != nil {
-		return errors.WithMessage(err, "Failed to get plugins")
-	}
+// AddPlugins installs the given plugins in the current Velero installation, skipping over those that are already installed.
+func AddPlugins(plugins []string, veleroCfg VeleroConfig) error {
	for _, plugin := range plugins {
		stdoutBuf := new(bytes.Buffer)
		stderrBuf := new(bytes.Buffer)

-		installPluginCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "plugin", "add", plugin, "--confirm")
+		installPluginCmd := exec.CommandContext(context.TODO(), veleroCfg.VeleroCLI, "--namespace", veleroCfg.VeleroNamespace, "plugin", "add", plugin, "--confirm")
		fmt.Printf("installPluginCmd cmd =%v\n", installPluginCmd)
		installPluginCmd.Stdout = stdoutBuf
		installPluginCmd.Stderr = stderrBuf
@@ -1446,14 +1421,6 @@ func UpdateVeleroDeployment(ctx context.Context, veleroCfg VeleroConfig) ([]stri
	}
	cmds = append(cmds, cmd)

-	args := fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
-
-	cmd = &common.OsCommandLine{
-		Cmd:  "sed",
-		Args: []string{args},
-	}
-	cmds = append(cmds, cmd)
-
	cmd = &common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{fmt.Sprintf("s#\\\"server\\\",#\\\"server\\\",\\\"--uploader-type=%s\\\",#g", veleroCfg.UploaderType)},
@@ -1496,14 +1463,6 @@ func UpdateNodeAgent(ctx context.Context, veleroCfg VeleroConfig, dsjson string)
	}
	cmds = append(cmds, cmd)

-	args := fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
-
-	cmd = &common.OsCommandLine{
-		Cmd:  "sed",
-		Args: []string{args},
-	}
-	cmds = append(cmds, cmd)
-
	cmd = &common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{"s#\\\"name\\\"\\: \\\"restic\\\"#\\\"name\\\"\\: \\\"node-agent\\\"#g"},