Compare commits

...

58 Commits

Author SHA1 Message Date
Wenkai Yin(尹文开)
8f9e9378e8 Merge pull request #7055 from kaovilai/warnOnCreateAlreadyExistsGetError-release-1.11
release-1.11: restore: Use warning when Create IsAlreadyExist and Get error (#7004)
2023-11-03 09:25:13 +08:00
Tiger Kaovilai
d7dd050216 restore: Use warning when Create IsAlreadyExist and Get error (#7004)
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2023-11-02 16:05:19 -04:00
Daniel Jiang
c6ec8f23bf Merge pull request #6992 from Lyndon-Li/release-1.11
[1.11] Issue 6988: udmrepo use region specified in BSL when s3URL is empty
2023-10-23 11:33:06 +08:00
Lyndon-Li
fb2012c09f udmrepo use region specified in BSL when s3URL is empty
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-10-20 20:45:59 +08:00
lyndon
a8cdfc29c3 Merge pull request #6889 from Lyndon-Li/release-1.11
Flush repo after policy is set
2023-09-28 15:17:04 +08:00
lyndon
f1771d5348 Merge branch 'release-1.11' into release-1.11
2023-09-28 14:51:15 +08:00
Lyndon-Li
e9afa77407 flush repo after policy is set
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-09-28 14:47:58 +08:00
Xun Jiang/Bruce Jiang
19c8836624 Add PSA audit and warn labels. (#6773)
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-09-07 14:04:08 -04:00
lyndon
4e23bbb8a4 Merge pull request #6759 from Lyndon-Li/release-1.11
[1.11] Fix issue 6753
2023-09-05 11:16:28 +08:00
Lyndon-Li
c9a8eef1e9 fix issue 6753
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-09-05 10:52:44 +08:00
qiuming
5a55d03cde Merge pull request #6722 from danfengliu/fix-default-storageclass-issue
Fix missing default storageclass issue
2023-08-31 09:46:11 +08:00
danfengl
80ccbd2b70 Fix missing default storageclass issue
Signed-off-by: danfengl <danfengl@vmware.com>
2023-08-30 10:55:57 +00:00
danfengliu
e6b510e32b Merge pull request #6684 from danfengliu/fix-storage-class-failure-issue
Replace pod with deployment for E2E test
2023-08-25 16:45:29 +08:00
danfengl
6ef013d7c0 Replace pod with deployment for E2E test
Signed-off-by: danfengl <danfengl@vmware.com>
2023-08-25 07:45:44 +00:00
lyndon
2e5aeb799a Merge pull request #6566 from kaovilai/s3profilefix-1.11
GetS3Credentials pass profile from config to NewSharedCredentials
2023-08-07 16:25:30 +08:00
Daniel Jiang
aca627922e Merge branch 'release-1.11' into s3profilefix-1.11
2023-08-07 14:14:38 +08:00
lyndon
d90197732f Merge pull request #6597 from Lyndon-Li/release-1.11
[1.11] Fix issue 6571
2023-08-03 15:48:39 +08:00
Lyndon-Li
37d94edb70 fix issue 6571
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-08-03 15:25:02 +08:00
lyndon
034018de55 Merge pull request #6593 from Lyndon-Li/release-1.11
[1.11] Fix issue 6575
2023-08-03 10:52:37 +08:00
Lyndon-Li
598023710a fix issue 6575
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-08-03 10:30:05 +08:00
Tiger Kaovilai
408bafa29c GetS3Credentials pass profile from config to NewSharedCredentials
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
2023-07-31 10:18:03 -04:00
lyndon
0d67b73962 Merge pull request #6543 from Lyndon-Li/release-1.11
[release-1.11] Fix issue 6534
2023-07-25 18:44:23 +08:00
lyndon
2ef2fdcb46 Merge branch 'release-1.11' into release-1.11
2023-07-25 17:33:08 +08:00
Lyndon-Li
e90bcc18ef fix issue 6534
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-07-25 17:28:12 +08:00
lyndon
14ca531e09 Merge pull request #6528 from Lyndon-Li/release-1.11
[Cherry-pick] Restict namespace to node-agent cache
2023-07-25 17:23:21 +08:00
Lyndon-Li
990fdfb593 restict namespace to node-agent cache
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-07-20 09:55:51 +08:00
Xun Jiang/Bruce Jiang
bdbe7eb242 Add v1.11.1 changelog. (#6522)
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-19 14:46:26 +08:00
danfengliu
5afe837f76 Merge pull request #6516 from blackpiglet/v1.11_change_push_to_gcr
Integrate pushing to docker hub and gcr.io in one docker build and push command
2023-07-18 15:33:14 +08:00
Xun Jiang
350cb6dec6 Integrate pushing to docker hub and gcr.io in one docker build and push command.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-07-18 14:20:03 +08:00
Xun Jiang/Bruce Jiang
ef23da3289 Fix release-1.11 push github action out of space issue. (#6500)
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-15 07:23:45 +08:00
Xun Jiang/Bruce Jiang
6862fb84b9 Merge pull request #6488 from kayrus/openstack-cis-zone-labels-1.11
Add support for OpenStack CSI drivers topology keys
2023-07-14 17:00:36 +08:00
lyndon
c8e405c89b Merge branch 'release-1.11' into openstack-cis-zone-labels-1.11
2023-07-14 16:43:33 +08:00
Xun Jiang/Bruce Jiang
5836a2a0c9 Merge pull request #6485 from blackpiglet/release-1.11_bump_golang
Bump Golang to v1.20.6 for release-1.11.
2023-07-13 11:24:08 +08:00
kayrus
a1e08f4eec Add support for OpenStack CSI drivers topology keys
Signed-off-by: kayrus <kayrus@users.noreply.github.com>
2023-07-12 22:33:11 +02:00
Xun Jiang
61a08ccc30 Bump Golang to v1.20.6 for release-1.11.
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-12 09:43:40 +08:00
Daniel Jiang
46a355c293 Merge pull request #6477 from Lyndon-Li/release-1.11
[Cherry-pick] Fix-issue-6297
2023-07-10 21:08:56 +08:00
Lyndon-Li
1cb966da57 fix-issue-6297
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-07-10 16:28:20 +08:00
Wenkai Yin(尹文开)
286db706e9 Restore Endpoints before Services (#6316)
Restore Endpoints before Services

Fixes #6280

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2023-06-20 14:34:50 +08:00
Xun Jiang/Bruce Jiang
f33ea376e9 Merge pull request #6322 from blackpiglet/psa_violation_fix_release_1.11
[cherry-pick][release-1.11]Make the E2E testing pods obey the restricted pod security standard.
2023-05-31 15:46:20 +08:00
Xun Jiang/Bruce Jiang
1349e570f9 Merge branch 'release-1.11' into psa_violation_fix_release_1.11
2023-05-31 15:31:37 +08:00
Shubham Pampattiwar
ba8465b87d Merge pull request #6324 from blackpiglet/6276_cherry_pick_release_1.11
[cherry-pick][release-1.11]Fix status.progress not getting updated for backup
2023-05-30 09:43:16 -07:00
kkothule
7dccc17690 Fix status.progress not getting updated for backup
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-05-30 15:47:38 +08:00
Xun Jiang
9b922782e1 Make the E2E testing pods obey the restricted pod security standard.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-05-30 15:29:16 +08:00
qiuming
dc0a712089 Merge pull request #6189 from Lyndon-Li/release-1.11
[1.11] Fix issue 6182
2023-04-26 10:23:42 +08:00
Lyndon-Li
d6755f7953 Fix issue 6182
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-04-25 16:12:37 +08:00
Xun Jiang/Bruce Jiang
6ac085316d Update Golang to v1.20 for release-1.11 branch. (#6159)
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
Co-authored-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-23 12:07:48 +08:00
Xun Jiang/Bruce Jiang
0da2baa908 Merge pull request #6138 from danfengliu/cherry-pick-schedule-test
[Cherry-pick 1.11] Add E2E test for schedule backup creation principle
2023-04-13 09:49:32 +08:00
danfengl
8628388445 [Cherry-pick 1.11]Add E2E test for schedule backup creation principle
Signed-off-by: danfengl <danfengl@vmware.com>
2023-04-13 09:30:58 +08:00
Shubham Pampattiwar
495063b4f6 Merge pull request #6140 from blackpiglet/add_5865_in_release_note
Add PR #5865 in release note.
2023-04-12 13:14:03 -04:00
Xun Jiang
87794d4615 Add PR #5865 in release note.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-12 21:54:14 +08:00
Daniel Jiang
c3e7fd7a74 Merge pull request #6110 from ywk253100/230411_mf
Ignore not found error during patching managedFields
2023-04-11 21:30:01 +08:00
Wenkai Yin(尹文开)
5c0c378797 Ignore not found error during patching managedFields
Ignore not found error during patching managedFields

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2023-04-11 17:42:02 +08:00
lyndon
7d0d56e5fa Merge pull request #6104 from blackpiglet/release-1.11
Update goreleaser version in build image.
2023-04-11 09:55:13 +08:00
Xun Jiang
3c9570fd14 Update goreleaser version in build image.
Update goreleaser version to v1.15.2 in Velero build image.
Modify the path where goreleaser reads configuration file from.

Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-10 22:13:18 +08:00
Xun Jiang/Bruce Jiang
971396110f Merge pull request #6096 from blackpiglet/release-1.11
Pin Golang and distroless version for v1.11
2023-04-10 11:22:31 +08:00
Xun Jiang
9de61aa5a0 Pin Golang and distroless version for v1.11.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-10 11:05:43 +08:00
lyndon
5f3cb25311 Merge pull request #6095 from vmware-tanzu/blackpiglet-patch-1
Modify v1.11 changelog.
2023-04-10 10:26:07 +08:00
Xun Jiang/Bruce Jiang
e16cb76892 Modify resouce policy example document link in changelog.
Signed-off-by: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com>
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-07 20:14:05 +08:00
75 changed files with 778 additions and 261 deletions

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
@@ -72,7 +72,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- name: Check out the code
uses: actions/checkout@v2

View File

@@ -10,7 +10,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- name: Check out the code
uses: actions/checkout@v2
@@ -32,4 +32,5 @@ jobs:
- name: Run staticcheck
uses: dominikh/staticcheck-action@v1.3.0
with:
version: "2022.1.3"
version: "2023.1.3"
install-go: false

View File

@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- uses: actions/checkout@v3
@@ -60,10 +60,21 @@ jobs:
files: coverage.out
verbose: true
# Use the JSON key in secret to login gcr.io
- uses: 'docker/login-action@v2'
with:
registry: 'gcr.io' # or REGION.docker.pkg.dev
username: '_json_key'
password: '${{ secrets.GCR_SA_KEY }}'
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
- name: Publish container image
if: github.repository == 'vmware-tanzu/velero'
run: |
sudo swapoff -a
sudo rm -f /mnt/swapfile
docker image prune -a --force
# Build and push Velero image to docker registry
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)
@@ -87,19 +98,3 @@ jobs:
uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
# Use the JSON key in secret to login gcr.io
- uses: 'docker/login-action@v1'
with:
registry: 'gcr.io' # or REGION.docker.pkg.dev
username: '_json_key'
password: '${{ secrets.GCR_SA_KEY }}'
# Push image to GCR to facilitate some environments that have rate limitation to docker hub, e.g. vSphere.
- name: Publish container image to GCR
if: github.repository == 'vmware-tanzu/velero'
run: |
sudo swapoff -a
sudo rm -f /mnt/swapfile
docker image prune -a --force
REGISTRY=gcr.io/velero-gcp ./hack/docker-push.sh

View File

@@ -54,3 +54,10 @@ release:
name: velero
draft: true
prerelease: auto
git:
# What should be used to sort tags when gathering the current and previous
# tags if there are more than one tag in the same commit.
#
# Default: `-version:refname`
tag_sort: -version:creatordate

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as velero-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as velero-builder
ARG GOPROXY
ARG BIN
@@ -44,7 +44,7 @@ RUN mkdir -p /output/usr/bin && \
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as restic-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as restic-builder
ARG BIN
ARG TARGETOS
@@ -66,7 +66,7 @@ RUN mkdir -p /output/usr/bin && \
/go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh
# Velero image packing section
FROM gcr.io/distroless/base-nossl-debian11:nonroot
FROM gcr.io/distroless/base-nossl-debian11@sha256:9523ef8cf054e23a81e722d231c6f604ab43a03c5b174b5c8386c78c0b6473d0
LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"

View File

@@ -22,9 +22,11 @@ PKG := github.com/vmware-tanzu/velero
# Where to push the docker image.
REGISTRY ?= velero
GCR_REGISTRY ?= gcr.io/velero-gcp
# Image name
IMAGE ?= $(REGISTRY)/$(BIN)
GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)
# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
# that pull base images from different registries.
@@ -66,8 +68,10 @@ TAG_LATEST ?= false
ifeq ($(TAG_LATEST), true)
IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
else
IMAGE_TAGS ?= $(IMAGE):$(VERSION)
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
endif
ifeq ($(shell docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
@@ -183,6 +187,7 @@ endif
--output=type=$(BUILDX_OUTPUT_TYPE) \
--platform $(BUILDX_PLATFORMS) \
$(addprefix -t , $(IMAGE_TAGS)) \
$(addprefix -t , $(GCR_IMAGE_TAGS)) \
--build-arg=GOPROXY=$(GOPROXY) \
--build-arg=PKG=$(PKG) \
--build-arg=BIN=$(BIN) \

View File

@@ -50,7 +50,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.19 as tilt-helper
FROM golang:1.20.6 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1,3 +1,26 @@
## v1.11.1
### 2023-07-19
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.11.1
### Container Image
`velero/velero:v1.11.1`
### Documentation
https://velero.io/docs/v1.11/
### Upgrading
https://velero.io/docs/v1.11/upgrade-to-1.11/
### All changes
* Add support for OpenStack CSI drivers topology keys (#6488, @kayrus)
* Enhance the code because of #6297, the return value of GetBucketRegion is not recorded, as a result, when it fails, we have no way to get the cause (#6477, @Lyndon-Li)
* Fixed a bug where status.progress is not getting updated for backups. (#6324, @blackpiglet)
* Restore Endpoints before Services (#6316, @ywk253100)
* Fix issue #6182. If pod is not running, don't treat it as an error, let it go and leave a warning. (#6189, @Lyndon-Li)
## v1.11
### 2023-04-07
@@ -29,17 +52,23 @@ The Progress() and Cancel() methods are needed to facilitate long-running Restor
This is intended as a replacement for the previously-approved Upload Progress Monitoring design ([Upload Progress Monitoring](https://github.com/vmware-tanzu/velero/blob/main/design/upload-progress.md)) to expand the supported use cases beyond snapshot upload to include what was previously called Async Backup/Restore Item Actions.
#### Flexible resource policy that can filter volumes to skip in the backup
This feature provides a flexible policy to filter volumes in the backup without requiring patching any labels or annotations to the pods or volumes. This policy is configured as k8s ConfigMap and maintained by the users themselves, and it can be extended to more scenarios in the future. By now, the policy rules out volumes from backup depending on the CSI driver, NFS setting, volume size, and StorageClass setting. Please refer to [policy API design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/handle-backup-of-volumes-by-resources-filters.md#api-design) for the policy's ConifgMap format. It is not guaranteed to work on unofficial third-party plugins as it may not follow the existing backup workflow code logic of Velero.
This feature provides a flexible policy to filter volumes in the backup without requiring patching any labels or annotations to the pods or volumes. This policy is configured as k8s ConfigMap and maintained by the users themselves, and it can be extended to more scenarios in the future. By now, the policy rules out volumes from backup depending on the CSI driver, NFS setting, volume size, and StorageClass setting. Please refer to [Resource policies rules](https://velero.io/docs/v1.11/resource-filtering/#resource-policies) for the policy's ConifgMap format. It is not guaranteed to work on unofficial third-party plugins as it may not follow the existing backup workflow code logic of Velero.
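As an illustrative sketch only (the authoritative schema is in the resource policies doc linked above; key names and the `--resource-policies-configmap` flag are reproduced here as best understood, not quoted from this changelog), the policy is a small YAML document stored in a ConfigMap and referenced when creating a backup:

    # resource-policies.yaml -- minimal sketch
    version: v1
    volumePolicies:
      - conditions:
          capacity: "0,10Gi"   # match volumes in the 0-10Gi range
          storageClass:
            - gp2
        action:
          type: skip           # leave matching volumes out of the backup

    kubectl create configmap skip-small-volumes -n velero --from-file=resource-policies.yaml
    velero backup create demo --resource-policies-configmap skip-small-volumes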
#### Resource Filters that can distinguish cluster scope and namespace scope resources
This feature adds four new resource filters for backup. The new filters are separated into cluster scope and namespace scope. Before this feature, Velero could not filter cluster scope resources precisely. This feature provides the ability and refactors existing resource filter parameters.
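For example, a backup that keeps one namespace's resources plus a chosen set of cluster-scoped resources could be expressed with the new scope-aware filters (flag names as documented for v1.11 resource filtering; shown as a sketch, not a complete command):

    velero backup create app-backup \
      --include-namespaces app \
      --include-cluster-scoped-resources=persistentvolumes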
#### New parameter in installation to customize the serviceaccount name
The `velero install` sub-command now includes a new parameter,`--service-account-name`, which allows users to specify the ServiceAccountName for the Velero and node-agent pods. This feature may be particularly useful for users who utilize IRSA (IAM Roles for Service Accounts) in Amazon EKS (Elastic Kubernetes Service)."
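A sketch of the new flag in use (the other install flags are placeholders for a typical AWS/IRSA setup, not a complete installation command):

    velero install \
      --provider aws \
      --bucket my-velero-bucket \
      --service-account-name velero-irsa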
#### Add a parameter for setting the Velero server connection with the k8s API server's timeout
In Velero, some code pieces need to communicate with the k8s API server. Before v1.11, these code pieces used hard-code timeout settings. This feature adds a resource-timeout parameter in the velero server binary to make it configurable.
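For example (assuming the flag is spelled `--resource-timeout` and takes a Go duration, per the parameter name above), the timeout can be raised by editing the server container args in the Velero deployment:

    # velero deployment, server container args (sketch)
    - server
    - --resource-timeout=30m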
#### Add resource list in the output of the restore describe command
Before this feature, Velero restore didn't have a restored resources list as the Velero backup. It's not convenient for users to learn what is restored. This feature adds the resources list and the handling result of the resources (including created, updated, failed, and skipped).
#### Support JSON format output of backup describe command
Before the Velero v1.11 release, users could not choose Velero's backup describe command's output format. The command output format is friendly for human reading, but it's not a structured output, and it's not easy for other programs to get information from it. Velero v1.11 adds a JSON format output for the backup describe command.
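Assuming the usual output-flag wiring (`-o`/`--output`; the exact flag name is an assumption here, not confirmed by this changelog), usage would look roughly like:

    velero backup describe my-backup --details -o json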
#### Refactor controllers with controller-runtime
In v1.11, Backup Controller and Restore controller are refactored with controller-runtime. Till v1.11, all Velero controllers use the controller-runtime framework.
@@ -59,6 +88,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
### All Changes
* Ignore not found error during patching managedFields (#6110, @ywk253100)
* Modify new scope resource filters name. (#6089, @blackpiglet)
* Make Velero not exits when EnableCSI is on and CSI snapshot not installed (#6062, @blackpiglet)
* Restore Services before Clusters (#6057, @ywk253100)
@@ -100,7 +130,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
* Enable staticcheck linter. (#5788, @blackpiglet)
* Set Kopia IgnoreUnknownTypes in ErrorHandlingPolicy to True for ignoring backup unknown file type (#5786, @qiuming-best)
* Bump up Restic version to 0.15.0 (#5784, @qiuming-best)
* Add File system backup related matrics to Grafana dashboard
* Add File system backup related metrics to Grafana dashboard
- Add metrics backup_warning_total for record of total warnings
- Add metrics backup_last_status for record of last status of the backup (#5779, @allenxu404)
* Design for Handling backup of volumes by resources filters (#5773, @qiuming-best)

View File

@@ -61,7 +61,7 @@ in progress for 1.9.
* Add rbac and annotation test cases (#4455, @mqiu)
* remove --crds-version in velero install command. (#4446, @jxun)
* Upgrade e2e test vsphere plugin (#4440, @mqiu)
* Fix e2e test failures for the inappropriate optimaze of velero install (#4438, @mqiu)
* Fix e2e test failures for the inappropriate optimize of velero install (#4438, @mqiu)
* Limit backup namespaces on test resource filtering cases (#4437, @mqiu)
* Bump up Go to 1.17 (#4431, @reasonerjt)
* Added `<backup name>`-itemsnapshots.json.gz to the backup format. This file exists

View File

@@ -0,0 +1 @@
Fix issue #6519. Restrict the client manager of node-agent server to include only Velero resources from the server's namespace, otherwise, the controllers will try to reconcile CRs from all the installed Velero namespaces.

View File

@@ -0,0 +1 @@
Fix issue #6534, reset PVB CR's StorageLocation to the latest one during backup sync as same as the backup CR

View File

@@ -0,0 +1 @@
Non default s3 credential profiles work on Unified Repository Provider (kopia)

View File

@@ -0,0 +1 @@
Fix issue 6575, flush the repo after delete the snapshot, otherwise, the changes(deleting repo snapshot) cannot be committed to the repo.

View File

@@ -0,0 +1 @@
Fix issue #6571, fix the problem for restore item operation to set the errors correctly so that they can be recorded by Velero restore and then reflect the correct status for Velero restore.

View File

@@ -0,0 +1 @@
Fix issue #6753, remove the check for read-only BSL in restore async operation controller since Velero cannot fully support read-only mode BSL in restore at present

View File

@@ -0,0 +1 @@
Add PSA audit and warn labels.

View File

@@ -0,0 +1 @@
Flush repo after policy is set so that policy is retrieved correctly by TreeForSource

View File

@@ -0,0 +1 @@
Fix #6988, always get region from BSL if it is not empty

View File

@@ -0,0 +1 @@
restore: Use warning when Create IsAlreadyExist and Get error

View File

@@ -175,7 +175,7 @@ If there are one or more, download the backup tarball from backup storage, untar
## Alternatives Considered
Another proposal for higher level `DeleteItemActions` was initially included, which would require implementors to individually download the backup tarball themselves.
Another proposal for higher level `DeleteItemActions` was initially included, which would require implementers to individually download the backup tarball themselves.
While this may be useful long term, it is not a good fit for the current goals as each plugin would be re-implementing a lot of boilerplate.
See the deletion-plugins.md file for this alternative proposal in more detail.

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.18
go 1.20
require (
cloud.google.com/go/storage v1.21.0

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=linux/amd64 golang:1.19-bullseye
FROM --platform=linux/amd64 golang:1.20.6-bullseye
ARG GOPROXY
@@ -50,7 +50,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3
# get goreleaser
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.12.3/goreleaser_Linux_x86_64.tar.gz && \
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.15.2/goreleaser_Linux_x86_64.tar.gz && \
tar xvf goreleaser_Linux_x86_64.tar.gz && \
mv goreleaser /usr/bin/goreleaser && \
chmod +x /usr/bin/goreleaser

View File

@@ -48,12 +48,10 @@ if [[ "${PUBLISH:-}" != "TRUE" ]]; then
goreleaser release \
--clean \
--release-notes="${RELEASE_NOTES_FILE}" \
--skip-publish \
--config goreleaser.yaml
--skip-publish
else
echo "Getting ready to publish"
goreleaser release \
--clean \
--release-notes="${RELEASE_NOTES_FILE}"
--config goreleaser.yaml
fi

View File

@@ -1,6 +0,0 @@
git:
# What should be used to sort tags when gathering the current and previous
# tags if there are more than one tag in the same commit.
#
# Default: `-version:refname`
tag_sort: -version:creatordate

View File

@@ -279,12 +279,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
items := collector.getAllItems()
log.WithField("progress", "").Infof("Collected %d items matching the backup spec from the Kubernetes API (actual number of items backed up may be more or less depending on velero.io/exclude-from-backup annotation, plugins returning additional related items to back up, etc.)", len(items))
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
original := backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress.TotalItems = len(items)
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated := backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = len(items)
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress.totalItems")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
itemBackupper := &itemBackupper{
backupRequest: backupRequest,
@@ -333,12 +337,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
lastUpdate = &val
case <-ticker.C:
if lastUpdate != nil {
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
original := backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated := backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = lastUpdate.totalItems
updated.Status.Progress.ItemsBackedUp = lastUpdate.itemsBackedUp
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
lastUpdate = nil
}
}
@@ -413,12 +421,17 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
// do a final update on progress since we may have just added some CRDs and may not have updated
// for the last few processed items.
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
original = backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated = backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems)
updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems)
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))

View File

@@ -448,6 +448,10 @@ const (
azureCsiZoneKey = "topology.disk.csi.azure.com/zone"
gkeCsiZoneKey = "topology.gke.io/zone"
gkeZoneSeparator = "__"
// OpenStack CSI drivers topology keys
cinderCsiZoneKey = "topology.manila.csi.openstack.org/zone"
manilaCsiZoneKey = "topology.cinder.csi.openstack.org/zone"
)
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
@@ -502,7 +506,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
if !labelFound {
var k string
log.Infof("label %q is not present on PersistentVolume", zoneLabelDeprecated)
k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, zoneLabel, zoneLabelDeprecated)
k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, cinderCsiZoneKey, manilaCsiZoneKey, zoneLabel, zoneLabelDeprecated)
if pvFailureDomainZone != "" {
log.Infof("zone info from nodeAffinity requirements: %s, key: %s", pvFailureDomainZone, k)
} else {

View File

@@ -132,6 +132,12 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, metri
&v1.Pod{}: {
Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
},
&velerov1api.PodVolumeBackup{}: {
Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
},
&velerov1api.PodVolumeRestore{}: {
Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
},
},
}
mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{

View File

@@ -514,10 +514,13 @@ High priorities:
- Replica sets go before deployments/other controllers so they can be explicitly
restored and be adopted by controllers.
- CAPI ClusterClasses go before Clusters.
- Endpoints go before Services so no new Endpoints will be created
- Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created
for the same clusters
Low priorities:
- Tanzu ClusterBootstraps go last as it can reference any other kind of resources.
ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
- ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
- CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic.
Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters.
See https://github.com/kubernetes-sigs/cluster-api/issues/4105
@@ -543,6 +546,7 @@ var defaultRestorePriorities = restore.Priorities{
// in the backup.
"replicasets.apps",
"clusterclasses.cluster.x-k8s.io",
"endpoints",
"services",
},
LowPriorities: []string{

View File

@@ -532,8 +532,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
if len(errors) > 0 {
return nil, errors
}
allLocations := &velerov1api.VolumeSnapshotLocationList{}
err := b.kbClient.List(context.Background(), allLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
volumeSnapshotLocations := &velerov1api.VolumeSnapshotLocationList{}
err := b.kbClient.List(context.Background(), volumeSnapshotLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
if err != nil {
errors = append(errors, fmt.Sprintf("error listing volume snapshot locations: %v", err))
return nil, errors
@@ -541,8 +541,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
// build a map of provider->list of all locations for the provider
allProviderLocations := make(map[string][]*velerov1api.VolumeSnapshotLocation)
for i := range allLocations.Items {
loc := allLocations.Items[i]
for i := range volumeSnapshotLocations.Items {
loc := volumeSnapshotLocations.Items[i]
allProviderLocations[loc.Spec.Provider] = append(allProviderLocations[loc.Spec.Provider], &loc)
}

View File

@@ -479,7 +479,7 @@ func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context,
}
func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) {
//TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the bakcup resource is refactored
//TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored
// Record original json
oldData, err := json.Marshal(backup)

View File

@@ -209,6 +209,7 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
podVolumeBackup.Namespace = backup.Namespace
podVolumeBackup.ResourceVersion = ""
podVolumeBackup.Spec.BackupStorageLocation = location.Name
err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{})
switch {

View File

@@ -477,8 +477,8 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
// Completed yet.
inProgressOperations, _, opsCompleted, opsFailed, errs := getRestoreItemOperationProgress(restoreReq.Restore, pluginManager, *restoreReq.GetItemOperationsList())
if len(errs) > 0 {
for err := range errs {
restoreLog.Error(err)
for _, err := range errs {
restoreErrors.Velero = append(restoreErrors.Velero, fmt.Sprintf("error from restore item operation: %v", err))
}
}

View File

@@ -138,17 +138,6 @@ func (r *restoreOperationsReconciler) Reconcile(ctx context.Context, req ctrl.Re
return ctrl.Result{}, errors.Wrap(err, "error getting backup info")
}
if info.location.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly {
log.Infof("Cannot check progress on Restore operations because backup storage location %s is currently in read-only mode; marking restore PartiallyFailed", info.location.Name)
restore.Status.Phase = velerov1api.RestorePhasePartiallyFailed
err := r.updateRestoreAndOperationsJSON(ctx, original, restore, nil, &itemoperationmap.OperationsForRestore{ErrsSinceUpdate: []string{"BSL is read-only"}}, false, false)
if err != nil {
log.WithError(err).Error("error updating Restore")
}
return ctrl.Result{}, nil
}
pluginManager := r.newPluginManager(r.logger)
defer pluginManager.CleanupClients()
backupStore, err := r.backupStoreGetter.Get(info.location, pluginManager, r.logger)

View File

@@ -30,7 +30,11 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
const defaultServiceAccountName = "velero"
const (
defaultServiceAccountName = "velero"
podSecurityLevel = "privileged"
podSecurityVersion = "latest"
)
var (
DefaultVeleroPodCPURequest = "500m"
@@ -146,8 +150,12 @@ func Namespace(namespace string) *corev1.Namespace {
},
}
ns.Labels["pod-security.kubernetes.io/enforce"] = "privileged"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
ns.Labels["pod-security.kubernetes.io/enforce"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/enforce-version"] = podSecurityVersion
ns.Labels["pod-security.kubernetes.io/audit"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/audit-version"] = podSecurityVersion
ns.Labels["pod-security.kubernetes.io/warn"] = podSecurityLevel
ns.Labels["pod-security.kubernetes.io/warn-version"] = podSecurityVersion
return ns
}

View File

@@ -45,6 +45,10 @@ func TestResources(t *testing.T) {
// PSA(Pod Security Admission) and PSS(Pod Security Standards).
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce-version"], "latest")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit-version"], "latest")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn"], "privileged")
assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn-version"], "latest")
crb := ClusterRoleBinding(DefaultVeleroNamespace)
// The CRB is a cluster-scoped resource

View File

@@ -132,6 +132,21 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
return nil, nil
}
err := kube.IsPodRunning(pod)
if err != nil {
for _, volumeName := range volumesToBackup {
err = errors.Wrapf(err, "backup for volume %s is skipped", volumeName)
log.WithError(err).Warn("Skip pod volume")
}
return nil, nil
}
err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
if err != nil {
return nil, []error{err}
}
repositoryType := getRepositoryType(b.uploaderType)
if repositoryType == "" {
err := errors.Errorf("empty repository type, uploader %s", b.uploaderType)
@@ -143,16 +158,6 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
return nil, []error{err}
}
err = kube.IsPodRunning(pod)
if err != nil {
return nil, []error{err}
}
err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
if err != nil {
return nil, []error{err}
}
// get a single non-exclusive lock since we'll wait for all individual
// backups to be complete before releasing it.
b.repoLocker.Lock(repo.Name)

View File

@@ -21,6 +21,8 @@ import (
"context"
"os"
goerr "errors"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
@@ -64,7 +66,7 @@ func GetS3Credentials(config map[string]string) (credentials.Value, error) {
return credentials.Value{}, errors.New("missing credential file")
}
creds := credentials.NewSharedCredentials(credentialsFile, "")
creds := credentials.NewSharedCredentials(credentialsFile, config[awsProfileKey])
credValue, err := creds.Get()
if err != nil {
return credValue, err
@@ -76,16 +78,20 @@ func GetS3Credentials(config map[string]string) (credentials.Value, error) {
// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetAWSBucketRegion(bucket string) (string, error) {
var region string
sess, err := session.NewSession()
if err != nil {
return "", errors.WithStack(err)
}
var region string
var requestErrs []error
for _, partition := range endpoints.DefaultPartitions() {
for regionHint := range partition.Regions() {
region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
region, err = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
if err != nil {
requestErrs = append(requestErrs, errors.Wrapf(err, "error to get region with hint %s", regionHint))
}
// we only need to try a single region hint per partition, so break after the first
break
@@ -96,5 +102,9 @@ func GetAWSBucketRegion(bucket string) (string, error) {
}
}
return "", errors.New("unable to determine bucket's region")
if requestErrs == nil {
return "", errors.Errorf("unable to determine region by bucket %s", bucket)
} else {
return "", errors.Wrapf(goerr.Join(requestErrs...), "error to get region by bucket %s", bucket)
}
}
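With this change, the credential profile used by the Unified Repository Provider comes from the BSL config instead of always falling back to the default profile. A sketch of a BSL that selects a non-default profile (the `profile` key mirrors the `awsProfileKey` read above; bucket and region values are placeholders):

    apiVersion: velero.io/v1
    kind: BackupStorageLocation
    metadata:
      name: default
      namespace: velero
    spec:
      provider: aws
      objectStorage:
        bucket: my-velero-bucket
      config:
        region: us-east-1
        profile: backup-writer   # non-default entry in the shared AWS credentials file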

View File

@@ -66,7 +66,7 @@ const (
repoOpDescMaintain = "repo maintenance"
repoOpDescForget = "forget"
repoConnectDesc = "unfied repo"
repoConnectDesc = "unified repo"
)
// NewUnifiedRepoProvider creates the service provider for Unified Repo
@@ -301,6 +301,11 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p
return errors.Wrap(err, "error to delete manifest")
}
err = bkRepo.Flush(ctx)
if err != nil {
return errors.Wrap(err, "error to flush repo")
}
log.Debug("Forget snapshot complete")
return nil
@@ -472,9 +477,11 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo
var err error
if s3Url == "" {
region, err = getS3BucketRegion(bucket)
if err != nil {
return map[string]string{}, errors.Wrap(err, "error get s3 bucket region")
if region == "" {
region, err = getS3BucketRegion(bucket)
if err != nil {
return map[string]string{}, errors.Wrap(err, "error get s3 bucket region")
}
}
s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region)

View File

@@ -734,6 +734,7 @@ func TestForget(t *testing.T) {
backupRepo *reposervicenmocks.BackupRepo
retFuncOpen []interface{}
retFuncDelete interface{}
retFuncFlush interface{}
credStoreReturn string
credStoreError error
expectedErr string
@@ -794,6 +795,37 @@ func TestForget(t *testing.T) {
},
expectedErr: "error to delete manifest: fake-error-3",
},
{
name: "flush fail",
getter: new(credmock.SecretStore),
credStoreReturn: "fake-password",
funcTable: localFuncTable{
getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) {
return map[string]string{}, nil
},
getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
return map[string]string{}, nil
},
},
repoService: new(reposervicenmocks.BackupRepoService),
backupRepo: new(reposervicenmocks.BackupRepo),
retFuncOpen: []interface{}{
func(context.Context, udmrepo.RepoOptions) udmrepo.BackupRepo {
return backupRepo
},
func(context.Context, udmrepo.RepoOptions) error {
return nil
},
},
retFuncDelete: func(context.Context, udmrepo.ID) error {
return nil
},
retFuncFlush: func(context.Context) error {
return errors.New("fake-error-4")
},
expectedErr: "error to flush repo: fake-error-4",
},
}
for _, tc := range testCases {
@@ -822,6 +854,7 @@ func TestForget(t *testing.T) {
if tc.backupRepo != nil {
backupRepo.On("DeleteManifest", mock.Anything, mock.Anything).Return(tc.retFuncDelete)
backupRepo.On("Flush", mock.Anything).Return(tc.retFuncFlush)
backupRepo.On("Close", mock.Anything).Return(nil)
}

View File

@@ -1357,8 +1357,8 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// otherwise, we will return the original creation error.
fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
if err != nil && isAlreadyExistsError {
ctx.log.Errorf("Error retrieving in-cluster version of %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
ctx.log.Warnf("Unable to retrieve in-cluster version of %s: %v, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err)
warnings.Add(namespace, err)
return warnings, errs, itemExists
}
}
@@ -1514,10 +1514,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if patchBytes != nil {
if _, err = resourceClient.Patch(name, patchBytes); err != nil {
ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs, itemExists
if !apierrors.IsNotFound(err) {
errs.Add(namespace, err)
return warnings, errs, itemExists
}
} else {
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
}
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
}
if groupResource == kuberesource.Pods {

View File

@@ -198,6 +198,10 @@ func SnapshotSource(
return "", 0, errors.Wrapf(err, "unable to set policy for si %v", sourceInfo)
}
if err := rep.Flush(ctx); err != nil {
return "", 0, errors.Wrap(err, "error to flush policy repo")
}
policyTree, err := treeForSourceFunc(ctx, rep, sourceInfo)
if err != nil {
return "", 0, errors.Wrapf(err, "unable to create policy getter for si %v", sourceInfo)
@@ -231,7 +235,7 @@ func reportSnapshotStatus(manifest *snapshot.Manifest, policyTree *policy.Tree)
var errs []string
if ds := manifest.RootEntry.DirSummary; ds != nil {
for _, ent := range ds.FailedEntries {
policy := policyTree.DefinedPolicy()
policy := policyTree.EffectivePolicy()
if !(policy != nil && *policy.ErrorHandlingPolicy.IgnoreUnknownTypes == true && strings.Contains(ent.Error, fs.ErrUnknown.Error())) {
errs = append(errs, fmt.Sprintf("Error when processing %v: %v", ent.EntryPath, ent.Error))
}

View File

@@ -71,7 +71,7 @@ func NewUploaderProvider(
log logrus.FieldLogger,
) (Provider, error) {
if credGetter.FromFile == nil {
return nil, errors.New("uninitialized FileStore credentail is not supported")
return nil, errors.New("uninitialized FileStore credential is not supported")
}
if uploaderType == uploader.KopiaType {
// We use the hardcode repositoryType velerov1api.BackupRepositoryTypeKopia for now, because we have only one implementation of unified repo.

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -6,7 +6,7 @@ layout: docs
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up

View File

@@ -10,7 +10,7 @@ the supported cloud providers block storage offerings (Amazon EBS Volumes, Az
It also provides a plugin model that enables anyone to implement additional object and block storage backends, outside the
main Velero repository.
The restic intergation was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
The restic integration was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable

View File

@@ -171,7 +171,7 @@ func TTLTest() {
Expect(t).To(Equal(test.ttl))
})
By(fmt.Sprintf("Waiting %s minutes for removing backup ralated resources by GC", test.ttl.String()), func() {
By(fmt.Sprintf("Waiting %s minutes for removing backup related resources by GC", test.ttl.String()), func() {
time.Sleep(test.ttl)
})

View File

@@ -75,6 +75,15 @@ func (p *PVCSelectedNodeChanging) StartRun() error {
}
func (p *PVCSelectedNodeChanging) CreateResources() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
By(fmt.Sprintf("Create a storage class %s.", StorageClassName), func() {
Expect(InstallStorageClass(context.Background(), fmt.Sprintf("testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))).To(Succeed())
})
By(fmt.Sprintf("Create a storage class %s.", StorageClassName), func() {
Expect(InstallTestStorageClasses(fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class")
})
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
@@ -87,7 +96,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
p.oldNodeName = nodeName
fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
pvcAnn := map[string]string{p.ann: nodeName}
_, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn)
_, err := CreatePod(p.Client, p.namespace, p.podName, StorageClassName, p.pvcName, []string{p.volume}, pvcAnn, nil)
Expect(err).To(Succeed())
err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
Expect(err).To(Succeed())
@@ -98,7 +107,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
By("Prepare ConfigMap data", func() {
nodeNameList, err := GetWorkerNodes(context.Background())
Expect(err).To(Succeed())
Expect(len(nodeNameList) > 2).To(Equal(true))
Expect(len(nodeNameList) > 1).To(Equal(true))
for _, nodeName := range nodeNameList {
if nodeName != p.oldNodeName {
p.newNodeName = nodeName
@@ -142,7 +151,7 @@ func (p *PVCSelectedNodeChanging) Restore() error {
}
func (p *PVCSelectedNodeChanging) Verify() error {
By(fmt.Sprintf("PVC selected node should be %s", p.newNodeName), func() {
pvcNameList, err := GetPvcByPodName(context.Background(), p.mappedNS, p.pvcName)
pvcNameList, err := GetPvcByPVCName(context.Background(), p.mappedNS, p.pvcName)
Expect(err).To(Succeed())
Expect(len(pvcNameList)).Should(Equal(1))
pvc, err := GetPVC(context.Background(), p.Client, p.mappedNS, pvcNameList[0])

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/google/uuid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -24,44 +25,44 @@ type StorageClasssChanging struct {
namespace string
srcStorageClass string
desStorageClass string
pvcName string
volume string
podName string
mappedNS string
deploymentName string
CaseBaseName string
}
const SCCBaseName string = "scc-"
var StorageClasssChangingTest func() = TestFunc(&StorageClasssChanging{
namespace: SCCBaseName + "1", TestCase: TestCase{NSBaseName: SCCBaseName}})
var StorageClasssChangingTest func() = TestFunc(&StorageClasssChanging{})
func (s *StorageClasssChanging) Init() error {
s.TestCase.Init()
UUIDgen, err := uuid.NewRandom()
Expect(err).To(Succeed())
s.CaseBaseName = SCCBaseName + UUIDgen.String()
s.namespace = SCCBaseName + UUIDgen.String()
s.BackupName = "backup-" + s.CaseBaseName
s.RestoreName = "restore-" + s.CaseBaseName
s.mappedNS = s.namespace + "-mapped"
s.VeleroCfg = VeleroCfg
s.Client = *s.VeleroCfg.ClientToInstallVelero
s.NSBaseName = SCCBaseName
s.namespace = s.NSBaseName + UUIDgen.String()
s.mappedNS = s.namespace + "-mapped"
s.TestMsg = &TestMSG{
Desc: "Changing PV/PVC Storage Classes",
FailedMSG: "Failed to changing PV/PVC Storage Classes",
Text: "Change the storage class of persistent volumes and persistent" +
" volume claims during restores",
}
s.BackupName = "backup-sc-" + UUIDgen.String()
s.RestoreName = "restore-" + UUIDgen.String()
s.srcStorageClass = "default"
s.desStorageClass = "e2e-storage-class"
s.srcStorageClass = StorageClassName
s.desStorageClass = StorageClassName2
s.labels = map[string]string{"velero.io/change-storage-class": "RestoreItemAction",
"velero.io/plugin-config": ""}
s.data = map[string]string{s.srcStorageClass: s.desStorageClass}
s.configmaptName = "change-storage-class-config"
s.volume = "volume-1"
s.pvcName = fmt.Sprintf("pvc-%s", s.volume)
s.podName = "pod-1"
return nil
}
func (s *StorageClasssChanging) StartRun() error {
s.BackupName = s.BackupName + "backup-" + UUIDgen.String()
s.RestoreName = s.RestoreName + "restore-" + UUIDgen.String()
s.BackupArgs = []string{
"create", "--namespace", VeleroCfg.VeleroNamespace, "backup", s.BackupName,
"--include-namespaces", s.namespace,
@@ -74,20 +75,34 @@ func (s *StorageClasssChanging) StartRun() error {
return nil
}
func (s *StorageClasssChanging) CreateResources() error {
s.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
By(fmt.Sprintf("Create a storage class %s", s.desStorageClass), func() {
Expect(InstallStorageClass(context.Background(), fmt.Sprintf("testdata/storage-class/%s.yaml",
s.VeleroCfg.CloudProvider))).To(Succeed())
label := map[string]string{
"app": "test",
}
s.Ctx, _ = context.WithTimeout(context.Background(), 10*time.Minute)
By(("Installing storage class..."), func() {
Expect(InstallTestStorageClasses(fmt.Sprintf("testdata/storage-class/%s.yaml", s.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class")
})
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", s.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil)
By(fmt.Sprintf("Create a deployment in namespace %s", s.VeleroCfg.VeleroNamespace), func() {
pvc, err := CreatePVC(s.Client, s.namespace, s.pvcName, s.srcStorageClass, nil)
Expect(err).To(Succeed())
vols := CreateVolumes(pvc.Name, []string{s.volume})
deployment := NewDeployment(s.CaseBaseName, s.namespace, 1, label, nil).WithVolume(vols).Result()
deployment, err = CreateDeployment(s.Client.ClientGo, s.namespace, deployment)
Expect(err).To(Succeed())
s.deploymentName = deployment.Name
err = WaitForReadyDeployment(s.Client.ClientGo, s.namespace, s.deploymentName)
Expect(err).To(Succeed())
})
By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {
_, err := CreateConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName, s.labels, s.data)
Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", s.VeleroCfg.VeleroNamespace))
@@ -97,19 +112,14 @@ func (s *StorageClasssChanging) CreateResources() error {
func (s *StorageClasssChanging) Destroy() error {
By(fmt.Sprintf("Expect storage class of PV %s to be %s ", s.volume, s.srcStorageClass), func() {
pvName, err := GetPVByPodName(s.Client, s.namespace, s.volume)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by pod name %s", s.podName))
pvName, err := GetPVByPVCName(s.Client, s.namespace, s.pvcName)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by PVC name %s", s.pvcName))
pv, err := GetPersistentVolume(s.Ctx, s.Client, s.namespace, pvName)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV by pod name %s", s.podName))
fmt.Println(pv)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV by name %s", pvName))
Expect(pv.Spec.StorageClassName).To(Equal(s.srcStorageClass),
fmt.Sprintf("PV storage %s is not as expected %s", pv.Spec.StorageClassName, s.srcStorageClass))
})
By(fmt.Sprintf("Start to destroy namespace %s......", s.NSBaseName), func() {
Expect(CleanupNamespacesWithPoll(s.Ctx, s.Client, s.NSBaseName)).To(Succeed(),
fmt.Sprintf("Failed to delete namespace %s", s.NSBaseName))
})
return nil
}
@@ -129,14 +139,26 @@ func (s *StorageClasssChanging) Restore() error {
}
func (s *StorageClasssChanging) Verify() error {
By(fmt.Sprintf("Expect storage class of PV %s to be %s ", s.volume, s.desStorageClass), func() {
time.Sleep(1 * time.Minute)
pvName, err := GetPVByPodName(s.Client, s.mappedNS, s.volume)
Expect(WaitForReadyDeployment(s.Client.ClientGo, s.mappedNS, s.deploymentName)).To(Succeed())
pvName, err := GetPVByPVCName(s.Client, s.mappedNS, s.pvcName)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by pod name %s", s.podName))
pv, err := GetPersistentVolume(s.Ctx, s.Client, s.mappedNS, pvName)
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV by pod name %s", s.podName))
fmt.Println(pv)
Expect(pv.Spec.StorageClassName).To(Equal(s.desStorageClass),
fmt.Sprintf("PV storage %s is not as expected %s", pv.Spec.StorageClassName, s.desStorageClass))
})
return nil
}
func (s *StorageClasssChanging) Clean() error {
if !s.VeleroCfg.Debug {
By(fmt.Sprintf("Start to destroy namespace %s......", s.CaseBaseName), func() {
Expect(CleanupNamespacesWithPoll(s.Ctx, s.Client, s.CaseBaseName)).To(Succeed(),
fmt.Sprintf("Failed to delete namespace %s", s.CaseBaseName))
})
DeleteConfigmap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName)
DeleteStorageClass(s.Ctx, s.Client, s.desStorageClass)
s.TestCase.Clean()
}
return nil
}


@@ -168,13 +168,13 @@ func BslDeletionTest(useVolumeSnapshots bool) {
Expect(AddLabelToPod(context.Background(), "kibishii-deployment-1", bslDeletionTestNs, label_2)).To(Succeed())
})
By("Get all 2 PVCs of Kibishii and label them seprately ", func() {
pvc, err := GetPvcByPodName(context.Background(), bslDeletionTestNs, podName_1)
By("Get all 2 PVCs of Kibishii and label them separately ", func() {
pvc, err := GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_1)
Expect(err).To(Succeed())
fmt.Println(pvc)
Expect(len(pvc)).To(Equal(1))
pvc1 := pvc[0]
pvc, err = GetPvcByPodName(context.Background(), bslDeletionTestNs, podName_2)
pvc, err = GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_2)
Expect(err).To(Succeed())
fmt.Println(pvc)
Expect(len(pvc)).To(Equal(1))


@@ -117,6 +117,7 @@ var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to
var _ = Describe("[Schedule][BR][Pause][LongTime] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest)
var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
var _ = Describe("[Schedule][BackupCreation] Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", ScheduleBackupCreationTest)
var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest)


@@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error {
podName := fmt.Sprintf("pod-%d", i)
pods = append(pods, podName)
By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
pod, err := CreatePod(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil, nil)
Expect(err).To(Succeed())
ann := map[string]string{
p.annotation: volumesToAnnotation,
@@ -124,7 +124,7 @@ func (p *PVBackupFiltering) CreateResources() error {
WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])
for i, pod := range p.podsList[index] {
for j := range p.volumesList[i] {
Expect(CreateFileToPod(p.Ctx, ns, pod, p.volumesList[i][j],
Expect(CreateFileToPod(p.Ctx, ns, pod, pod, p.volumesList[i][j],
FILE_NAME, fileContent(ns, pod, p.volumesList[i][j]))).To(Succeed())
}
}
@@ -182,7 +182,7 @@ func fileContent(namespace, podName, volume string) string {
}
func fileExist(ctx context.Context, namespace, podName, volume string) error {
c, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME)
c, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
FILE_NAME, volume, podName, namespace))
@@ -197,7 +197,7 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error {
}
}
func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
_, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME)
_, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
if err != nil {
return nil
} else {


@@ -84,7 +84,7 @@ func (f *FilteringCase) CreateResources() error {
}
//Create deployment
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(f.NSBaseName, namespace, f.replica, f.labels, nil)
deployment := NewDeployment(f.NSBaseName, namespace, f.replica, f.labels, nil).Result()
deployment, err := CreateDeployment(f.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))


@@ -100,7 +100,7 @@ func (e *ExcludeFromBackup) CreateResources() error {
}
//Create deployment: to be included
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(e.NSBaseName, namespace, e.replica, label2, nil)
deployment := NewDeployment(e.NSBaseName, namespace, e.replica, label2, nil).Result()
deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))


@@ -101,7 +101,7 @@ func (l *LabelSelector) CreateResources() error {
//Create deployment
fmt.Printf("Creating deployment in namespaces ...%s\n", namespace)
deployment := NewDeployment(l.NSBaseName, namespace, l.replica, labels, nil)
deployment := NewDeployment(l.NSBaseName, namespace, l.replica, labels, nil).Result()
deployment, err := CreateDeployment(l.Client.ClientGo, namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))


@@ -159,7 +159,7 @@ func (o *OrderedResources) CreateResources() error {
//Create deployment
deploymentName := fmt.Sprintf("deploy-%s", o.NSBaseName)
fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace)
deployment := NewDeployment(deploymentName, o.Namespace, 1, label, nil)
deployment := NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result()
deployment, err := CreateDeployment(o.Client.ClientGo, o.Namespace, deployment)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err))


@@ -0,0 +1,138 @@
package schedule
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/vmware-tanzu/velero/test/e2e"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
)
type ScheduleBackupCreation struct {
TestCase
namespace string
ScheduleName string
ScheduleArgs []string
Period int // Limitation: the unit is minutes only, and the value must evenly divide 60
randBackupName string
verifyTimes int
volume string
podName string
pvcName string
podAnn map[string]string
podSleepDuration time.Duration
}
var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{namespace: "sch1", TestCase: TestCase{NSBaseName: "schedule-backup-creation-test", UseVolumeSnapshots: false}})
func (n *ScheduleBackupCreation) Init() error {
n.VeleroCfg = VeleroCfg
n.Client = *n.VeleroCfg.ClientToInstallVelero
n.Period = 3 // Unit is minute
n.verifyTimes = 5 // The larger verifyTimes is, the more confidence we have
podSleepDurationStr := "300s"
n.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
n.TestMsg = &TestMSG{
Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
FailedMSG: "Failed to verify schedule back creation behavior",
Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
}
n.volume = "volume-1"
n.podName = "pod-1"
n.pvcName = "pvc-1"
n.podAnn = map[string]string{
"pre.hook.backup.velero.io/container": n.podName,
"pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]",
"pre.hook.backup.velero.io/timeout": "600s",
}
return nil
}
func (n *ScheduleBackupCreation) StartRun() error {
n.namespace = fmt.Sprintf("%s-%s", n.NSBaseName, "ns")
n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String()
n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()
n.ScheduleArgs = []string{
"--include-namespaces", n.namespace,
"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
"--default-volumes-to-fs-backup",
}
Expect(n.Period < 30).To(Equal(true))
return nil
}
func (p *ScheduleBackupCreation) CreateResources() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, nil, p.podAnn)
Expect(err).To(Succeed())
err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
Expect(err).To(Succeed())
})
return nil
}
func (n *ScheduleBackupCreation) Backup() error {
// Wait until the beginning of the given period before creating the schedule; this gives us
// a predictable window in which to wait for the first scheduled backup and to verify that no
// backup was created immediately between schedule creation and the first scheduled run.
By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
for i := 0; i < n.Period*60/30; i++ {
time.Sleep(30 * time.Second)
now := time.Now().Minute()
triggerNow := now % n.Period
if triggerNow == 0 {
Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "")
return "Fail to restore workload"
})
break
}
}
})
By("Delay one more minute to make sure the new backup was created in the given period", func() {
time.Sleep(1 * time.Minute)
})
By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() {
for i := 1; i <= n.verifyTimes; i++ {
fmt.Printf("Start to sleep %d minute #%d time...\n", n.podSleepDuration, i)
mi, _ := time.ParseDuration("60s")
time.Sleep(n.podSleepDuration + mi)
bMap := make(map[string]string)
backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed())
Expect(len(backupsInfo) == i).To(Equal(true))
for index, bi := range backupsInfo {
bList := strings.Split(bi, ",")
fmt.Printf("Backup %d: %v\n", index, bList)
bMap[bList[0]] = bList[1]
_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
Expect(err).To(Succeed())
}
if i == n.verifyTimes-1 {
backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
n.randBackupName = strings.Split(backupInfo, ",")[0]
}
}
})
return nil
}
func (n *ScheduleBackupCreation) Restore() error {
return nil
}
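The loop in Backup() above aligns schedule creation with the start of a period by polling every 30 seconds until the wall-clock minute is a multiple of Period. A standalone sketch of that alignment logic (the helper name is illustrative, not part of the suite):
// waitForPeriodStart polls every 30s and returns once the current minute is a
// multiple of period, mirroring the alignment loop in Backup() above.
func waitForPeriodStart(period int) {
    for i := 0; i < period*60/30; i++ {
        time.Sleep(30 * time.Second)
        if time.Now().Minute()%period == 0 {
            return
        }
    }
}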


@@ -31,7 +31,7 @@ func (n *ScheduleBackup) Init() error {
n.VeleroCfg = VeleroCfg
n.Client = *n.VeleroCfg.ClientToInstallVelero
n.Period = 3 // Unit is minute
n.verifyTimes = 5 // More verify times more confidence
n.verifyTimes = 5 // More larger verify times more confidence we have
n.TestMsg = &TestMSG{
Desc: "Set up a scheduled backup defined by a Cron expression",
FailedMSG: "Failed to schedule a backup",


@@ -83,7 +83,7 @@ func TestFunc(test VeleroBackupRestoreTest) func() {
flag.Parse()
veleroCfg := test.GetTestCase().VeleroCfg
// TODO: Skip nodeport test until issue https://github.com/kubernetes/kubernetes/issues/114384 fixed
if veleroCfg.CloudProvider == "azure" && strings.Contains(test.GetTestCase().NSBaseName, "nodeport") {
if (veleroCfg.CloudProvider == "azure" || veleroCfg.CloudProvider == "aws") && strings.Contains(test.GetTestCase().NSBaseName, "nodeport") {
Skip("Skip due to issue https://github.com/kubernetes/kubernetes/issues/114384 on AKS")
}
if veleroCfg.InstallVelero {


@@ -120,6 +120,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
tmpCfgForOldVeleroInstall.RestoreHelperImage = ""
tmpCfgForOldVeleroInstall.Plugins = ""
tmpCfgForOldVeleroInstall.UploaderType = ""
tmpCfgForOldVeleroInstall.UseVolumeSnapshots = useVolumeSnapshots
if supportUploaderType {
tmpCfgForOldVeleroInstall.UseRestic = false
tmpCfgForOldVeleroInstall.UseNodeAgent = !useVolumeSnapshots


@@ -10,6 +10,9 @@ import (
"os/exec"
)
const StorageClassName = "e2e-storage-class"
const StorageClassName2 = "e2e-storage-class-2"
type OsCommandLine struct {
Cmd string
Args []string


@@ -93,7 +93,7 @@ func GetCsiSnapshotHandle(client TestClient, backupName string) ([]string, error
return snapshotHandleList, nil
}
func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, backupName string) (string, error) {
pvcList, err := GetPvcByPodName(context.Background(), namespace, podName)
pvcList, err := GetPvcByPVCName(context.Background(), namespace, podName)
if err != nil {
return "", err
}
@@ -129,13 +129,61 @@ func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, ba
return "", errors.New(fmt.Sprintf("Fail to get VolumeSnapshotContentName for pod %s under namespace %s", podName, namespace))
}
func CheckVolumeSnapshotCR(client TestClient, backupName string, expectedCount int) ([]string, error) {
func CheckVolumeSnapshotCR(client TestClient, backupName string, expectedCount int, apiVersion string) ([]string, error) {
var err error
var snapshotContentNameList []string
if snapshotContentNameList, err = GetCsiSnapshotHandle(client, backupName); err != nil ||
len(snapshotContentNameList) != expectedCount {
return nil, errors.Wrap(err, "Fail to get Azure CSI snapshot content")
if apiVersion == "v1beta1" {
if snapshotContentNameList, err = GetCsiSnapshotHandle(client, backupName); err != nil {
return nil, errors.Wrap(err, "Fail to get Azure CSI snapshot content")
}
} else if apiVersion == "v1" {
if snapshotContentNameList, err = GetCsiSnapshotHandleV1(client, backupName); err != nil {
return nil, errors.Wrap(err, "Fail to get Azure CSI snapshot content")
}
} else {
return nil, errors.New("API version is invalid")
}
if len(snapshotContentNameList) != expectedCount {
return nil, errors.New(fmt.Sprintf("Snapshot count %d is not as expect %d", len(snapshotContentNameList), expectedCount))
}
fmt.Println(snapshotContentNameList)
return snapshotContentNameList, nil
}
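// GetCsiSnapshotHandleV1 lists VolumeSnapshotContents through the v1 snapshot API and
// returns the snapshot handle suffixes of the contents labeled with the given backup name.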
func GetCsiSnapshotHandleV1(client TestClient, backupName string) ([]string, error) {
_, snapshotClient, err := GetClients()
if err != nil {
return nil, err
}
vscList, err1 := snapshotClient.SnapshotV1().VolumeSnapshotContents().List(context.TODO(), metav1.ListOptions{})
if err1 != nil {
return nil, err1
}
var snapshotHandleList []string
for _, i := range vscList.Items {
if i.Status == nil {
fmt.Println("SnapshotHandle Status s nil")
continue
}
if i.Status.SnapshotHandle == nil {
fmt.Println("SnapshotHandle is nil")
continue
}
if i.Labels == nil {
fmt.Println("VolumeSnapshotContents label is nil")
continue
}
if i.Labels["velero.io/backup-name"] == backupName {
tmp := strings.Split(*i.Status.SnapshotHandle, "/")
snapshotHandleList = append(snapshotHandleList, tmp[len(tmp)-1])
}
}
if len(snapshotHandleList) == 0 {
fmt.Printf("No VolumeSnapshotContent from backup %s", backupName)
}
return snapshotHandleList, nil
}
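Since CheckVolumeSnapshotCR now takes an apiVersion argument, callers first discover which snapshot.storage.k8s.io version the cluster serves and pass it through, as GetSnapshotCheckPoint does later in this changeset. A sketch of the calling pattern (the variable names are assumptions):
versions, err := GetAPIVersions(veleroCfg.DefaultClient, "snapshot.storage.k8s.io")
if err != nil || len(versions) == 0 {
    return errors.New("failed to discover the snapshot API version")
}
// versions[0] is "v1" or "v1beta1"; CheckVolumeSnapshotCR routes to the matching client.
snapshotIDs, err := CheckVolumeSnapshotCR(client, backupName, expectCount, versions[0])
if err != nil {
    return errors.Wrap(err, "failed to list CSI snapshot contents for the backup")
}
fmt.Println(snapshotIDs)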


@@ -20,12 +20,12 @@ import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -34,7 +34,10 @@ import (
"github.com/vmware-tanzu/velero/test/e2e/util/common"
)
// ensureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on.
const StorageClassName = "e2e-storage-class"
const StorageClassName2 = "e2e-storage-class-2"
// ensureClusterExists returns whether or not a Kubernetes cluster exists for tests to be run on.
func EnsureClusterExists(ctx context.Context) error {
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run()
}
@@ -64,12 +67,12 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
//Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout.
fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning)))
fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1.PodRunning)))
return false, nil
}
// If any pod is still waiting we don't need to check any more so return and wait for next poll interval
if checkPod.Status.Phase != corev1api.PodRunning {
fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1api.PodRunning)
if checkPod.Status.Phase != corev1.PodRunning {
fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1.PodRunning)
return false, nil
}
}
@@ -82,7 +85,7 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
return nil
}
func GetPvcByPodName(ctx context.Context, namespace, podName string) ([]string, error) {
func GetPvcByPVCName(ctx context.Context, namespace, pvcName string) ([]string, error) {
// Example:
// NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
// kibishii-data-kibishii-deployment-0 Bound pvc-94b9fdf2-c30f-4a7b-87bf-06eadca0d5b6 1Gi RWO kibishii-storage-class 115s
@@ -95,7 +98,7 @@ func GetPvcByPodName(ctx context.Context, namespace, podName string) ([]string,
cmd = &common.OsCommandLine{
Cmd: "grep",
Args: []string{podName},
Args: []string{pvcName},
}
cmds = append(cmds, cmd)
@@ -201,6 +204,7 @@ func AddLabelToCRD(ctx context.Context, crd, label string) error {
func KubectlApplyByFile(ctx context.Context, file string) error {
args := []string{"apply", "-f", file, "--force=true"}
fmt.Println(args)
return exec.CommandContext(ctx, "kubectl", args...).Run()
}
@@ -214,39 +218,20 @@ func KubectlConfigUseContext(ctx context.Context, kubectlContext string) error {
return err
}
func GetAPIVersions(client *TestClient, name string) ([]string, error) {
var version []string
APIGroup, err := client.ClientGo.Discovery().ServerGroups()
if err != nil {
return nil, errors.Wrap(err, "Fail to get server API groups")
}
for _, group := range APIGroup.Groups {
fmt.Println(group.Name)
if group.Name == name {
for _, v := range group.Versions {
fmt.Println(v.Version)
version = append(version, v.Version)
}
return version, nil
}
}
return nil, errors.New("Server API groups is empty")
}
func GetPVByPodName(client TestClient, namespace, podName string) (string, error) {
pvcList, err := GetPvcByPodName(context.Background(), namespace, podName)
func GetPVByPVCName(client TestClient, namespace, pvcName string) (string, error) {
pvcList, err := GetPvcByPVCName(context.Background(), namespace, pvcName)
if err != nil {
return "", err
}
if len(pvcList) != 1 {
return "", errors.New(fmt.Sprintf("Only 1 PVC of pod %s should be found under namespace %s", podName, namespace))
return "", errors.New(fmt.Sprintf("Only 1 PVC of pod %s should be found under namespace %s but got %v", pvcName, namespace, pvcList))
}
pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0])
if err != nil {
return "", err
}
if len(pvList) != 1 {
return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], podName, namespace))
return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], pvcName, namespace))
}
pv_value, err := GetPersistentVolume(context.Background(), client, "", pvList[0])
fmt.Println(pv_value.Annotations["pv.kubernetes.io/provisioned-by"])
@@ -255,45 +240,31 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error
}
return pv_value.Name, nil
}
func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) {
volumes := []corev1.Volume{}
for _, volume := range volumeNameList {
var _pvcName string
if pvcName == "" {
_pvcName = fmt.Sprintf("pvc-%s", volume)
} else {
_pvcName = pvcName
}
pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
if err != nil {
return nil, err
}
volumes = append(volumes, corev1.Volume{
func PrepareVolumeList(volumeNameList []string) (vols []*corev1.Volume) {
for i, volume := range volumeNameList {
vols = append(vols, &corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ClaimName: fmt.Sprintf("pvc-%d", i),
ReadOnly: false,
},
},
})
}
pod, err := CreatePod(client, ns, podName, volumes)
if err != nil {
return nil, err
}
return pod, nil
return
}
func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error {
arg := []string{"exec", "-n", namespace, "-c", podName, podName,
func CreateFileToPod(ctx context.Context, namespace, podName, containerName, volume, filename, content string) error {
arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "/bin/sh", "-c", fmt.Sprintf("echo ns-%s pod-%s volume-%s > /%s/%s", namespace, podName, volume, volume, filename)}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
return cmd.Run()
}
func ReadFileFromPodVolume(ctx context.Context, namespace, podName, volume, filename string) (string, error) {
arg := []string{"exec", "-n", namespace, "-c", podName, podName,
func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, error) {
arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
"--", "cat", fmt.Sprintf("/%s/%s", volume, filename)}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
@@ -361,3 +332,65 @@ func GetAllService(ctx context.Context) (string, error) {
}
return stdout, nil
}
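// CreateVolumes builds one PVC-backed volume per entry in volumeNameList, with every volume claiming the given pvcName.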
func CreateVolumes(pvcName string, volumeNameList []string) (vols []*corev1.Volume) {
vols = []*corev1.Volume{}
for _, volume := range volumeNameList {
vols = append(vols, &corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: false,
},
},
})
}
return
}
func GetAPIVersions(client *TestClient, name string) ([]string, error) {
var version []string
APIGroup, err := client.ClientGo.Discovery().ServerGroups()
if err != nil {
return nil, errors.Wrap(err, "Fail to get server API groups")
}
for _, group := range APIGroup.Groups {
fmt.Println(group.Name)
if group.Name == name {
for _, v := range group.Versions {
fmt.Println(v.Version)
version = append(version, v.Version)
}
return version, nil
}
}
return nil, errors.New("Server API groups is empty")
}
func InstallTestStorageClasses(path string) error {
ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
defer ctxCancel()
err := InstallStorageClass(ctx, path)
if err != nil {
return err
}
content, err := os.ReadFile(path)
if err != nil {
return errors.Wrapf(err, "failed to get %s when install storage class", path)
}
// replace the storage class name so the manifest creates the second test storage class
newContent := strings.ReplaceAll(string(content), fmt.Sprintf("name: %s", StorageClassName), fmt.Sprintf("name: %s", StorageClassName2))
tmpFile, err := os.CreateTemp("", "sc-file")
if err != nil {
return errors.Wrapf(err, "failed to create temp file when install storage class")
}
defer os.Remove(tmpFile.Name())
if _, err := tmpFile.WriteString(newContent); err != nil {
return errors.Wrapf(err, "failed to write content into temp file %s when install storage class", tmpFile.Name())
}
return InstallStorageClass(ctx, tmpFile.Name())
}
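A typical call site passes the provider-specific manifest, as StorageClasssChanging.CreateResources() does earlier in this changeset, so that both e2e-storage-class and e2e-storage-class-2 get applied (a sketch reusing the path layout already present in the tests):
scPath := fmt.Sprintf("testdata/storage-class/%s.yaml", veleroCfg.CloudProvider)
if err := InstallTestStorageClasses(scPath); err != nil {
    return errors.Wrap(err, "failed to install the test storage classes")
}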


@@ -42,6 +42,20 @@ func CreateConfigMap(c clientset.Interface, ns, name string, labels, data map[st
return c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm, metav1.CreateOptions{})
}
func CreateConfigMapFromYAMLData(c clientset.Interface, yamlData, cmName, namespace string) error {
cmData := make(map[string]string)
cmData[cmName] = yamlData
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Namespace: namespace,
},
Data: cmData,
}
_, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
return err
}
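Note that the helper stores the YAML payload under a data key equal to the ConfigMap name. A minimal usage sketch (the payload, name, and namespace below are placeholders):
yamlData := "someKey: someValue" // placeholder payload
if err := CreateConfigMapFromYAMLData(client.ClientGo, yamlData, "sample-config", "velero"); err != nil {
    return errors.Wrap(err, "failed to create ConfigMap from YAML data")
}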
// WaitForConfigMapComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForConfigMapComplete(c clientset.Interface, ns, configmapName string) error {
return wait.Poll(PollInterval, PollTimeout, func() (bool, error) {
@@ -57,10 +71,18 @@ func GetConfigmap(c clientset.Interface, ns, secretName string) (*v1.ConfigMap,
return c.CoreV1().ConfigMaps(ns).Get(context.TODO(), secretName, metav1.GetOptions{})
}
func WaitForConfigmapDelete(c clientset.Interface, ns, name string) error {
func DeleteConfigmap(c clientset.Interface, ns, name string) error {
if err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to delete configmap in namespace %q", ns))
}
return nil
}
func WaitForConfigmapDelete(c clientset.Interface, ns, name string) error {
if err := DeleteConfigmap(c, ns, name); err != nil {
return err
}
return waitutil.PollImmediateInfinite(5*time.Second,
func() (bool, error) {
if _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), ns, metav1.GetOptions{}); err != nil {


@@ -26,6 +26,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)
const (
@@ -35,46 +37,90 @@ const (
PollTimeout = 15 * time.Minute
)
// DeploymentBuilder builds Deployment objects.
type DeploymentBuilder struct {
*apps.Deployment
}
func (d *DeploymentBuilder) Result() *apps.Deployment {
return d.Deployment
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func NewDeployment(name, ns string, replicas int32, labels map[string]string, containers []v1.Container) *apps.Deployment {
func NewDeployment(name, ns string, replicas int32, labels map[string]string, containers []v1.Container) *DeploymentBuilder {
if containers == nil {
containers = []v1.Container{
{
Name: fmt.Sprintf("container-%s", "busybox"),
Name: "container-busybox",
Image: "gcr.io/velero-gcp/busybox:latest",
Command: []string{"sleep", "1000000"},
// Make the pod obey the restricted pod security standards.
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: boolptr.False(),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{"ALL"},
},
RunAsNonRoot: boolptr.True(),
RunAsUser: func(i int64) *int64 { return &i }(65534),
RunAsGroup: func(i int64) *int64 { return &i }(65534),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
},
},
}
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
Labels: labels,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: new(apps.RollingUpdateDeployment),
return &DeploymentBuilder{
&apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
Labels: labels,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: new(apps.RollingUpdateDeployment),
},
Spec: v1.PodSpec{
Containers: containers,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
FSGroup: func(i int64) *int64 { return &i }(65534),
FSGroupChangePolicy: func(policy v1.PodFSGroupChangePolicy) *v1.PodFSGroupChangePolicy { return &policy }(v1.FSGroupChangeAlways),
},
Containers: containers,
},
},
},
},
}
}
func (d *DeploymentBuilder) WithVolume(volumes []*v1.Volume) *DeploymentBuilder {
vmList := []v1.VolumeMount{}
for _, v := range volumes {
vmList = append(vmList, v1.VolumeMount{
Name: v.Name,
MountPath: "/" + v.Name,
})
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, *v)
}
// NOTE: volumes are mounted only into the first container
d.Spec.Template.Spec.Containers[0].VolumeMounts = vmList
return d
}
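Putting the builder together with the PVC and volume helpers from this changeset, a deployment with a mounted volume is created roughly as follows (a sketch mirroring StorageClasssChanging.CreateResources(); the literal names are assumptions):
pvc, err := CreatePVC(client, ns, "pvc-volume-1", StorageClassName, nil)
if err != nil {
    return err
}
vols := CreateVolumes(pvc.Name, []string{"volume-1"})
deploy := NewDeployment("sample-app", ns, 1, map[string]string{"app": "test"}, nil).
    WithVolume(vols).
    Result()
deploy, err = CreateDeployment(client.ClientGo, ns, deploy)
if err != nil {
    return err
}
return WaitForReadyDeployment(client.ClientGo, ns, deploy.Name)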
func CreateDeploy(c clientset.Interface, ns string, deployment *apps.Deployment) error {
_, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
return err


@@ -35,6 +35,11 @@ import (
func CreateNamespace(ctx context.Context, client TestClient, namespace string) error {
ns := builder.ForNamespace(namespace).Result()
// Add label to avoid PSA check.
ns.Labels = map[string]string{
"pod-security.kubernetes.io/enforce": "baseline",
"pod-security.kubernetes.io/enforce-version": "latest",
}
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
@@ -45,6 +50,9 @@ func CreateNamespace(ctx context.Context, client TestClient, namespace string) e
func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace string, label map[string]string) error {
ns := builder.ForNamespace(namespace).Result()
ns.Labels = label
// Add label to avoid PSA check.
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
@@ -54,6 +62,11 @@ func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace
func CreateNamespaceWithAnnotation(ctx context.Context, client TestClient, namespace string, annotation map[string]string) error {
ns := builder.ForNamespace(namespace).Result()
// Add label to avoid PSA check.
ns.Labels = map[string]string{
"pod-security.kubernetes.io/enforce": "baseline",
"pod-security.kubernetes.io/enforce-version": "latest",
}
ns.ObjectMeta.Annotations = annotation
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {


@@ -26,7 +26,34 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) {
func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) {
if pvcName != "" && len(volumeNameList) != 1 {
return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
}
volumes := []corev1.Volume{}
for _, volume := range volumeNameList {
var _pvcName string
if pvcName == "" {
_pvcName = fmt.Sprintf("pvc-%s", volume)
} else {
_pvcName = pvcName
}
pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
if err != nil {
return nil, err
}
volumes = append(volumes, corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
})
}
vmList := []corev1.VolumeMount{}
for _, v := range volumes {
vmList = append(vmList, corev1.VolumeMount{
@@ -34,9 +61,11 @@ func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*co
MountPath: "/" + v.Name,
})
}
p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: name,
Annotations: ann,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{


@@ -38,7 +38,7 @@ func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (*
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
corev1.ResourceStorage: resource.MustParse("1Mi"),
},
},
StorageClassName: &sc,


@@ -3,6 +3,9 @@ package k8s
import (
"context"
"fmt"
"github.com/pkg/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func InstallStorageClass(ctx context.Context, yaml string) error {
@@ -10,3 +13,10 @@ func InstallStorageClass(ctx context.Context, yaml string) error {
err := KubectlApplyByFile(ctx, yaml)
return err
}
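// DeleteStorageClass deletes the named storage class from the cluster.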
func DeleteStorageClass(ctx context.Context, client TestClient, name string) error {
if err := client.ClientGo.StorageV1().StorageClasses().Delete(ctx, name, v1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Could not retrieve storage classes %s", name)
}
return nil
}
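Tests that install the extra storage classes are expected to remove them again in their Clean() step, as StorageClasssChanging does earlier in this changeset; a minimal sketch:
if err := DeleteStorageClass(ctx, client, StorageClassName2); err != nil {
    fmt.Printf("failed to delete storage class %s: %v\n", StorageClassName2, err)
}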


@@ -200,6 +200,13 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler
return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
}
labelNamespaceCmd := exec.CommandContext(ctx, "kubectl", "label", "namespace", namespace, "pod-security.kubernetes.io/enforce=baseline", "pod-security.kubernetes.io/enforce-version=latest", "--overwrite=true")
_, stderr, err = veleroexec.RunCommand(labelNamespaceCmd)
fmt.Printf("Label namespace with PSA policy: %s\n", labelNamespaceCmd)
if err != nil {
return errors.Wrapf(err, "failed to label namespace with PSA policy, stderr=%s", stderr)
}
kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
"-n", namespace, "-w", "--timeout=30m")
_, stderr, err = veleroexec.RunCommand(kibishiiSetWaitCmd)


@@ -1056,15 +1056,25 @@ func GetResticRepositories(ctx context.Context, veleroNamespace, targetNamespace
return common.GetListByCmdPipes(ctx, cmds)
}
func GetSnapshotCheckPoint(client TestClient, VeleroCfg VeleroConfig, expectCount int, namespaceBackedUp, backupName string, kibishiiPodNameList []string) (SnapshotCheckPoint, error) {
func GetSnapshotCheckPoint(client TestClient, VeleroCfg VeleroConfig, expectCount int, namespaceBackedUp, backupName string, KibishiiPVCNameList []string) (SnapshotCheckPoint, error) {
var snapshotCheckPoint SnapshotCheckPoint
var err error
snapshotCheckPoint.ExpectCount = expectCount
snapshotCheckPoint.NamespaceBackedUp = namespaceBackedUp
snapshotCheckPoint.PodName = kibishiiPodNameList
snapshotCheckPoint.PodName = KibishiiPVCNameList
if VeleroCfg.CloudProvider == "azure" && strings.EqualFold(VeleroCfg.Features, "EnableCSI") {
snapshotCheckPoint.EnableCSI = true
if snapshotCheckPoint.SnapshotIDList, err = util.CheckVolumeSnapshotCR(client, backupName, expectCount); err != nil {
resourceName := "snapshot.storage.k8s.io"
srcVersions, err := GetAPIVersions(VeleroCfg.DefaultClient, resourceName)
if err != nil {
return snapshotCheckPoint, err
}
if len(srcVersions) == 0 {
return snapshotCheckPoint, errors.New("Fail to get APIVersion")
}
if snapshotCheckPoint.SnapshotIDList, err = util.CheckVolumeSnapshotCR(client, backupName, expectCount, srcVersions[0]); err != nil {
return snapshotCheckPoint, errors.Wrapf(err, "Fail to get Azure CSI snapshot content")
}
}
@@ -1183,12 +1193,9 @@ func UpdateVeleroDeployment(ctx context.Context, veleroCfg VeleroConfig) ([]stri
Args: []string{"get", "deploy", "-n", veleroCfg.VeleroNamespace, "-ojson"},
}
cmds = append(cmds, cmd)
var args string
if veleroCfg.CloudProvider == "vsphere" {
args = fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"harbor-repo.vmware.com\\/velero_ci\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
} else {
args = fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
}
args := fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
cmd = &common.OsCommandLine{
Cmd: "sed",
Args: []string{args},
@@ -1236,12 +1243,9 @@ func UpdateNodeAgent(ctx context.Context, veleroCfg VeleroConfig, dsjson string)
Args: []string{dsjson},
}
cmds = append(cmds, cmd)
var args string
if veleroCfg.CloudProvider == "vsphere" {
args = fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"harbor-repo.vmware.com\\/velero_ci\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
} else {
args = fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
}
args := fmt.Sprintf("s#\\\"image\\\"\\: \\\"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]\\\"#\\\"image\\\"\\: \\\"gcr.io\\/velero-gcp\\/nightly\\/velero\\:%s\\\"#g", veleroCfg.VeleroVersion)
cmd = &common.OsCommandLine{
Cmd: "sed",
Args: []string{args},