Compare commits

...

32 Commits

Author SHA1 Message Date
Xun Jiang/Bruce Jiang
bdbe7eb242 Add v1.11.1 changelog. (#6522)
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-19 14:46:26 +08:00
danfengliu
5afe837f76 Merge pull request #6516 from blackpiglet/v1.11_change_push_to_gcr
Integrate pushing to docker hub and gcr.io in one docker build and push command
2023-07-18 15:33:14 +08:00
Xun Jiang
350cb6dec6 Integrate pushing to docker hub and gcr.io in one docker build and push command.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-07-18 14:20:03 +08:00
Xun Jiang/Bruce Jiang
ef23da3289 Fix release-1.11 push github action out of space issue. (#6500)
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-15 07:23:45 +08:00
Xun Jiang/Bruce Jiang
6862fb84b9 Merge pull request #6488 from kayrus/openstack-cis-zone-labels-1.11
Add support for OpenStack CSI drivers topology keys
2023-07-14 17:00:36 +08:00
lyndon
c8e405c89b Merge branch 'release-1.11' into openstack-cis-zone-labels-1.11
2023-07-14 16:43:33 +08:00
Xun Jiang/Bruce Jiang
5836a2a0c9 Merge pull request #6485 from blackpiglet/release-1.11_bump_golang
Bump Golang to v1.20.6 for release-1.11.
2023-07-13 11:24:08 +08:00
kayrus
a1e08f4eec Add support for OpenStack CSI drivers topology keys
Signed-off-by: kayrus <kayrus@users.noreply.github.com>
2023-07-12 22:33:11 +02:00
Xun Jiang
61a08ccc30 Bump Golang to v1.20.6 for release-1.11.
Signed-off-by: Xun Jiang <jxun@vmware.com>
2023-07-12 09:43:40 +08:00
Daniel Jiang
46a355c293 Merge pull request #6477 from Lyndon-Li/release-1.11
[Cherry-pick] Fix-issue-6297
2023-07-10 21:08:56 +08:00
Lyndon-Li
1cb966da57 fix-issue-6297
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-07-10 16:28:20 +08:00
Wenkai Yin(尹文开)
286db706e9 Restore Endpoints before Services (#6316)
Restore Endpoints before Services

Fixes #6280

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2023-06-20 14:34:50 +08:00
Xun Jiang/Bruce Jiang
f33ea376e9 Merge pull request #6322 from blackpiglet/psa_violation_fix_release_1.11
[cherry-pick][release-1.11]Make the E2E testing pods obey the restricted pod security standard.
2023-05-31 15:46:20 +08:00
Xun Jiang/Bruce Jiang
1349e570f9 Merge branch 'release-1.11' into psa_violation_fix_release_1.11
2023-05-31 15:31:37 +08:00
Shubham Pampattiwar
ba8465b87d Merge pull request #6324 from blackpiglet/6276_cherry_pick_release_1.11
[cherry-pick][release-1.11]Fix status.progress not getting updated for backup
2023-05-30 09:43:16 -07:00
kkothule
7dccc17690 Fix status.progress not getting updated for backup
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-05-30 15:47:38 +08:00
Xun Jiang
9b922782e1 Make the E2E testing pods obey the restricted pod security standard.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-05-30 15:29:16 +08:00
qiuming
dc0a712089 Merge pull request #6189 from Lyndon-Li/release-1.11
[1.11] Fix issue 6182
2023-04-26 10:23:42 +08:00
Lyndon-Li
d6755f7953 Fix issue 6182
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2023-04-25 16:12:37 +08:00
Xun Jiang/Bruce Jiang
6ac085316d Update Golang to v1.20 for release-1.11 branch. (#6159)
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
Co-authored-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-23 12:07:48 +08:00
Xun Jiang/Bruce Jiang
0da2baa908 Merge pull request #6138 from danfengliu/cherry-pick-schedule-test
[Cherry-pick 1.11] Add E2E test for schedule backup creation principle
2023-04-13 09:49:32 +08:00
danfengl
8628388445 [Cherry-pick 1.11]Add E2E test for schedule backup creation principle
Signed-off-by: danfengl <danfengl@vmware.com>
2023-04-13 09:30:58 +08:00
Shubham Pampattiwar
495063b4f6 Merge pull request #6140 from blackpiglet/add_5865_in_release_note
Add PR #5865 in release note.
2023-04-12 13:14:03 -04:00
Xun Jiang
87794d4615 Add PR #5865 in release note.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-12 21:54:14 +08:00
Daniel Jiang
c3e7fd7a74 Merge pull request #6110 from ywk253100/230411_mf
Ignore not found error during patching managedFields
2023-04-11 21:30:01 +08:00
Wenkai Yin(尹文开)
5c0c378797 Ignore not found error during patching managedFields
Ignore not found error during patching managedFields

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
2023-04-11 17:42:02 +08:00
lyndon
7d0d56e5fa Merge pull request #6104 from blackpiglet/release-1.11
Update goreleaser version in build image.
2023-04-11 09:55:13 +08:00
Xun Jiang
3c9570fd14 Update goreleaser version in build image.
Update goreleaser version to v1.15.2 in Velero build image.
Modify the path where goreleaser reads configuration file from.

Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-10 22:13:18 +08:00
Xun Jiang/Bruce Jiang
971396110f Merge pull request #6096 from blackpiglet/release-1.11
Pin Golang and distroless version for v1.11
2023-04-10 11:22:31 +08:00
Xun Jiang
9de61aa5a0 Pin Golang and distroless version for v1.11.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-10 11:05:43 +08:00
lyndon
5f3cb25311 Merge pull request #6095 from vmware-tanzu/blackpiglet-patch-1
Modify v1.11 changelog.
2023-04-10 10:26:07 +08:00
Xun Jiang/Bruce Jiang
e16cb76892 Modify resouce policy example document link in changelog.
Signed-off-by: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com>
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
2023-04-07 20:14:05 +08:00
31 changed files with 338 additions and 111 deletions

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
@@ -72,7 +72,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- name: Check out the code
uses: actions/checkout@v2

View File

@@ -10,7 +10,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- name: Check out the code
uses: actions/checkout@v2
@@ -32,4 +32,5 @@ jobs:
- name: Run staticcheck
uses: dominikh/staticcheck-action@v1.3.0
with:
version: "2022.1.3"
version: "2023.1.3"
install-go: false

View File

@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20.6'
id: go
- uses: actions/checkout@v3
@@ -60,10 +60,21 @@ jobs:
files: coverage.out
verbose: true
# Use the JSON key in secret to login gcr.io
- uses: 'docker/login-action@v2'
with:
registry: 'gcr.io' # or REGION.docker.pkg.dev
username: '_json_key'
password: '${{ secrets.GCR_SA_KEY }}'
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
- name: Publish container image
if: github.repository == 'vmware-tanzu/velero'
run: |
sudo swapoff -a
sudo rm -f /mnt/swapfile
docker image prune -a --force
# Build and push Velero image to docker registry
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)
@@ -87,19 +98,3 @@ jobs:
uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
# Use the JSON key in secret to login gcr.io
- uses: 'docker/login-action@v1'
with:
registry: 'gcr.io' # or REGION.docker.pkg.dev
username: '_json_key'
password: '${{ secrets.GCR_SA_KEY }}'
# Push image to GCR to facilitate some environments that have rate limitation to docker hub, e.g. vSphere.
- name: Publish container image to GCR
if: github.repository == 'vmware-tanzu/velero'
run: |
sudo swapoff -a
sudo rm -f /mnt/swapfile
docker image prune -a --force
REGISTRY=gcr.io/velero-gcp ./hack/docker-push.sh

View File

@@ -54,3 +54,10 @@ release:
name: velero
draft: true
prerelease: auto
git:
# What should be used to sort tags when gathering the current and previous
# tags if there are more than one tag in the same commit.
#
# Default: `-version:refname`
tag_sort: -version:creatordate

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as velero-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as velero-builder
ARG GOPROXY
ARG BIN
@@ -44,7 +44,7 @@ RUN mkdir -p /output/usr/bin && \
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as restic-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as restic-builder
ARG BIN
ARG TARGETOS
@@ -66,7 +66,7 @@ RUN mkdir -p /output/usr/bin && \
/go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh
# Velero image packing section
FROM gcr.io/distroless/base-nossl-debian11:nonroot
FROM gcr.io/distroless/base-nossl-debian11@sha256:9523ef8cf054e23a81e722d231c6f604ab43a03c5b174b5c8386c78c0b6473d0
LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"

View File

@@ -22,9 +22,11 @@ PKG := github.com/vmware-tanzu/velero
# Where to push the docker image.
REGISTRY ?= velero
GCR_REGISTRY ?= gcr.io/velero-gcp
# Image name
IMAGE ?= $(REGISTRY)/$(BIN)
GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)
# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
# that pull base images from different registries.
@@ -66,8 +68,10 @@ TAG_LATEST ?= false
ifeq ($(TAG_LATEST), true)
IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
else
IMAGE_TAGS ?= $(IMAGE):$(VERSION)
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
endif
ifeq ($(shell docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
@@ -183,6 +187,7 @@ endif
--output=type=$(BUILDX_OUTPUT_TYPE) \
--platform $(BUILDX_PLATFORMS) \
$(addprefix -t , $(IMAGE_TAGS)) \
$(addprefix -t , $(GCR_IMAGE_TAGS)) \
--build-arg=GOPROXY=$(GOPROXY) \
--build-arg=PKG=$(PKG) \
--build-arg=BIN=$(BIN) \

View File

@@ -50,7 +50,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.19 as tilt-helper
FROM golang:1.20.6 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1,3 +1,26 @@
## v1.11.1
### 2023-07-19
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.11.1
### Container Image
`velero/velero:v1.11.1`
### Documentation
https://velero.io/docs/v1.11/
### Upgrading
https://velero.io/docs/v1.11/upgrade-to-1.11/
### All changes
* Add support for OpenStack CSI drivers topology keys (#6488, @kayrus)
* Enhance the error handling for #6297: the return value of GetBucketRegion was not recorded, so when it failed there was no way to determine the cause (#6477, @Lyndon-Li)
* Fixed a bug where status.progress is not getting updated for backups. (#6324, @blackpiglet)
* Restore Endpoints before Services (#6316, @ywk253100)
* Fix issue #6182: if the pod is not running, don't treat it as an error; skip the pod volume backup and leave a warning. (#6189, @Lyndon-Li)
## v1.11
### 2023-04-07
@@ -29,17 +52,23 @@ The Progress() and Cancel() methods are needed to facilitate long-running Restor
This is intended as a replacement for the previously-approved Upload Progress Monitoring design ([Upload Progress Monitoring](https://github.com/vmware-tanzu/velero/blob/main/design/upload-progress.md)) to expand the supported use cases beyond snapshot upload to include what was previously called Async Backup/Restore Item Actions.
#### Flexible resource policy that can filter volumes to skip in the backup
This feature provides a flexible policy to filter volumes out of a backup without requiring any labels or annotations to be patched onto the pods or volumes. The policy is configured as a Kubernetes ConfigMap maintained by the users themselves, and it can be extended to more scenarios in the future. For now, the policy can exclude volumes from backup based on the CSI driver, NFS settings, volume size, and StorageClass. Please refer to [policy API design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/handle-backup-of-volumes-by-resources-filters.md#api-design) for the policy's ConfigMap format. It is not guaranteed to work with unofficial third-party plugins, as they may not follow Velero's existing backup workflow logic.
This feature provides a flexible policy to filter volumes out of a backup without requiring any labels or annotations to be patched onto the pods or volumes. The policy is configured as a Kubernetes ConfigMap maintained by the users themselves, and it can be extended to more scenarios in the future. For now, the policy can exclude volumes from backup based on the CSI driver, NFS settings, volume size, and StorageClass. Please refer to [Resource policies rules](https://velero.io/docs/v1.11/resource-filtering/#resource-policies) for the policy's ConfigMap format. It is not guaranteed to work with unofficial third-party plugins, as they may not follow Velero's existing backup workflow logic.
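As a rough illustration of the policy described above, the snippet below writes an assumed rule file, wraps it in a ConfigMap, and references it at backup time. The rule schema and the `--resource-policies-configmap` flag spelling follow the linked resource-policies documentation as I understand it; treat the field names and values as assumptions to verify rather than a definitive recipe.

```bash
# Sketch only: assumed volume-policy schema; skip matching volumes during backup.
cat <<'EOF' > volume-policy.yaml
version: v1
volumePolicies:
  - conditions:
      csi:
        driver: ebs.csi.aws.com   # skip volumes provisioned by this CSI driver...
      capacity: "0,100Gi"         # ...whose requested size falls in this range
    action:
      type: skip
EOF
kubectl create configmap volume-policy -n velero --from-file=volume-policy.yaml
velero backup create app-backup \
  --include-namespaces my-app \
  --resource-policies-configmap volume-policy
```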
#### Resource Filters that can distinguish cluster scope and namespace scope resources
This feature adds four new resource filters for backup. The new filters are separated into cluster scope and namespace scope. Before this feature, Velero could not filter cluster-scoped resources precisely; this feature provides that ability and refactors the existing resource filter parameters.
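For instance, a backup could take every namespace-scoped resource in a namespace while restricting cluster-scoped resources to a short list. The flag spellings below are what I believe the new scope-aware filters are called in v1.11; confirm them with `velero backup create --help`.

```bash
# Assumed flag names for the new scope-aware resource filters.
velero backup create scoped-backup \
  --include-namespaces my-app \
  --include-namespace-scoped-resources '*' \
  --include-cluster-scoped-resources 'persistentvolumes,storageclasses'
```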
#### New parameter in installation to customize the serviceaccount name
The `velero install` sub-command now includes a new parameter, `--service-account-name`, which allows users to specify the ServiceAccount name for the Velero and node-agent pods. This feature may be particularly useful for users who utilize IRSA (IAM Roles for Service Accounts) in Amazon EKS (Elastic Kubernetes Service).
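A minimal install invocation using the new flag might look like the sketch below. Only `--service-account-name` comes from this release note; the provider, plugin image, bucket, and ServiceAccount name are placeholder values for illustration.

```bash
# Example values only; the pre-created ServiceAccount would carry the IRSA role annotation.
velero install \
  --provider aws \
  --plugins velero/velero-plugin-for-aws:v1.7.0 \
  --bucket my-velero-bucket \
  --backup-location-config region=us-east-1 \
  --service-account-name velero-irsa
```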
#### Add a parameter for setting the Velero server connection with the k8s API server's timeout
Some parts of Velero need to communicate with the Kubernetes API server. Before v1.11, these code paths used hard-coded timeouts. This feature adds a resource-timeout parameter to the velero server binary to make the timeout configurable.
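Assuming the parameter is exposed as a `--resource-timeout` flag on the server command (inferred from this paragraph, so verify with `velero server --help`), it could be raised on an existing deployment like this:

```bash
# Sketch: append the assumed --resource-timeout flag to the velero server container args.
kubectl -n velero patch deployment velero --type=json -p='[
  {"op": "add", "path": "/spec/template/spec/containers/0/args/-",
   "value": "--resource-timeout=5m"}
]'
```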
#### Add resource list in the output of the restore describe command
Before this feature, a Velero restore did not provide a list of restored resources the way a Velero backup does, so it was not convenient for users to learn what was restored. This feature adds the resource list along with the handling result for each resource (created, updated, failed, or skipped).
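For example, the per-resource results appear in the describe output; `--details` is an existing flag, and the resource list with created/updated/failed/skipped results is the part added in v1.11.

```bash
# Inspect which resources a restore touched and how each one was handled.
velero restore describe my-restore --details
```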
#### Support JSON format output of backup describe command
Before the Velero v1.11 release, users could not choose the output format of Velero's backup describe command. The output is friendly for human reading, but it is not structured, and it is not easy for other programs to extract information from it. Velero v1.11 adds a JSON output format for the backup describe command.
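A hedged example of consuming the structured output, assuming the new format is exposed through a `-o json` output flag on the describe command:

```bash
# Assumed flag spelling; pipe the structured describe output into jq for scripting.
velero backup describe app-backup -o json | jq .
```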
#### Refactor controllers with controller-runtime
In v1.11, the Backup controller and Restore controller are refactored with controller-runtime. As of v1.11, all Velero controllers use the controller-runtime framework.
@@ -59,6 +88,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
### All Changes
* Ignore not found error during patching managedFields (#6110, @ywk253100)
* Modify new scope resource filters name. (#6089, @blackpiglet)
* Make Velero not exits when EnableCSI is on and CSI snapshot not installed (#6062, @blackpiglet)
* Restore Services before Clusters (#6057, @ywk253100)

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.18
go 1.20
require (
cloud.google.com/go/storage v1.21.0

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=linux/amd64 golang:1.19-bullseye
FROM --platform=linux/amd64 golang:1.20.6-bullseye
ARG GOPROXY
@@ -50,7 +50,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3
# get goreleaser
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.12.3/goreleaser_Linux_x86_64.tar.gz && \
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.15.2/goreleaser_Linux_x86_64.tar.gz && \
tar xvf goreleaser_Linux_x86_64.tar.gz && \
mv goreleaser /usr/bin/goreleaser && \
chmod +x /usr/bin/goreleaser

View File

@@ -48,12 +48,10 @@ if [[ "${PUBLISH:-}" != "TRUE" ]]; then
goreleaser release \
--clean \
--release-notes="${RELEASE_NOTES_FILE}" \
--skip-publish \
--config goreleaser.yaml
--skip-publish
else
echo "Getting ready to publish"
goreleaser release \
--clean \
--release-notes="${RELEASE_NOTES_FILE}"
--config goreleaser.yaml
fi

View File

@@ -1,6 +0,0 @@
git:
# What should be used to sort tags when gathering the current and previous
# tags if there are more than one tag in the same commit.
#
# Default: `-version:refname`
tag_sort: -version:creatordate

View File

@@ -279,12 +279,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
items := collector.getAllItems()
log.WithField("progress", "").Infof("Collected %d items matching the backup spec from the Kubernetes API (actual number of items backed up may be more or less depending on velero.io/exclude-from-backup annotation, plugins returning additional related items to back up, etc.)", len(items))
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
original := backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress.TotalItems = len(items)
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated := backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = len(items)
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress.totalItems")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
itemBackupper := &itemBackupper{
backupRequest: backupRequest,
@@ -333,12 +337,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
lastUpdate = &val
case <-ticker.C:
if lastUpdate != nil {
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
original := backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated := backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = lastUpdate.totalItems
updated.Status.Progress.ItemsBackedUp = lastUpdate.itemsBackedUp
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
lastUpdate = nil
}
}
@@ -413,12 +421,17 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
// do a final update on progress since we may have just added some CRDs and may not have updated
// for the last few processed items.
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
original = backupRequest.Backup.DeepCopy()
backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
updated = backupRequest.Backup.DeepCopy()
if updated.Status.Progress == nil {
updated.Status.Progress = &velerov1api.BackupProgress{}
}
updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems)
updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems)
if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
}
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))

View File

@@ -448,6 +448,10 @@ const (
azureCsiZoneKey = "topology.disk.csi.azure.com/zone"
gkeCsiZoneKey = "topology.gke.io/zone"
gkeZoneSeparator = "__"
// OpenStack CSI drivers topology keys
cinderCsiZoneKey = "topology.manila.csi.openstack.org/zone"
manilaCsiZoneKey = "topology.cinder.csi.openstack.org/zone"
)
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
@@ -502,7 +506,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
if !labelFound {
var k string
log.Infof("label %q is not present on PersistentVolume", zoneLabelDeprecated)
k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, zoneLabel, zoneLabelDeprecated)
k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, cinderCsiZoneKey, manilaCsiZoneKey, zoneLabel, zoneLabelDeprecated)
if pvFailureDomainZone != "" {
log.Infof("zone info from nodeAffinity requirements: %s, key: %s", pvFailureDomainZone, k)
} else {

View File

@@ -514,10 +514,13 @@ High priorities:
- Replica sets go before deployments/other controllers so they can be explicitly
restored and be adopted by controllers.
- CAPI ClusterClasses go before Clusters.
- Endpoints go before Services so no new Endpoints will be created
- Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created
for the same clusters
Low priorities:
- Tanzu ClusterBootstraps go last as it can reference any other kind of resources.
ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
- ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
- CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic.
Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters.
See https://github.com/kubernetes-sigs/cluster-api/issues/4105
@@ -543,6 +546,7 @@ var defaultRestorePriorities = restore.Priorities{
// in the backup.
"replicasets.apps",
"clusterclasses.cluster.x-k8s.io",
"endpoints",
"services",
},
LowPriorities: []string{

View File

@@ -132,6 +132,21 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
return nil, nil
}
err := kube.IsPodRunning(pod)
if err != nil {
for _, volumeName := range volumesToBackup {
err = errors.Wrapf(err, "backup for volume %s is skipped", volumeName)
log.WithError(err).Warn("Skip pod volume")
}
return nil, nil
}
err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
if err != nil {
return nil, []error{err}
}
repositoryType := getRepositoryType(b.uploaderType)
if repositoryType == "" {
err := errors.Errorf("empty repository type, uploader %s", b.uploaderType)
@@ -143,16 +158,6 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
return nil, []error{err}
}
err = kube.IsPodRunning(pod)
if err != nil {
return nil, []error{err}
}
err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
if err != nil {
return nil, []error{err}
}
// get a single non-exclusive lock since we'll wait for all individual
// backups to be complete before releasing it.
b.repoLocker.Lock(repo.Name)

View File

@@ -21,6 +21,8 @@ import (
"context"
"os"
goerr "errors"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
@@ -76,16 +78,20 @@ func GetS3Credentials(config map[string]string) (credentials.Value, error) {
// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetAWSBucketRegion(bucket string) (string, error) {
var region string
sess, err := session.NewSession()
if err != nil {
return "", errors.WithStack(err)
}
var region string
var requestErrs []error
for _, partition := range endpoints.DefaultPartitions() {
for regionHint := range partition.Regions() {
region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
region, err = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
if err != nil {
requestErrs = append(requestErrs, errors.Wrapf(err, "error to get region with hint %s", regionHint))
}
// we only need to try a single region hint per partition, so break after the first
break
@@ -96,5 +102,9 @@ func GetAWSBucketRegion(bucket string) (string, error) {
}
}
return "", errors.New("unable to determine bucket's region")
if requestErrs == nil {
return "", errors.Errorf("unable to determine region by bucket %s", bucket)
} else {
return "", errors.Wrapf(goerr.Join(requestErrs...), "error to get region by bucket %s", bucket)
}
}

View File

@@ -1514,10 +1514,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if patchBytes != nil {
if _, err = resourceClient.Patch(name, patchBytes); err != nil {
ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs, itemExists
if !apierrors.IsNotFound(err) {
errs.Add(namespace, err)
return warnings, errs, itemExists
}
} else {
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
}
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
}
if groupResource == kuberesource.Pods {

View File

@@ -71,7 +71,7 @@ func NewUploaderProvider(
log logrus.FieldLogger,
) (Provider, error) {
if credGetter.FromFile == nil {
return nil, errors.New("uninitialized FileStore credentail is not supported")
return nil, errors.New("uninitialized FileStore credential is not supported")
}
if uploaderType == uploader.KopiaType {
// We use the hardcode repositoryType velerov1api.BackupRepositoryTypeKopia for now, because we have only one implementation of unified repo.

View File

@@ -87,7 +87,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
p.oldNodeName = nodeName
fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
pvcAnn := map[string]string{p.ann: nodeName}
_, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn)
_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn, nil)
Expect(err).To(Succeed())
err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
Expect(err).To(Succeed())

View File

@@ -85,7 +85,7 @@ func (s *StorageClasssChanging) CreateResources() error {
})
By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil)
_, err := CreatePod(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil, nil)
Expect(err).To(Succeed())
})
By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {

View File

@@ -117,6 +117,7 @@ var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to
var _ = Describe("[Schedule][BR][Pause][LongTime] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest)
var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
var _ = Describe("[Schedule][BackupCreation] Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", ScheduleBackupCreationTest)
var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest)

View File

@@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error {
podName := fmt.Sprintf("pod-%d", i)
pods = append(pods, podName)
By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
pod, err := CreatePod(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil, nil)
Expect(err).To(Succeed())
ann := map[string]string{
p.annotation: volumesToAnnotation,

View File

@@ -0,0 +1,138 @@
package schedule
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/vmware-tanzu/velero/test/e2e"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
)
type ScheduleBackupCreation struct {
TestCase
namespace string
ScheduleName string
ScheduleArgs []string
Period int //Limitation: The unit is minitue only and 60 is divisible by it
randBackupName string
verifyTimes int
volume string
podName string
pvcName string
podAnn map[string]string
podSleepDuration time.Duration
}
var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{namespace: "sch1", TestCase: TestCase{NSBaseName: "schedule-backup-creation-test", UseVolumeSnapshots: false}})
func (n *ScheduleBackupCreation) Init() error {
n.VeleroCfg = VeleroCfg
n.Client = *n.VeleroCfg.ClientToInstallVelero
n.Period = 3 // Unit is minute
n.verifyTimes = 5 // More larger verify times more confidence we have
podSleepDurationStr := "300s"
n.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
n.TestMsg = &TestMSG{
Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
FailedMSG: "Failed to verify schedule back creation behavior",
Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
}
n.podAnn = map[string]string{
"pre.hook.backup.velero.io/container": n.podName,
"pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]",
"pre.hook.backup.velero.io/timeout": "600s",
}
n.volume = "volume-1"
n.podName = "pod-1"
n.pvcName = "pvc-1"
return nil
}
func (n *ScheduleBackupCreation) StartRun() error {
n.namespace = fmt.Sprintf("%s-%s", n.NSBaseName, "ns")
n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String()
n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()
n.ScheduleArgs = []string{
"--include-namespaces", n.namespace,
"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
"--default-volumes-to-fs-backup",
}
Expect(n.Period < 30).To(Equal(true))
return nil
}
func (p *ScheduleBackupCreation) CreateResources() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
fmt.Sprintf("Failed to create namespace %s", p.namespace))
})
By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, nil, p.podAnn)
Expect(err).To(Succeed())
err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
Expect(err).To(Succeed())
})
return nil
}
func (n *ScheduleBackupCreation) Backup() error {
// Wait until the beginning of the given period to create schedule, it will give us
// a predictable period to wait for the first scheduled backup, and verify no immediate
// scheduled backup was created between schedule creation and first scheduled backup.
By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
for i := 0; i < n.Period*60/30; i++ {
time.Sleep(30 * time.Second)
now := time.Now().Minute()
triggerNow := now % n.Period
if triggerNow == 0 {
Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "")
return "Fail to restore workload"
})
break
}
}
})
By("Delay one more minute to make sure the new backup was created in the given period", func() {
time.Sleep(1 * time.Minute)
})
By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() {
for i := 1; i <= n.verifyTimes; i++ {
fmt.Printf("Start to sleep %d minute #%d time...\n", n.podSleepDuration, i)
mi, _ := time.ParseDuration("60s")
time.Sleep(n.podSleepDuration + mi)
bMap := make(map[string]string)
backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName)
Expect(err).To(Succeed())
Expect(len(backupsInfo) == i).To(Equal(true))
for index, bi := range backupsInfo {
bList := strings.Split(bi, ",")
fmt.Printf("Backup %d: %v\n", index, bList)
bMap[bList[0]] = bList[1]
_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
Expect(err).To(Succeed())
}
if i == n.verifyTimes-1 {
backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
n.randBackupName = strings.Split(backupInfo, ",")[0]
}
}
})
return nil
}
func (n *ScheduleBackupCreation) Restore() error {
return nil
}

View File

@@ -31,7 +31,7 @@ func (n *ScheduleBackup) Init() error {
n.VeleroCfg = VeleroCfg
n.Client = *n.VeleroCfg.ClientToInstallVelero
n.Period = 3 // Unit is minute
n.verifyTimes = 5 // More verify times more confidence
n.verifyTimes = 5 // More larger verify times more confidence we have
n.TestMsg = &TestMSG{
Desc: "Set up a scheduled backup defined by a Cron expression",
FailedMSG: "Failed to schedule a backup",

View File

@@ -25,7 +25,6 @@ import (
"github.com/pkg/errors"
"golang.org/x/net/context"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -64,12 +63,12 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
//Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout.
fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning)))
fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1.PodRunning)))
return false, nil
}
// If any pod is still waiting we don't need to check any more so return and wait for next poll interval
if checkPod.Status.Phase != corev1api.PodRunning {
fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1api.PodRunning)
if checkPod.Status.Phase != corev1.PodRunning {
fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1.PodRunning)
return false, nil
}
}
@@ -255,35 +254,6 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error
}
return pv_value.Name, nil
}
func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) {
volumes := []corev1.Volume{}
for _, volume := range volumeNameList {
var _pvcName string
if pvcName == "" {
_pvcName = fmt.Sprintf("pvc-%s", volume)
} else {
_pvcName = pvcName
}
pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
if err != nil {
return nil, err
}
volumes = append(volumes, corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
})
}
pod, err := CreatePod(client, ns, podName, volumes)
if err != nil {
return nil, err
}
return pod, nil
}
func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error {
arg := []string{"exec", "-n", namespace, "-c", podName, podName,

View File

@@ -35,6 +35,11 @@ import (
func CreateNamespace(ctx context.Context, client TestClient, namespace string) error {
ns := builder.ForNamespace(namespace).Result()
// Add label to avoid PSA check.
ns.Labels = map[string]string{
"pod-security.kubernetes.io/enforce": "baseline",
"pod-security.kubernetes.io/enforce-version": "latest",
}
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
@@ -45,6 +50,9 @@ func CreateNamespace(ctx context.Context, client TestClient, namespace string) e
func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace string, label map[string]string) error {
ns := builder.ForNamespace(namespace).Result()
ns.Labels = label
// Add label to avoid PSA check.
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
@@ -54,6 +62,11 @@ func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace
func CreateNamespaceWithAnnotation(ctx context.Context, client TestClient, namespace string, annotation map[string]string) error {
ns := builder.ForNamespace(namespace).Result()
// Add label to avoid PSA check.
ns.Labels = map[string]string{
"pod-security.kubernetes.io/enforce": "baseline",
"pod-security.kubernetes.io/enforce-version": "latest",
}
ns.ObjectMeta.Annotations = annotation
_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {

View File

@@ -26,7 +26,34 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) {
func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) {
if pvcName != "" && len(volumeNameList) != 1 {
return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
}
volumes := []corev1.Volume{}
for _, volume := range volumeNameList {
var _pvcName string
if pvcName == "" {
_pvcName = fmt.Sprintf("pvc-%s", volume)
} else {
_pvcName = pvcName
}
pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
if err != nil {
return nil, err
}
volumes = append(volumes, corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
})
}
vmList := []corev1.VolumeMount{}
for _, v := range volumes {
vmList = append(vmList, corev1.VolumeMount{
@@ -34,9 +61,11 @@ func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*co
MountPath: "/" + v.Name,
})
}
p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: name,
Annotations: ann,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{

View File

@@ -38,7 +38,7 @@ func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (*
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
corev1.ResourceStorage: resource.MustParse("1Mi"),
},
},
StorageClassName: &sc,

View File

@@ -200,6 +200,13 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler
return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
}
labelNamespaceCmd := exec.CommandContext(ctx, "kubectl", "label", "namespace", namespace, "pod-security.kubernetes.io/enforce=baseline", "pod-security.kubernetes.io/enforce-version=latest", "--overwrite=true")
_, stderr, err = veleroexec.RunCommand(labelNamespaceCmd)
fmt.Printf("Label namespace with PSA policy: %s\n", labelNamespaceCmd)
if err != nil {
return errors.Wrapf(err, "failed to label namespace with PSA policy, stderr=%s", stderr)
}
kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
"-n", namespace, "-w", "--timeout=30m")
_, stderr, err = veleroexec.RunCommand(kibishiiSetWaitCmd)