mirror of https://github.com/vmware-tanzu/velero.git
synced 2026-01-11 07:20:22 +00:00

Compare commits: 32 commits, v1.16.1-rc ... v1.11.1
Commits (SHA1):
bdbe7eb242, 5afe837f76, 350cb6dec6, ef23da3289, 6862fb84b9, c8e405c89b, 5836a2a0c9, a1e08f4eec,
61a08ccc30, 46a355c293, 1cb966da57, 286db706e9, f33ea376e9, 1349e570f9, ba8465b87d, 7dccc17690,
9b922782e1, dc0a712089, d6755f7953, 6ac085316d, 0da2baa908, 8628388445, 495063b4f6, 87794d4615,
c3e7fd7a74, 5c0c378797, 7d0d56e5fa, 3c9570fd14, 971396110f, 9de61aa5a0, 5f3cb25311, e16cb76892
.github/workflows/crds-verify-kind.yaml (2 changes)

@@ -14,7 +14,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
          go-version: '1.20.6'
        id: go
      # Look for a CLI that's made for this PR
      - name: Fetch built CLI
.github/workflows/e2e-test-kind.yaml (4 changes)

@@ -14,7 +14,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
          go-version: '1.20.6'
        id: go
      # Look for a CLI that's made for this PR
      - name: Fetch built CLI

@@ -72,7 +72,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
          go-version: '1.20.6'
        id: go
      - name: Check out the code
        uses: actions/checkout@v2
.github/workflows/pr-ci-check.yml (5 changes)

@@ -10,7 +10,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
          go-version: '1.20.6'
        id: go
      - name: Check out the code
        uses: actions/checkout@v2

@@ -32,4 +32,5 @@ jobs:
      - name: Run staticcheck
        uses: dominikh/staticcheck-action@v1.3.0
        with:
          version: "2022.1.3"
          version: "2023.1.3"
          install-go: false
.github/workflows/push.yml (29 changes)

@@ -18,7 +18,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
          go-version: '1.20.6'
        id: go

      - uses: actions/checkout@v3

@@ -60,10 +60,21 @@ jobs:
          files: coverage.out
          verbose: true

      # Use the JSON key in secret to login gcr.io
      - uses: 'docker/login-action@v2'
        with:
          registry: 'gcr.io' # or REGION.docker.pkg.dev
          username: '_json_key'
          password: '${{ secrets.GCR_SA_KEY }}'

      # Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
      - name: Publish container image
        if: github.repository == 'vmware-tanzu/velero'
        run: |
          sudo swapoff -a
          sudo rm -f /mnt/swapfile
          docker image prune -a --force

          # Build and push Velero image to docker registry
          docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
          VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)

@@ -87,19 +98,3 @@ jobs:
          uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
          uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
          uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}

      # Use the JSON key in secret to login gcr.io
      - uses: 'docker/login-action@v1'
        with:
          registry: 'gcr.io' # or REGION.docker.pkg.dev
          username: '_json_key'
          password: '${{ secrets.GCR_SA_KEY }}'

      # Push image to GCR to facilitate some environments that have rate limitation to docker hub, e.g. vSphere.
      - name: Publish container image to GCR
        if: github.repository == 'vmware-tanzu/velero'
        run: |
          sudo swapoff -a
          sudo rm -f /mnt/swapfile
          docker image prune -a --force
          REGISTRY=gcr.io/velero-gcp ./hack/docker-push.sh
@@ -54,3 +54,10 @@ release:
  name: velero
  draft: true
  prerelease: auto

git:
  # What should be used to sort tags when gathering the current and previous
  # tags if there are more than one tag in the same commit.
  #
  # Default: `-version:refname`
  tag_sort: -version:creatordate
@@ -13,7 +13,7 @@
# limitations under the License.

# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as velero-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as velero-builder

ARG GOPROXY
ARG BIN

@@ -44,7 +44,7 @@ RUN mkdir -p /output/usr/bin && \
    -ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}

# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as restic-builder
FROM --platform=$BUILDPLATFORM golang:1.20.6-bullseye as restic-builder

ARG BIN
ARG TARGETOS

@@ -66,7 +66,7 @@ RUN mkdir -p /output/usr/bin && \
    /go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh

# Velero image packing section
FROM gcr.io/distroless/base-nossl-debian11:nonroot
FROM gcr.io/distroless/base-nossl-debian11@sha256:9523ef8cf054e23a81e722d231c6f604ab43a03c5b174b5c8386c78c0b6473d0

LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"
Makefile (5 changes)

@@ -22,9 +22,11 @@ PKG := github.com/vmware-tanzu/velero

# Where to push the docker image.
REGISTRY ?= velero
GCR_REGISTRY ?= gcr.io/velero-gcp

# Image name
IMAGE ?= $(REGISTRY)/$(BIN)
GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)

# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
# that pull base images from different registries.

@@ -66,8 +68,10 @@ TAG_LATEST ?= false

ifeq ($(TAG_LATEST), true)
	IMAGE_TAGS ?= $(IMAGE):$(VERSION) $(IMAGE):latest
	GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION) $(GCR_IMAGE):latest
else
	IMAGE_TAGS ?= $(IMAGE):$(VERSION)
	GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
endif

ifeq ($(shell docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)

@@ -183,6 +187,7 @@ endif
	--output=type=$(BUILDX_OUTPUT_TYPE) \
	--platform $(BUILDX_PLATFORMS) \
	$(addprefix -t , $(IMAGE_TAGS)) \
	$(addprefix -t , $(GCR_IMAGE_TAGS)) \
	--build-arg=GOPROXY=$(GOPROXY) \
	--build-arg=PKG=$(PKG) \
	--build-arg=BIN=$(BIN) \
Tiltfile (2 changes)

@@ -50,7 +50,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.19 as tilt-helper
FROM golang:1.20.6 as tilt-helper

# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
@@ -1,3 +1,26 @@
## v1.11.1
### 2023-07-19

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.11.1

### Container Image
`velero/velero:v1.11.1`

### Documentation
https://velero.io/docs/v1.11/

### Upgrading
https://velero.io/docs/v1.11/upgrade-to-1.11/

### All changes
* Add support for OpenStack CSI drivers topology keys (#6488, @kayrus)
* Enhance the code because of #6297, the return value of GetBucketRegion is not recorded, as a result, when it fails, we have no way to get the cause (#6477, @Lyndon-Li)
* Fixed a bug where status.progress is not getting updated for backups. (#6324, @blackpiglet)
* Restore Endpoints before Services (#6316, @ywk253100)
* Fix issue #6182. If pod is not running, don't treat it as an error, let it go and leave a warning. (#6189, @Lyndon-Li)


## v1.11
### 2023-04-07

@@ -29,17 +52,23 @@ The Progress() and Cancel() methods are needed to facilitate long-running Restor
This is intended as a replacement for the previously-approved Upload Progress Monitoring design ([Upload Progress Monitoring](https://github.com/vmware-tanzu/velero/blob/main/design/upload-progress.md)) to expand the supported use cases beyond snapshot upload to include what was previously called Async Backup/Restore Item Actions.

#### Flexible resource policy that can filter volumes to skip in the backup
This feature provides a flexible policy to filter volumes in the backup without requiring patching any labels or annotations to the pods or volumes. This policy is configured as k8s ConfigMap and maintained by the users themselves, and it can be extended to more scenarios in the future. By now, the policy rules out volumes from backup depending on the CSI driver, NFS setting, volume size, and StorageClass setting. Please refer to [policy API design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/handle-backup-of-volumes-by-resources-filters.md#api-design) for the policy's ConifgMap format. It is not guaranteed to work on unofficial third-party plugins as it may not follow the existing backup workflow code logic of Velero.
This feature provides a flexible policy to filter volumes in the backup without requiring patching any labels or annotations to the pods or volumes. This policy is configured as k8s ConfigMap and maintained by the users themselves, and it can be extended to more scenarios in the future. By now, the policy rules out volumes from backup depending on the CSI driver, NFS setting, volume size, and StorageClass setting. Please refer to [Resource policies rules](https://velero.io/docs/v1.11/resource-filtering/#resource-policies) for the policy's ConifgMap format. It is not guaranteed to work on unofficial third-party plugins as it may not follow the existing backup workflow code logic of Velero.

#### Resource Filters that can distinguish cluster scope and namespace scope resources
This feature adds four new resource filters for backup. The new filters are separated into cluster scope and namespace scope. Before this feature, Velero could not filter cluster scope resources precisely. This feature provides the ability and refactors existing resource filter parameters.

#### New parameter in installation to customize the serviceaccount name
The `velero install` sub-command now includes a new parameter,`--service-account-name`, which allows users to specify the ServiceAccountName for the Velero and node-agent pods. This feature may be particularly useful for users who utilize IRSA (IAM Roles for Service Accounts) in Amazon EKS (Elastic Kubernetes Service)."

#### Add a parameter for setting the Velero server connection with the k8s API server's timeout
In Velero, some code pieces need to communicate with the k8s API server. Before v1.11, these code pieces used hard-code timeout settings. This feature adds a resource-timeout parameter in the velero server binary to make it configurable.

#### Add resource list in the output of the restore describe command
Before this feature, Velero restore didn't have a restored resources list as the Velero backup. It's not convenient for users to learn what is restored. This feature adds the resources list and the handling result of the resources (including created, updated, failed, and skipped).

#### Support JSON format output of backup describe command
Before the Velero v1.11 release, users could not choose Velero's backup describe command's output format. The command output format is friendly for human reading, but it's not a structured output, and it's not easy for other programs to get information from it. Velero v1.11 adds a JSON format output for the backup describe command.

#### Refactor controllers with controller-runtime
In v1.11, Backup Controller and Restore controller are refactored with controller-runtime. Till v1.11, all Velero controllers use the controller-runtime framework.
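Editor's note: for readers unfamiliar with what a controller-runtime refactor implies, the block below sketches the generic shape of a controller-runtime reconciler. It is an illustration only, not Velero's actual Backup controller; the type name, field set, and reconcile body are assumptions, and the `velerov1` import path is the one commonly used in this repo.

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// backupReconciler is an illustrative skeleton of a controller-runtime based
// controller; the refactored Backup/Restore controllers follow this general shape.
type backupReconciler struct {
	client.Client
}

// Reconcile is invoked by controller-runtime whenever a watched Backup changes.
func (r *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var backup velerov1.Backup
	if err := r.Get(ctx, req.NamespacedName, &backup); err != nil {
		// The object may have been deleted since the event was queued.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// ... drive the backup state machine here, then persist status changes ...
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler for Backup objects.
func (r *backupReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&velerov1.Backup{}).
		Complete(r)
}
```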
@@ -59,6 +88,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:


### All Changes
* Ignore not found error during patching managedFields (#6110, @ywk253100)
* Modify new scope resource filters name. (#6089, @blackpiglet)
* Make Velero not exits when EnableCSI is on and CSI snapshot not installed (#6062, @blackpiglet)
* Restore Services before Clusters (#6057, @ywk253100)
go.mod (2 changes)

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero

go 1.18
go 1.20

require (
	cloud.google.com/go/storage v1.21.0
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM --platform=linux/amd64 golang:1.19-bullseye
FROM --platform=linux/amd64 golang:1.20.6-bullseye

ARG GOPROXY

@@ -50,7 +50,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3

# get goreleaser
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.12.3/goreleaser_Linux_x86_64.tar.gz && \
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.15.2/goreleaser_Linux_x86_64.tar.gz && \
    tar xvf goreleaser_Linux_x86_64.tar.gz && \
    mv goreleaser /usr/bin/goreleaser && \
    chmod +x /usr/bin/goreleaser
@@ -48,12 +48,10 @@ if [[ "${PUBLISH:-}" != "TRUE" ]]; then
    goreleaser release \
        --clean \
        --release-notes="${RELEASE_NOTES_FILE}" \
        --skip-publish \
        --config goreleaser.yaml
        --skip-publish
else
    echo "Getting ready to publish"
    goreleaser release \
        --clean \
        --release-notes="${RELEASE_NOTES_FILE}"
        --config goreleaser.yaml
fi
@@ -1,6 +0,0 @@
git:
  # What should be used to sort tags when gathering the current and previous
  # tags if there are more than one tag in the same commit.
  #
  # Default: `-version:refname`
  tag_sort: -version:creatordate
@@ -279,12 +279,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
	items := collector.getAllItems()
	log.WithField("progress", "").Infof("Collected %d items matching the backup spec from the Kubernetes API (actual number of items backed up may be more or less depending on velero.io/exclude-from-backup annotation, plugins returning additional related items to back up, etc.)", len(items))

	backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}
	original := backupRequest.Backup.DeepCopy()
	backupRequest.Backup.Status.Progress.TotalItems = len(items)
	if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
	updated := backupRequest.Backup.DeepCopy()
	if updated.Status.Progress == nil {
		updated.Status.Progress = &velerov1api.BackupProgress{}
	}

	updated.Status.Progress.TotalItems = len(items)
	if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
		log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress.totalItems")
	}
	backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}

	itemBackupper := &itemBackupper{
		backupRequest: backupRequest,

@@ -333,12 +337,16 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
			lastUpdate = &val
		case <-ticker.C:
			if lastUpdate != nil {
				backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
				original := backupRequest.Backup.DeepCopy()
				backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
				if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
				updated := backupRequest.Backup.DeepCopy()
				if updated.Status.Progress == nil {
					updated.Status.Progress = &velerov1api.BackupProgress{}
				}
				updated.Status.Progress.TotalItems = lastUpdate.totalItems
				updated.Status.Progress.ItemsBackedUp = lastUpdate.itemsBackedUp
				if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
					log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
				}
				backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: lastUpdate.totalItems, ItemsBackedUp: lastUpdate.itemsBackedUp}
				lastUpdate = nil
			}
		}

@@ -413,12 +421,17 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,

	// do a final update on progress since we may have just added some CRDs and may not have updated
	// for the last few processed items.
	backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
	original = backupRequest.Backup.DeepCopy()
	backupRequest.Backup.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
	if err := kube.PatchResource(original, backupRequest.Backup, kb.kbClient); err != nil {
	updated = backupRequest.Backup.DeepCopy()
	if updated.Status.Progress == nil {
		updated.Status.Progress = &velerov1api.BackupProgress{}
	}
	updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems)
	updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems)

	if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil {
		log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress")
	}
	backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}

	log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))
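Editor's note: the three hunks above apply the same fix in three places. Instead of mutating the live Backup in place, the code now deep-copies the Backup, makes sure Status.Progress is non-nil on the copy, sets the counters, and patches from the live object to the copy, so the progress actually lands on the server-side object (changelog entry #6324). Below is a condensed sketch of that pattern. PatchResource and its (original, updated, client) argument order are taken from the diff; the package name, import paths, client type, and helper signature are assumptions for illustration.

```go
package backup

import (
	"github.com/sirupsen/logrus"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/kube" // assumed import path for the kube helpers
)

// updateBackupProgress condenses the repeated pattern from the hunks above.
// It is an illustrative helper, not code from the commit.
func updateBackupProgress(
	log logrus.FieldLogger,
	kbClient kbclient.Client,
	backup *velerov1api.Backup,
	totalItems, itemsBackedUp int,
) {
	updated := backup.DeepCopy()
	if updated.Status.Progress == nil {
		// Initialize Progress on the copy so setting the counters is always safe.
		updated.Status.Progress = &velerov1api.BackupProgress{}
	}
	updated.Status.Progress.TotalItems = totalItems
	updated.Status.Progress.ItemsBackedUp = itemsBackedUp

	// Patch from the live object to the modified copy; progress updates are
	// best-effort, so a failure is only logged.
	if err := kube.PatchResource(backup, updated, kbClient); err != nil {
		log.WithError(err).Warn("Got error trying to update backup's status.progress")
	}
}
```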
@@ -448,6 +448,10 @@ const (
	azureCsiZoneKey = "topology.disk.csi.azure.com/zone"
	gkeCsiZoneKey = "topology.gke.io/zone"
	gkeZoneSeparator = "__"

	// OpenStack CSI drivers topology keys
	cinderCsiZoneKey = "topology.manila.csi.openstack.org/zone"
	manilaCsiZoneKey = "topology.cinder.csi.openstack.org/zone"
)

// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided

@@ -502,7 +506,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
	if !labelFound {
		var k string
		log.Infof("label %q is not present on PersistentVolume", zoneLabelDeprecated)
		k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, zoneLabel, zoneLabelDeprecated)
		k, pvFailureDomainZone = zoneFromPVNodeAffinity(pv, awsEbsCsiZoneKey, azureCsiZoneKey, gkeCsiZoneKey, cinderCsiZoneKey, manilaCsiZoneKey, zoneLabel, zoneLabelDeprecated)
		if pvFailureDomainZone != "" {
			log.Infof("zone info from nodeAffinity requirements: %s, key: %s", pvFailureDomainZone, k)
		} else {
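Editor's note: zoneFromPVNodeAffinity itself is not part of this diff; only its call site gains the two OpenStack keys. The sketch below is an illustration of the kind of lookup such a helper performs, checking the supplied topology keys against the PV's required node-affinity terms and returning the first key that carries a value. It is an assumption for clarity, not Velero's implementation.

```go
package example

import (
	corev1api "k8s.io/api/core/v1"
)

// zoneFromNodeAffinitySketch is an illustrative stand-in for a helper like
// zoneFromPVNodeAffinity: it walks the supplied topology keys, in order, over
// the PV's required node-affinity terms and returns the first matching key
// together with its first value.
func zoneFromNodeAffinitySketch(pv *corev1api.PersistentVolume, keys ...string) (key, zone string) {
	affinity := pv.Spec.NodeAffinity
	if affinity == nil || affinity.Required == nil {
		return "", ""
	}
	for _, k := range keys {
		for _, term := range affinity.Required.NodeSelectorTerms {
			for _, exp := range term.MatchExpressions {
				if exp.Key == k && len(exp.Values) > 0 {
					return k, exp.Values[0]
				}
			}
		}
	}
	return "", ""
}
```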
@@ -514,10 +514,13 @@ High priorities:
  - Replica sets go before deployments/other controllers so they can be explicitly
    restored and be adopted by controllers.
  - CAPI ClusterClasses go before Clusters.
  - Endpoints go before Services so no new Endpoints will be created
  - Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created
    for the same clusters

Low priorities:
  - Tanzu ClusterBootstraps go last as it can reference any other kind of resources.
    ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
  - ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster
  - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic.
    Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters.
    See https://github.com/kubernetes-sigs/cluster-api/issues/4105

@@ -543,6 +546,7 @@ var defaultRestorePriorities = restore.Priorities{
		// in the backup.
		"replicasets.apps",
		"clusterclasses.cluster.x-k8s.io",
		"endpoints",
		"services",
	},
	LowPriorities: []string{
@@ -132,6 +132,21 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
		return nil, nil
	}

	err := kube.IsPodRunning(pod)
	if err != nil {
		for _, volumeName := range volumesToBackup {
			err = errors.Wrapf(err, "backup for volume %s is skipped", volumeName)
			log.WithError(err).Warn("Skip pod volume")
		}

		return nil, nil
	}

	err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
	if err != nil {
		return nil, []error{err}
	}

	repositoryType := getRepositoryType(b.uploaderType)
	if repositoryType == "" {
		err := errors.Errorf("empty repository type, uploader %s", b.uploaderType)

@@ -143,16 +158,6 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
		return nil, []error{err}
	}

	err = kube.IsPodRunning(pod)
	if err != nil {
		return nil, []error{err}
	}

	err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient)
	if err != nil {
		return nil, []error{err}
	}

	// get a single non-exclusive lock since we'll wait for all individual
	// backups to be complete before releasing it.
	b.repoLocker.Lock(repo.Name)
@@ -21,6 +21,8 @@ import (
	"context"
	"os"

	goerr "errors"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"

@@ -76,16 +78,20 @@ func GetS3Credentials(config map[string]string) (credentials.Value, error) {
// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetAWSBucketRegion(bucket string) (string, error) {
	var region string

	sess, err := session.NewSession()
	if err != nil {
		return "", errors.WithStack(err)
	}

	var region string
	var requestErrs []error

	for _, partition := range endpoints.DefaultPartitions() {
		for regionHint := range partition.Regions() {
			region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
			region, err = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
			if err != nil {
				requestErrs = append(requestErrs, errors.Wrapf(err, "error to get region with hint %s", regionHint))
			}

			// we only need to try a single region hint per partition, so break after the first
			break

@@ -96,5 +102,9 @@ func GetAWSBucketRegion(bucket string) (string, error) {
		}
	}

	return "", errors.New("unable to determine bucket's region")
	if requestErrs == nil {
		return "", errors.Errorf("unable to determine region by bucket %s", bucket)
	} else {
		return "", errors.Wrapf(goerr.Join(requestErrs...), "error to get region by bucket %s", bucket)
	}
}
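Editor's note: the rewritten GetAWSBucketRegion above records every failed region-hint probe and surfaces them through goerr.Join (available since Go 1.20, which the go.mod bump enables) wrapped with the bucket name, addressing changelog entry #6477. The small standalone program below only illustrates how that aggregated error reads; the bucket and hint values are made up and this is not Velero code.

```go
package main

import (
	goerr "errors"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Pretend both region-hint probes failed, as GetAWSBucketRegion would record them.
	requestErrs := []error{
		errors.New("error to get region with hint us-east-1: AccessDenied"),
		errors.New("error to get region with hint cn-north-1: RequestCanceled"),
	}

	// goerr.Join keeps every cause (one per line); errors.Wrapf prefixes the bucket context.
	err := errors.Wrapf(goerr.Join(requestErrs...), "error to get region by bucket %s", "my-bucket")
	fmt.Println(err)
	// Prints:
	// error to get region by bucket my-bucket: error to get region with hint us-east-1: AccessDenied
	// error to get region with hint cn-north-1: RequestCanceled
}
```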
@@ -1514,10 +1514,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
	if patchBytes != nil {
		if _, err = resourceClient.Patch(name, patchBytes); err != nil {
			ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
			errs.Add(namespace, err)
			return warnings, errs, itemExists
			if !apierrors.IsNotFound(err) {
				errs.Add(namespace, err)
				return warnings, errs, itemExists
			}
		} else {
			ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
		}
		ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
	}

	if groupResource == kuberesource.Pods {
@@ -71,7 +71,7 @@ func NewUploaderProvider(
	log logrus.FieldLogger,
) (Provider, error) {
	if credGetter.FromFile == nil {
		return nil, errors.New("uninitialized FileStore credentail is not supported")
		return nil, errors.New("uninitialized FileStore credential is not supported")
	}
	if uploaderType == uploader.KopiaType {
		// We use the hardcode repositoryType velerov1api.BackupRepositoryTypeKopia for now, because we have only one implementation of unified repo.
@@ -87,7 +87,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
		p.oldNodeName = nodeName
		fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
		pvcAnn := map[string]string{p.ann: nodeName}
		_, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn)
		_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn, nil)
		Expect(err).To(Succeed())
		err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
		Expect(err).To(Succeed())
@@ -85,7 +85,7 @@ func (s *StorageClasssChanging) CreateResources() error {
	})

	By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
		_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil)
		_, err := CreatePod(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil, nil)
		Expect(err).To(Succeed())
	})
	By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {
@@ -117,6 +117,7 @@ var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to

var _ = Describe("[Schedule][BR][Pause][LongTime] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest)
var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
var _ = Describe("[Schedule][BackupCreation] Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", ScheduleBackupCreationTest)

var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest)
@@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error {
			podName := fmt.Sprintf("pod-%d", i)
			pods = append(pods, podName)
			By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
				pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
				pod, err := CreatePod(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil, nil)
				Expect(err).To(Succeed())
				ann := map[string]string{
					p.annotation: volumesToAnnotation,
test/e2e/schedule/schedule-backup-creation.go (new file, 138 lines)

@@ -0,0 +1,138 @@
package schedule

import (
	"context"
	"fmt"
	"math/rand"
	"strings"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	. "github.com/vmware-tanzu/velero/test/e2e"
	. "github.com/vmware-tanzu/velero/test/e2e/test"
	. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
	. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
)

type ScheduleBackupCreation struct {
	TestCase
	namespace        string
	ScheduleName     string
	ScheduleArgs     []string
	Period           int //Limitation: The unit is minitue only and 60 is divisible by it
	randBackupName   string
	verifyTimes      int
	volume           string
	podName          string
	pvcName          string
	podAnn           map[string]string
	podSleepDuration time.Duration
}

var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{namespace: "sch1", TestCase: TestCase{NSBaseName: "schedule-backup-creation-test", UseVolumeSnapshots: false}})

func (n *ScheduleBackupCreation) Init() error {
	n.VeleroCfg = VeleroCfg
	n.Client = *n.VeleroCfg.ClientToInstallVelero
	n.Period = 3      // Unit is minute
	n.verifyTimes = 5 // More larger verify times more confidence we have
	podSleepDurationStr := "300s"
	n.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
	n.TestMsg = &TestMSG{
		Desc:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
		FailedMSG: "Failed to verify schedule back creation behavior",
		Text:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
	}
	n.podAnn = map[string]string{
		"pre.hook.backup.velero.io/container": n.podName,
		"pre.hook.backup.velero.io/command":   "[\"sleep\", \"" + podSleepDurationStr + "\"]",
		"pre.hook.backup.velero.io/timeout":   "600s",
	}
	n.volume = "volume-1"
	n.podName = "pod-1"
	n.pvcName = "pvc-1"
	return nil
}

func (n *ScheduleBackupCreation) StartRun() error {
	n.namespace = fmt.Sprintf("%s-%s", n.NSBaseName, "ns")
	n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String()
	n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()

	n.ScheduleArgs = []string{
		"--include-namespaces", n.namespace,
		"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
		"--default-volumes-to-fs-backup",
	}
	Expect(n.Period < 30).To(Equal(true))
	return nil
}

func (p *ScheduleBackupCreation) CreateResources() error {
	p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
	By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
		Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
			fmt.Sprintf("Failed to create namespace %s", p.namespace))
	})

	By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
		_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, nil, p.podAnn)
		Expect(err).To(Succeed())
		err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
		Expect(err).To(Succeed())
	})
	return nil
}

func (n *ScheduleBackupCreation) Backup() error {
	// Wait until the beginning of the given period to create schedule, it will give us
	// a predictable period to wait for the first scheduled backup, and verify no immediate
	// scheduled backup was created between schedule creation and first scheduled backup.
	By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
		for i := 0; i < n.Period*60/30; i++ {
			time.Sleep(30 * time.Second)
			now := time.Now().Minute()
			triggerNow := now % n.Period
			if triggerNow == 0 {
				Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
					RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "")
					return "Fail to restore workload"
				})
				break
			}
		}
	})

	By("Delay one more minute to make sure the new backup was created in the given period", func() {
		time.Sleep(1 * time.Minute)
	})

	By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() {
		for i := 1; i <= n.verifyTimes; i++ {
			fmt.Printf("Start to sleep %d minute #%d time...\n", n.podSleepDuration, i)
			mi, _ := time.ParseDuration("60s")
			time.Sleep(n.podSleepDuration + mi)
			bMap := make(map[string]string)
			backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName)
			Expect(err).To(Succeed())
			Expect(len(backupsInfo) == i).To(Equal(true))
			for index, bi := range backupsInfo {
				bList := strings.Split(bi, ",")
				fmt.Printf("Backup %d: %v\n", index, bList)
				bMap[bList[0]] = bList[1]
				_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
				Expect(err).To(Succeed())
			}
			if i == n.verifyTimes-1 {
				backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
				n.randBackupName = strings.Split(backupInfo, ",")[0]
			}
		}
	})
	return nil
}

func (n *ScheduleBackupCreation) Restore() error {
	return nil
}
@@ -31,7 +31,7 @@ func (n *ScheduleBackup) Init() error {
	n.VeleroCfg = VeleroCfg
	n.Client = *n.VeleroCfg.ClientToInstallVelero
	n.Period = 3      // Unit is minute
	n.verifyTimes = 5 // More verify times more confidence
	n.verifyTimes = 5 // More larger verify times more confidence we have
	n.TestMsg = &TestMSG{
		Desc:      "Set up a scheduled backup defined by a Cron expression",
		FailedMSG: "Failed to schedule a backup",
@@ -25,7 +25,6 @@ import (
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	corev1 "k8s.io/api/core/v1"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"

@@ -64,12 +63,12 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
			checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
			if err != nil {
				//Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout.
				fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning)))
				fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1.PodRunning)))
				return false, nil
			}
			// If any pod is still waiting we don't need to check any more so return and wait for next poll interval
			if checkPod.Status.Phase != corev1api.PodRunning {
				fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1api.PodRunning)
			if checkPod.Status.Phase != corev1.PodRunning {
				fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1.PodRunning)
				return false, nil
			}
		}

@@ -255,35 +254,6 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error
	}
	return pv_value.Name, nil
}
func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) {
	volumes := []corev1.Volume{}
	for _, volume := range volumeNameList {
		var _pvcName string
		if pvcName == "" {
			_pvcName = fmt.Sprintf("pvc-%s", volume)
		} else {
			_pvcName = pvcName
		}
		pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
		if err != nil {
			return nil, err
		}
		volumes = append(volumes, corev1.Volume{
			Name: volume,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
					ReadOnly:  false,
				},
			},
		})
	}
	pod, err := CreatePod(client, ns, podName, volumes)
	if err != nil {
		return nil, err
	}
	return pod, nil
}

func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error {
	arg := []string{"exec", "-n", namespace, "-c", podName, podName,
@@ -35,6 +35,11 @@ import (

func CreateNamespace(ctx context.Context, client TestClient, namespace string) error {
	ns := builder.ForNamespace(namespace).Result()
	// Add label to avoid PSA check.
	ns.Labels = map[string]string{
		"pod-security.kubernetes.io/enforce":         "baseline",
		"pod-security.kubernetes.io/enforce-version": "latest",
	}
	_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		return nil

@@ -45,6 +50,9 @@ func CreateNamespace(ctx context.Context, client TestClient, namespace string) e
func CreateNamespaceWithLabel(ctx context.Context, client TestClient, namespace string, label map[string]string) error {
	ns := builder.ForNamespace(namespace).Result()
	ns.Labels = label
	// Add label to avoid PSA check.
	ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
	ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
	_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		return nil

@@ -54,6 +62,11 @@
func CreateNamespaceWithAnnotation(ctx context.Context, client TestClient, namespace string, annotation map[string]string) error {
	ns := builder.ForNamespace(namespace).Result()
	// Add label to avoid PSA check.
	ns.Labels = map[string]string{
		"pod-security.kubernetes.io/enforce":         "baseline",
		"pod-security.kubernetes.io/enforce-version": "latest",
	}
	ns.ObjectMeta.Annotations = annotation
	_, err := client.ClientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
@@ -26,7 +26,34 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) {
func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) {
	if pvcName != "" && len(volumeNameList) != 1 {
		return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
	}
	volumes := []corev1.Volume{}
	for _, volume := range volumeNameList {
		var _pvcName string
		if pvcName == "" {
			_pvcName = fmt.Sprintf("pvc-%s", volume)
		} else {
			_pvcName = pvcName
		}
		pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
		if err != nil {
			return nil, err
		}

		volumes = append(volumes, corev1.Volume{
			Name: volume,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
					ReadOnly:  false,
				},
			},
		})
	}

	vmList := []corev1.VolumeMount{}
	for _, v := range volumes {
		vmList = append(vmList, corev1.VolumeMount{

@@ -34,9 +61,11 @@ func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*co
			MountPath: "/" + v.Name,
		})
	}

	p := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Name:        name,
			Annotations: ann,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
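Editor's note: the consolidated CreatePod above now creates the backing PVCs itself, which is why the e2e hunks earlier in this compare replace CreatePodWithPVC calls with CreatePod. The sketch below shows two illustrative calls using the signature from the hunk; the namespace, names, and annotation values are invented for the example.

```go
package example

import (
	. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
)

// createExamplePods is illustrative only; argument values are made up.
func createExamplePods(client TestClient) error {
	// One explicitly named PVC: the volume list must contain exactly one entry,
	// and pod annotations now flow through the new trailing parameter.
	if _, err := CreatePod(client, "test-ns", "pod-1", "default", "pvc-1",
		[]string{"volume-1"}, nil, map[string]string{"pre.hook.backup.velero.io/timeout": "600s"}); err != nil {
		return err
	}

	// Empty pvcName: a PVC named "pvc-<volume>" is created for each listed volume.
	_, err := CreatePod(client, "test-ns", "pod-2", "e2e-storage-class", "",
		[]string{"vol-a", "vol-b"}, nil, nil)
	return err
}
```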
@@ -38,7 +38,7 @@ func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (*
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
				corev1.ResourceStorage: resource.MustParse("1Mi"),
			},
		},
		StorageClassName: &sc,
@@ -200,6 +200,13 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler
		return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
	}

	labelNamespaceCmd := exec.CommandContext(ctx, "kubectl", "label", "namespace", namespace, "pod-security.kubernetes.io/enforce=baseline", "pod-security.kubernetes.io/enforce-version=latest", "--overwrite=true")
	_, stderr, err = veleroexec.RunCommand(labelNamespaceCmd)
	fmt.Printf("Label namespace with PSA policy: %s\n", labelNamespaceCmd)
	if err != nil {
		return errors.Wrapf(err, "failed to label namespace with PSA policy, stderr=%s", stderr)
	}

	kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
		"-n", namespace, "-w", "--timeout=30m")
	_, stderr, err = veleroexec.RunCommand(kibishiiSetWaitCmd)