mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-01-31 17:12:07 +00:00
Compare commits
67 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4121808f38 | ||
|
|
420a65a116 | ||
|
|
3bf4a7dced | ||
|
|
2a5804b595 | ||
|
|
9a3fabbc55 | ||
|
|
99a46ed818 | ||
|
|
93e8379530 | ||
|
|
72ddfd7d78 | ||
|
|
8b3ba78c8c | ||
|
|
b34f2deff2 | ||
|
|
e9666f9aea | ||
|
|
e6aab8ca93 | ||
|
|
99f12b85ba | ||
|
|
f8938e7fed | ||
|
|
cabb04575e | ||
|
|
60dbcbc60d | ||
|
|
4ade8cf8a2 | ||
|
|
826c73131e | ||
|
|
031df8d5e0 | ||
|
|
21691451e9 | ||
|
|
50d7b1cff1 | ||
|
|
37df853a99 | ||
|
|
d545ad49ba | ||
|
|
7831bf25b9 | ||
|
|
2abe91e08c | ||
|
|
1ebe357d18 | ||
|
|
9df17eb02b | ||
|
|
f2a27c3864 | ||
|
|
8ee3436f5c | ||
|
|
4847eeaf62 | ||
|
|
1ec281a64e | ||
|
|
25de1bb3b6 | ||
|
|
e21b21c19e | ||
|
|
b19cad9d01 | ||
|
|
9b6c4b1d47 | ||
|
|
b9159c22ca | ||
|
|
112bea520e | ||
|
|
7e15e9ba05 | ||
|
|
f50cafa472 | ||
|
|
a7b2985c83 | ||
|
|
59289fba76 | ||
|
|
925479553a | ||
|
|
47340e67af | ||
|
|
25a7ef0e87 | ||
|
|
799d596d5c | ||
|
|
5ba00dfb09 | ||
|
|
f1476defde | ||
|
|
67ff0dcbe0 | ||
|
|
aad9dd9068 | ||
|
|
b636334079 | ||
|
|
4d44705ed8 | ||
|
|
81c5b6692d | ||
|
|
02edbc0c65 | ||
|
|
e8208097ba | ||
|
|
4c30499340 | ||
|
|
2a9203f1b2 | ||
|
|
3be76da952 | ||
|
|
7132720a49 | ||
|
|
2dbfbc29e8 | ||
|
|
80da461458 | ||
|
|
fdee2700a7 | ||
|
|
8e1c4a7dc5 | ||
|
|
09b5183fce | ||
|
|
c5b70b4a0d | ||
|
|
248a840918 | ||
|
|
04fb20676d | ||
|
|
996d2a025f |
23
.github/workflows/e2e-test-kind.yaml
vendored
23
.github/workflows/e2e-test-kind.yaml
vendored
@@ -8,18 +8,26 @@ on:
|
||||
- "design/**"
|
||||
- "**/*.md"
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
outputs:
|
||||
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
# Look for a CLI that's made for this PR
|
||||
- name: Fetch built CLI
|
||||
id: cli-cache
|
||||
@@ -97,6 +105,7 @@ jobs:
|
||||
needs:
|
||||
- build
|
||||
- setup-test-matrix
|
||||
- get-go-version
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
|
||||
@@ -104,10 +113,12 @@ jobs:
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
# Fetch the pre-built MinIO image from the build job
|
||||
- name: Fetch built MinIO Image
|
||||
uses: actions/cache@v4
|
||||
|
||||
33
.github/workflows/get-go-version.yaml
vendored
Normal file
33
.github/workflows/get-go-version.yaml
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
ref:
|
||||
description: "The target branch's ref"
|
||||
required: true
|
||||
type: string
|
||||
outputs:
|
||||
version:
|
||||
description: "The expected Go version"
|
||||
value: ${{ jobs.extract.outputs.version }}
|
||||
|
||||
jobs:
|
||||
extract:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.pick-version.outputs.version }}
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- id: pick-version
|
||||
run: |
|
||||
if [ "${{ inputs.ref }}" == "main" ]; then
|
||||
version=$(grep '^go ' go.mod | awk '{print $2}' | cut -d. -f1-2)
|
||||
else
|
||||
goDirectiveVersion=$(grep '^go ' go.mod | awk '{print $2}')
|
||||
toolChainVersion=$(grep '^toolchain ' go.mod | awk '{print $2}')
|
||||
version=$(printf "%s\n%s\n" "$goDirectiveVersion" "$toolChainVersion" | sort -V | tail -n1)
|
||||
fi
|
||||
|
||||
echo "version=$version"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
14
.github/workflows/pr-ci-check.yml
vendored
14
.github/workflows/pr-ci-check.yml
vendored
@@ -1,18 +1,26 @@
|
||||
name: Pull Request CI Check
|
||||
on: [pull_request]
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
build:
|
||||
name: Run CI
|
||||
needs: get-go-version
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
- name: Make ci
|
||||
run: make ci
|
||||
- name: Upload test coverage
|
||||
|
||||
14
.github/workflows/pr-linter-check.yml
vendored
14
.github/workflows/pr-linter-check.yml
vendored
@@ -7,16 +7,24 @@ on:
|
||||
- "design/**"
|
||||
- "**/*.md"
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
build:
|
||||
name: Run Linter Check
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
- name: Linter check
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
|
||||
13
.github/workflows/push.yml
vendored
13
.github/workflows/push.yml
vendored
@@ -9,17 +9,24 @@ on:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
get-go-version:
|
||||
uses: ./.github/workflows/get-go-version.yaml
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
needs: get-go-version
|
||||
steps:
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v5
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
|
||||
- name: Set up Go version
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ${{ needs.get-go-version.outputs.version }}
|
||||
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
2
.github/workflows/stale-issues.yml
vendored
2
.github/workflows/stale-issues.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v9.1.0
|
||||
- uses: actions/stale@v10.1.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24.11-bookworm AS velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Restic binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24.11-bookworm AS restic-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
@@ -73,7 +73,7 @@ RUN mkdir -p /output/usr/bin && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Velero image packing section
|
||||
FROM paketobuildpacks/run-jammy-tiny:0.2.90
|
||||
FROM paketobuildpacks/run-jammy-tiny:latest
|
||||
|
||||
LABEL maintainer="Xun Jiang <jxun@vmware.com>"
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
ARG OS_VERSION=1809
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24.11-bookworm AS velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
|
||||
@@ -42,7 +42,7 @@ The following is a list of the supported Kubernetes versions for each Velero ver
|
||||
|
||||
| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|
||||
|----------------|-------------------------------------------|-------------------------------------|
|
||||
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, and 1.33.1 |
|
||||
| 1.17 | 1.18-latest | 1.31.7, 1.32.3, 1.33.1, and 1.34.0 |
|
||||
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
|
||||
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
|
||||
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
|
||||
|
||||
2
Tiltfile
2
Tiltfile
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
|
||||
|
||||
tilt_helper_dockerfile_header = """
|
||||
# Tilt image
|
||||
FROM golang:1.24.11 as tilt-helper
|
||||
FROM golang:1.24 as tilt-helper
|
||||
|
||||
# Support live reloading with Tilt
|
||||
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
|
||||
|
||||
@@ -1,54 +1,3 @@
|
||||
## v1.17.2
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.17.2
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.17.2`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.17/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.17/upgrade-to-1.17/
|
||||
|
||||
### All Changes
|
||||
* Track actual resource names for GenerateName in restore status (#9409, @shubham-pampattiwar)
|
||||
* Fix managed fields patch for resources using GenerateName (#9408, @shubham-pampattiwar)
|
||||
* don't copy securitycontext from first container if configmap found (#9394, @sseago)
|
||||
* Add Role, RoleBinding, ClusterRole, and ClusterRoleBinding in restore sequence. (#9479, @blackpiglet)
|
||||
|
||||
|
||||
## v1.17.1
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.17.1
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.17.1`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.17/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.17/upgrade-to-1.17/
|
||||
|
||||
### All Changes
|
||||
* Fix issue #9365, prevent fake completion notification due to multiple update of single PVR (#9376, @Lyndon-Li)
|
||||
* Fix issue #9332, add bytesDone for cache files (#9341, @Lyndon-Li)
|
||||
* VerifyJSONConfigs verify every elements in Data. (#9303, @blackpiglet)
|
||||
* Add option for privileged fs-backup pod (#9300, @sseago)
|
||||
* Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment (#9299, @shubham-pampattiwar)
|
||||
* Fix issue #9229, don't attach backupPVC to the source node (#9297, @Lyndon-Li)
|
||||
* Protect VolumeSnapshot field from race condition during multi-thread backup (#9292, @0xLeo258)
|
||||
* Implement concurrency control for cache of native VolumeSnapshotter plugin. (#9290, @0xLeo258)
|
||||
* Backport to 1.17 (PR#9244 Update AzureAD Microsoft Authentication Library to v1.5.0) (#9285, @priyansh17)
|
||||
* Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases (#9277, @shubham-pampattiwar)
|
||||
* Get pod list once per namespace in pvc IBA (#9266, @sseago)
|
||||
* Update AzureAD Microsoft Authentication Library to v1.5.0 (#9244, @priyansh17)
|
||||
* feat: Permit specifying annotations for the BackupPVC (#9173, @clementnuss)
|
||||
|
||||
|
||||
## v1.17
|
||||
|
||||
### Download
|
||||
|
||||
1
changelogs/unreleased/9173-clementnuss
Normal file
1
changelogs/unreleased/9173-clementnuss
Normal file
@@ -0,0 +1 @@
|
||||
feat: Permit specifying annotations for the BackupPVC
|
||||
1
changelogs/unreleased/9226-sseago
Normal file
1
changelogs/unreleased/9226-sseago
Normal file
@@ -0,0 +1 @@
|
||||
Get pod list once per namespace in pvc IBA
|
||||
1
changelogs/unreleased/9233-Lyndon-Li
Normal file
1
changelogs/unreleased/9233-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9229, don't attach backupPVC to the source node
|
||||
1
changelogs/unreleased/9244-priyansh17
Normal file
1
changelogs/unreleased/9244-priyansh17
Normal file
@@ -0,0 +1 @@
|
||||
Update AzureAD Microsoft Authentication Library to v1.5.0
|
||||
1
changelogs/unreleased/9248-0xLeo258
Normal file
1
changelogs/unreleased/9248-0xLeo258
Normal file
@@ -0,0 +1 @@
|
||||
Protect VolumeSnapshot field from race condition during multi-thread backup
|
||||
1
changelogs/unreleased/9256-shubham-pampattiwar
Normal file
1
changelogs/unreleased/9256-shubham-pampattiwar
Normal file
@@ -0,0 +1 @@
|
||||
Fix repository maintenance jobs to inherit allowlisted tolerations from Velero deployment
|
||||
1
changelogs/unreleased/9264-shubham-pampattiwar
Normal file
1
changelogs/unreleased/9264-shubham-pampattiwar
Normal file
@@ -0,0 +1 @@
|
||||
Fix schedule controller to prevent backup queue accumulation during extended blocking scenarios by properly handling empty backup phases
|
||||
1
changelogs/unreleased/9269-Lyndon-Li
Normal file
1
changelogs/unreleased/9269-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #7904, remove the code and doc for PVC node selection
|
||||
1
changelogs/unreleased/9281-0xLeo258
Normal file
1
changelogs/unreleased/9281-0xLeo258
Normal file
@@ -0,0 +1 @@
|
||||
Implement concurrency control for cache of native VolumeSnapshotter plugin.
|
||||
1
changelogs/unreleased/9295-sseago
Normal file
1
changelogs/unreleased/9295-sseago
Normal file
@@ -0,0 +1 @@
|
||||
Add option for privileged fs-backup pod
|
||||
1
changelogs/unreleased/9296-Lyndon-Li
Normal file
1
changelogs/unreleased/9296-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9267, add events to data mover prepare diagnostic
|
||||
1
changelogs/unreleased/9302-blackpiglet
Normal file
1
changelogs/unreleased/9302-blackpiglet
Normal file
@@ -0,0 +1 @@
|
||||
VerifyJSONConfigs verify every elements in Data.
|
||||
1
changelogs/unreleased/9329-T4iFooN-IX
Normal file
1
changelogs/unreleased/9329-T4iFooN-IX
Normal file
@@ -0,0 +1 @@
|
||||
Fix typos in documentation
|
||||
1
changelogs/unreleased/9333-Lyndon-Li
Normal file
1
changelogs/unreleased/9333-Lyndon-Li
Normal file
@@ -0,0 +1 @@
|
||||
Fix issue #9332, add bytesDone for cache files
|
||||
257
design/concurrent-backup-processing.md
Normal file
257
design/concurrent-backup-processing.md
Normal file
@@ -0,0 +1,257 @@
|
||||
# Concurrent Backup Processing
|
||||
|
||||
This enhancement will enable Velero to process multiple backups at the same time. This is largely a usability enhancement rather than a performance enhancement, since the overall backup throughput may not be significantly improved over the current implementation, since we are already processing individual backup items in parallel. It is a significant usability improvement, though, as with the current design, a user who submits a small backup may have to wait significantly longer than expected if the backup is submitted immediately after a large backup.
|
||||
|
||||
## Background
|
||||
|
||||
With the current implementation, only one backup may be `InProgress` at a time. A second backup created will not start processing until the first backup moves on to `WaitingForPluginOperations` or `Finalizing`. This is a usability concern, especially in clusters when multiple users are initiating backups. With this enhancement, we intend to allow multiple backups to be processed concurrently. This will allow backups to start processing immediately, even if a large backup was just submitted by another user. This enhancement will build on top of the prior parallel item processing feature by creating a dedicatede ItemBlock worker pool for each running backup. The pool will be created at the beginning of the backup reconcile, and the input channel will be passed to the Kubernetes backupper just like it is in the current release.
|
||||
|
||||
The primary challenge is to make sure that the same workload in multiple backups is not backed up concurrently. If that were to happen, we would risk data corruption, especially around the processing of pod hooks and volume backup. For this first release we will take a conservative, high-level approach to overlap detection. Two backups will not run concurrently if there is any overlap in included namespaces. For example, if a backup that includes `ns1` and `ns2` is running, then a second backup for `ns2` and `ns3` will not be started. If a backup which does not filter namespaces is running (either a whole cluster backup or a non-namespace-limited backup with a label selector) then no other backups will be started, since a backup across all namespaces overlaps with any other backup. Calculating item-level overlap for queued backups is problematic since we don't know which items are included in a backup until backup processing has begun. A future release may add ItemBlock overlap detection, where at the item block worker level, the same item will not be processed by two different workers at the same time. This works together with workload conflict detection to further detect conflicts in a more granular level for shared resources between backups. Eventually, with a more complete understanding of individual workloads (either via ItemBlocks or some higher level model), the namespace level overlap detection may be relaxed in future versions.
|
||||
|
||||
## Goals
|
||||
- Process multiple backups concurrently
|
||||
- Detect namespace overlap to avoid conflicts
|
||||
- For queued backups (not yet runnable due to concurrency limits or overlap), indicate the queue position in status
|
||||
|
||||
## Non Goals
|
||||
- Handling NFS PVs when more than one PV point to the same underlying NFS share
|
||||
- Handling VGDP cancellation for failed backups on restart
|
||||
- Mounting a PVC for scenarios in which /tmp is too small for the number of concurrent backups
|
||||
- Providing a mechanism to identify high priority backups which get preferential treatment in terms of ItemBlock worker availability
|
||||
- Item-level overlap detection (future feature)
|
||||
- Providing the ability to disable namespace-level overlap detection once Item-level overlap detection is in place (although this may be supported in a future version).
|
||||
|
||||
## High-Level Design
|
||||
|
||||
### Backup CRD changes
|
||||
|
||||
Two new backup phases will be added: `Queued` and `ReadyToStart`. In the Backup workflow, new backups will be moved to the Queued phase when they are added to the backup queue. When a backup is removed from the queue because it is now able to run, it will be moved to the `ReadyToStart` phase, which will allow the backup controller to start processing it.
|
||||
|
||||
In addition, a new Status field, `QueuePosition`, will be added to track the backup's current position in the queue.
|
||||
|
||||
### New Controller: `backupQueueReconciler`
|
||||
|
||||
A new reconciler will be added, `backupQueueReconciler` which will use the current `backupReconciler` logic for reconciling `New` backups but instead of running the backup, it will move the Backup to the `Queued` phase and set `QueuePosition`.
|
||||
|
||||
In addition, this reconciler will periodically reconcile all queued backups (on some configurable time interval) and if there is a runnable backup, remove it from the queue, update `QueuePosition` for any queued backups behind it, and update its phase to `ReadyToStart`.
|
||||
|
||||
Queued backups will be reconciled in order based on `QueuePosition`, so the first runnable backup found will be processed. A backup is runnable if both of the following conditions are true:
|
||||
1) The total number of backups either `InProgress` or `ReadyToStart` is less than the configured number of concurrent backups.
|
||||
2) The backup has no overlap with any backups currently `InProgress` or `ReadyToStart` or with any `Queued` backups with a higher (i.e. closer to 1) queue position than this backup.
|
||||
|
||||
### Updates to Backup controller
|
||||
|
||||
The current `backupReconciler` will change its reconciling rules. Instead of watching and reconciling New backups, it will reconcile `ReadyToStart` backups. In addition, it will be configured to run in parallel by setting `MaxConcurrentReconciles` based on the `concurrent-backups` server arg.
|
||||
|
||||
The startup (and shutdown) of the ItemBlock worker pool will be moved from reconciler startup to the backup reconcile, which will give each running backup its own dedicated worker pool. The per-backup worker pool will will use the existing `--item-block-worker-count` installer/server arg. This means that the maximum number of ItemBlock workers for the entire Velero pod will be the ItemBlock worker count multiplied by concurrentBackups. For example, if concurrentBackups is 5, and itemBlockWorkerCount is 6, then there will be, at most, 30 worker threads active, 5 dedicated to each InProgress backup, but this maximum will only be achieved when the maximum number of backups are InProgress. This also means that each InProgress backup will have a dedicated ItemBlock input channel with the same fixed buffer size.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### New Install/Server configuration args
|
||||
|
||||
A new install/server arg, `concurrent-backups` will be added. This will be an int-valued field specifying the number of backups which may be processed concurrently (with phase `InProgress`). If not specified, the default value of 1 will be used.
|
||||
|
||||
### Consideration of backup overlap and concurrent backup processing
|
||||
|
||||
The primary consideration for running additional backups concurrently is the configured `concurrent-backups` parameter. If the total number of `InProgress` and `ReadyToStart` backups is equal to `concurrent-backups` then any `Queued` backups will remain in the queue.
|
||||
|
||||
The second consideration is backup overlap. In order to prevent interaction between running backups (particularly around volume backup and pod hooks), we cannot allow two overlapping backups to run at the same time. For now, we will define overlap broadly -- requiring that two concurrent backups don't include any of the same namespaces. A backup for `ns1` can run concurrently with a backup for `ns2`, but a backup for `[ns1,ns2]` cannot run concurrently with a backup for `ns1`. One consequence of this approach is that a backup which includes all namespaces (even if further filtered by resource or label) cannot run concurrently with *any other backup*.
|
||||
|
||||
When determining which queued backup to run next, velero will look for the next queued backup which has no overlap with any InProgress backup or any Queued backup ahead of it. The reason we need to consider queued as well as running backups for overlap detection is as follows.
|
||||
|
||||
Consider the following scenario. These are the current not-completed backups (ordered from oldest to newest)
|
||||
1. backup1, includedNamespaces: [ns1, ns2], phase: InProgress
|
||||
2. backup2, includedNamespaces: [ns2, ns3, ns5], phase: Queued, QueuePosition: 1
|
||||
3. backup3, includedNamespaces: [ns4, ns3], phase: Queued, QueuePosition: 2
|
||||
4. backup4, includedNamespaces: [ns5, ns6], phase: Queued, QueuePosition: 2
|
||||
5. backup5, includedNamespaces: [ns8, ns9], phase: Queued, QueuePosition: 3
|
||||
|
||||
Assuming `concurrent-backups` is 2, on the next reconcile, Velero will be able to start a second backup if there is one with no overlap. `backup2` cannot run, since `ns2` overlaps between it and the running `backup1`. If we only considered running overlap (and not queued overlap), then `backup3` could run now. It conflicts with the queued `backup2` on `ns3` but it does not conflict with the running backup. However, if it runs now, then when `backup1` completes, then `backup2` still can't run (since it now overlaps with running `backup3`on `ns3`), so `backup4` starts instead. Now when `backup3` completes, `backup2` still can't run (since it now conflicts with `backup4` on `ns5`). This means that even though it was the second backup created, it's the fourth to run -- providing worse time to completion than without parallel backups. If a queued backup has a large number of namespaces (a full-cluster backup for example), it would never run as long as new single-namespace backups keep being added to the queue.
|
||||
|
||||
To resolve this problem we consider both running backups as well as backups ahead in the queue when resolving overlap conflicts. In the above scenario, `backup2` can't run yet since it overlaps with the running backup on `ns2`. In addition, `backup3` and `backup4` also can't run yet since they overlap with queued `backup2`. Therefore, `backup5` will run now. Once `backup1` completes, `backup2` will be free to run.
|
||||
|
||||
### Backup CRD changes
|
||||
|
||||
New Backup phases:
|
||||
```go
|
||||
const (
|
||||
// BackupPhaseQueued means the backup has been added to the
|
||||
// queue by the BackupQueueReconciler.
|
||||
BackupPhaseQueued BackupPhase = "Queued"
|
||||
|
||||
// BackupPhaseReadyToStart means the backup has been removed from the
|
||||
// queue by the BackupQueueReconciler and is ready to start.
|
||||
BackupPhaseReadyToStart BackupPhase = "ReadyToStart"
|
||||
)
|
||||
```
|
||||
|
||||
In addition, a new Status field, `queuePosition`, will be added to track the backup's current position in the queue.
|
||||
```go
|
||||
// QueuePosition is the position held by the backup in the queue.
|
||||
// QueuePosition=1 means this backup is the next to be considered.
|
||||
// Only relevant when Phase is "Queued"
|
||||
// +optional
|
||||
QueuePosition int `json:"queuePosition,omitempty"`
|
||||
```
|
||||
|
||||
### New Controller: `backupQueueReconciler`
|
||||
|
||||
A new reconciler will be added, `backupQueueReconciler` which will reconcile backups under these conditions:
|
||||
1) Watching Create/Update for backups in `New` (or empty) phase
|
||||
2) Watching for Backup phase transition from `InProgress` to something else to reconcile all `Queued` backups
|
||||
2) Watching for Backup phase transition from `New` (or empty) to `Queued` to reconcile all `Queued` backups
|
||||
2) Periodic reconcile of `Queued` backups to handle backups queued at server startup as well as to make sure we never have a situation where backups are queued indefinitely because of a race condition or was otherwise missed in the reconcile on prior backup completion.
|
||||
|
||||
The reconciler will be set up as follows -- note that New backups are reconciled on Create/Update, while Queued backups are reconciled when an InProgress backup moves on to another state or when a new backup moves to the Queued state. We also reconcile Queued backups periodically to handle the case of a Velero pod restart with Queued backups, as well as to handle possible edge cases where a queued backup doesn't get moved out of the queue at the point of backup completion or an error occurs during a prior Queued backup reconcile.
|
||||
|
||||
```go
|
||||
func (c *backupOperationsReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
// only consider Queued backups, order by QueuePosition
|
||||
gp := kube.NewGenericEventPredicate(func(object client.Object) bool {
|
||||
backup := object.(*velerov1api.Backup)
|
||||
return (backup.Status.Phase == velerov1api.BackupPhaseQueued)
|
||||
})
|
||||
s := kube.NewPeriodicalEnqueueSource(c.logger.WithField("controller", constant.ControllerBackupOperations), mgr.GetClient(), &velerov1api.BackupList{}, c.frequency, kube.PeriodicalEnqueueSourceOption{
|
||||
Predicates: []predicate.Predicate{gp},
|
||||
OrderFunc: queuePositionOrderFunc,
|
||||
})
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&velerov1api.Backup{}, builder.WithPredicates(predicate.Funcs{
|
||||
UpdateFunc: func(ue event.UpdateEvent) bool {
|
||||
backup := ue.ObjectNew.(*velerov1api.Backup)
|
||||
return backup.Status.Phase == "" || backup.status.Phase == velerov1api.BackupPhaseNew
|
||||
},
|
||||
CreateFunc: func(event.CreateEvent) bool {
|
||||
return backup.Status.Phase == "" || backup.status.Phase == velerov1api.BackupPhaseNew
|
||||
},
|
||||
DeleteFunc: func(de event.DeleteEvent) bool {
|
||||
return false
|
||||
},
|
||||
GenericFunc: func(ge event.GenericEvent) bool {
|
||||
return false
|
||||
},
|
||||
})).
|
||||
Watch(
|
||||
&source.Kind{Type: &velerov1api.Backup{}},
|
||||
&handler.EnqueueRequestsFromMapFunc{
|
||||
ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request {
|
||||
backupList := velerov1api.BackupList{}
|
||||
if err := p.List(ctx, backupList); err != nil {
|
||||
p.logger.WithError(err).Error("error listing backups")
|
||||
return
|
||||
}
|
||||
requests = []reconcile.request{}
|
||||
// filter backup list by Phase=queued
|
||||
// sort backup list by queuePosition
|
||||
return requests
|
||||
}),
|
||||
},
|
||||
builder.WithPredicates(predicate.Funcs{
|
||||
UpdateFunc: func(ue event.UpdateEvent) bool {
|
||||
oldBackup := ue.ObjectOld.(*velerov1api.Backup)
|
||||
newBackup := ue.ObjectNew.(*velerov1api.Backup)
|
||||
return oldBackup.Status.Phase == velerov1api.BackupPhaseInProgress &&
|
||||
newBackup.Status.Phase != velerov1api.BackupPhaseInProgress ||
|
||||
oldBackup.Status.Phase != velerov1api.BackupPhaseQueued &&
|
||||
newBackup.Status.Phase == velerov1api.BackupPhaseQueued
|
||||
},
|
||||
CreateFunc: func(event.CreateEvent) bool {
|
||||
return false
|
||||
},
|
||||
DeleteFunc: func(de event.DeleteEvent) bool {
|
||||
return false
|
||||
},
|
||||
GenericFunc: func(ge event.GenericEvent) bool {
|
||||
return false
|
||||
},
|
||||
}).
|
||||
WatchesRawSource(s).
|
||||
Named(constant.ControllerBackupQueue).
|
||||
Complete(c)
|
||||
}
|
||||
```
|
||||
|
||||
New backups will be queued: Phase will be set to `Queued`, and `QueuePosition` will be set to a int value incremented from the highest current `QueuePosition` value among Queued backups.
|
||||
|
||||
Queued backups will be removed from the queue if runnable:
|
||||
1) If the total number of backups either InProgress or ReadyToStart is greater than or equal to the concurrency limit, then exit without removing from the queue.
|
||||
2) If the current backup overlaps with any InProgress, ReadyToStart, or Queued backup with `QueuePosition < currentBackup.QueuePosition` then exit without removing from the queue.
|
||||
3) If we get here, the backup is runnable. To resolve a potential race condition where an InProgress backup completes between reconciling the backup with QueuePosition `n-1` and reconciling the current backup with QueuePosition `n`, we also check to see whether there are any runnable backups in the queue ahead of this one. The only time this will happen is if a backup completes immediately before reconcile starts which either frees up a concurrency slot or removes a namespace conflict. In this case, we don't want to run the current backup since the one ahead of this one in the queue (which was recently passed over before the InProgress backup completed) must run first. In this case, exit without removing from the queue.
|
||||
4) If we get here, remove the backup from the queue by setting Phase to `ReadyToStart` and `QueuePosition` to zero. Decrement the `QueuePosition` of any other Queued backups with a `QueuePosition` higher than the current backup's queue position prior to dequeuing. At this point, the backup reconciler will start the backup.
|
||||
|
||||
`if len(inProgressBackups)+len(pendingStartBackups) >= concurrentBackups`
|
||||
|
||||
```
|
||||
switch original.Status.Phase {
|
||||
case "", velerov1api.BackupPhaseNew:
|
||||
// enqueue backup -- set phase=Queued, set queuePosition=maxCurrentQueuePosition+1
|
||||
}
|
||||
// We should only ever get these events when added in order by the periodical enqueue source
|
||||
// so as long as the current backup has not conflicts ahead of it or running, we should be good to
|
||||
// dequeue
|
||||
case "", velerov1api.BackupPhaseQueued:
|
||||
// list backups, filter on Queued, ReadyToStart, and InProgress
|
||||
// if number of InProgress backups + number of ReadyToStart backups >= concurrency limit, exit
|
||||
// generate list of all namespaces included in InProgress, ReadyToStart, and Queued backups with
|
||||
// queuePosition < backup.Status.QueuePosition
|
||||
// if overlap found, exit
|
||||
// check backups ahead of this one in the queue for runnability. If any are runnable, exit
|
||||
// dequeue backup: set Phase to ReadyToStart, QueuePosition to 0, and decrement QueuePosition
|
||||
// for all QueuedBackups behind this one in the queue
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The queue controller will run as a single reconciler thread, so we will not need to deal with concurrency issues when moving backups from New to Queued or from Queued to ReadyToStart, and all of the updates to QueuePosition will be from a single thread.
|
||||
|
||||
### Updates to Backup controller
|
||||
|
||||
The Reconcile logic will be updated to respond to ReadyToStart backups instead of New backups:
|
||||
|
||||
```
|
||||
@@ -234,8 +234,8 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
||||
// InProgress, we still need this check so we can return nil to indicate we've finished processing
|
||||
// this key (even though it was a no-op).
|
||||
switch original.Status.Phase {
|
||||
- case "", velerov1api.BackupPhaseNew:
|
||||
- // only process new backups
|
||||
+ case velerov1api.BackupPhaseReadyToStart:
|
||||
+ // only process ReadyToStart backups
|
||||
default:
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"backup": kubeutil.NamespaceAndName(original),
|
||||
```
|
||||
|
||||
In addition, it will be configured to run in parallel by setting `MaxConcurrentReconciles` based on the `concurrent-backups` server arg.
|
||||
|
||||
```
|
||||
@@ -149,6 +149,9 @@ func NewBackupReconciler(
|
||||
func (b *backupReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&velerov1api.Backup{}).
|
||||
+ WithOptions(controller.Options{
|
||||
+ MaxConcurrentReconciles: concurrentBackups,
|
||||
+ }).
|
||||
Named(constant.ControllerBackup).
|
||||
Complete(b)
|
||||
}
|
||||
```
|
||||
|
||||
The controller-runtime core reconciler logic already prevents the same resource from being reconciled by two different reconciler threads, so we don't need to worry about concurrency issues at the controller level.
|
||||
|
||||
The workerPool reference will be moved from the backupReconciler to the backupRequest, since this will now be backup-specific, and the initialization code for the worker pool will be moved from the reconciler init into the backup reconcile. This worker pool will be shut down upon exiting the Reconcile method.
|
||||
|
||||
### Resilience to restart of velero pod
|
||||
|
||||
The new backup phases (`Queued` and `ReadyToStart`) will be resilient to velero pod restarts. If the velero pod crashes or is restarted, only backups in the `InProgress` phase will be failed, so there is no change to current behavior. Queued backups will retain their queue position on restart, and ReadyToStart backups will move to InProgress when reconciled.
|
||||
|
||||
### Observability
|
||||
|
||||
#### Logging
|
||||
|
||||
When a backup is dequeued, an info log message will also include the wait time, calculated as `now - creationTimestamp`. When a backup is passed over due to overlap, an info log message will indicate which namespaces were in conflict.
|
||||
|
||||
#### Velero CLI
|
||||
|
||||
The `velero backup describe` output will include the current queue position for queued backups.
|
||||
18
go.mod
18
go.mod
@@ -2,8 +2,6 @@ module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.11
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.55.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
|
||||
@@ -43,9 +41,10 @@ require (
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/mod v0.29.0
|
||||
golang.org/x/mod v0.26.0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/text v0.31.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/api v0.241.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
@@ -181,14 +180,13 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
32
go.sum
32
go.sum
@@ -794,8 +794,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -833,8 +833,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -880,8 +880,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -908,8 +908,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -973,14 +973,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -990,8 +990,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1051,8 +1051,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM --platform=$TARGETPLATFORM golang:1.24.11-bookworm
|
||||
FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..f6205aa3c 100644
|
||||
index 5f939c481..6ae17f4a1 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -24,32 +24,31 @@ require (
|
||||
@@ -14,13 +14,13 @@ index 5f939c481..f6205aa3c 100644
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
- google.golang.org/api v0.106.0
|
||||
+ golang.org/x/crypto v0.45.0
|
||||
+ golang.org/x/net v0.47.0
|
||||
+ golang.org/x/crypto v0.36.0
|
||||
+ golang.org/x/net v0.38.0
|
||||
+ golang.org/x/oauth2 v0.28.0
|
||||
+ golang.org/x/sync v0.18.0
|
||||
+ golang.org/x/sys v0.38.0
|
||||
+ golang.org/x/term v0.37.0
|
||||
+ golang.org/x/text v0.31.0
|
||||
+ golang.org/x/sync v0.12.0
|
||||
+ golang.org/x/sys v0.31.0
|
||||
+ golang.org/x/term v0.30.0
|
||||
+ golang.org/x/text v0.23.0
|
||||
+ google.golang.org/api v0.114.0
|
||||
)
|
||||
|
||||
@@ -64,11 +64,11 @@ index 5f939c481..f6205aa3c 100644
|
||||
)
|
||||
|
||||
-go 1.18
|
||||
+go 1.24.0
|
||||
+go 1.23.0
|
||||
+
|
||||
+toolchain go1.24.11
|
||||
+toolchain go1.23.7
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..4a37e7ac7 100644
|
||||
index 026e1d2fa..805792055 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -1,23 +1,24 @@
|
||||
@@ -170,8 +170,8 @@ index 026e1d2fa..4a37e7ac7 100644
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -181,8 +181,8 @@ index 026e1d2fa..4a37e7ac7 100644
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
@@ -194,8 +194,8 @@ index 026e1d2fa..4a37e7ac7 100644
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -205,21 +205,21 @@ index 026e1d2fa..4a37e7ac7 100644
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
||||
52
pkg/builder/priority_class_builder.go
Normal file
52
pkg/builder/priority_class_builder.go
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package builder
|
||||
|
||||
import (
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
schedulingv1api "k8s.io/api/scheduling/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type PriorityClassBuilder struct {
|
||||
object *schedulingv1api.PriorityClass
|
||||
}
|
||||
|
||||
func ForPriorityClass(name string) *PriorityClassBuilder {
|
||||
return &PriorityClassBuilder{
|
||||
object: &schedulingv1api.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PriorityClassBuilder) Value(value int) *PriorityClassBuilder {
|
||||
p.object.Value = int32(value)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PriorityClassBuilder) PreemptionPolicy(policy string) *PriorityClassBuilder {
|
||||
preemptionPolicy := corev1api.PreemptionPolicy(policy)
|
||||
p.object.PreemptionPolicy = &preemptionPolicy
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PriorityClassBuilder) Result() *schedulingv1api.PriorityClass {
|
||||
return p.object
|
||||
}
|
||||
@@ -113,11 +113,7 @@ var (
|
||||
"datauploads.velero.io",
|
||||
"persistentvolumes",
|
||||
"persistentvolumeclaims",
|
||||
"clusterroles",
|
||||
"roles",
|
||||
"serviceaccounts",
|
||||
"clusterrolebindings",
|
||||
"rolebindings",
|
||||
"secrets",
|
||||
"configmaps",
|
||||
"limitranges",
|
||||
|
||||
@@ -275,7 +275,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
||||
log.WithError(err).Warn("Failed to get keepLatestMaintenanceJobs from ConfigMap, using CLI parameter value")
|
||||
}
|
||||
|
||||
if err := maintenance.DeleteOldJobs(r.Client, req.Name, keepJobs, log); err != nil {
|
||||
if err := maintenance.DeleteOldJobs(r.Client, *backupRepo, keepJobs, log); err != nil {
|
||||
log.WithError(err).Warn("Failed to delete old maintenance jobs")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -381,8 +381,13 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 		diag += fmt.Sprintf("error getting backup vs %s, err: %v\n", backupVSName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -392,7 +397,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 		}
 
 	if pvc != nil {
-		diag += kube.DiagnosePVC(pvc)
+		diag += kube.DiagnosePVC(pvc, events)
 
 		if pvc.Spec.VolumeName != "" {
 			if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -404,7 +409,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor
 		}
 
 	if vs != nil {
-		diag += csi.DiagnoseVS(vs)
+		diag += csi.DiagnoseVS(vs, events)
 
 		if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil && *vs.Status.BoundVolumeSnapshotContentName != "" {
 			if vsc, err := e.csiSnapshotClient.VolumeSnapshotContents().Get(ctx, *vs.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}); err != nil {
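The updated Diagnose* calls take the freshly listed events. The matching changes to kube.DiagnosePod and kube.DiagnosePVC are not part of this compare view, but judging from the DiagnoseVS hunk near the end of the diff and the expected test output below, the filtering presumably looks roughly like this sketch; the function name and exact output format are assumptions:

```go
package kube

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// diagnosePodEvents is a sketch (not the upstream implementation) of how
// DiagnosePod presumably appends events: it matches events to the pod by
// InvolvedObject.UID, keeps only Warning-type events, and adds one line per
// event to the diagnostic string.
func diagnosePodEvents(pod *corev1api.Pod, events *corev1api.EventList) string {
	diag := ""
	if events == nil {
		return diag
	}
	for _, e := range events.Items {
		if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
			diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
		}
	}
	return diag
}
```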
@@ -1288,6 +1288,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1313,6 +1314,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1341,6 +1343,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1359,6 +1362,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1404,6 +1408,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1419,6 +1424,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1436,6 +1442,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-vs-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -1633,6 +1640,74 @@ PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&backupPVCWithVolumeName,
|
||||
&backupPV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-6"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
},
|
||||
snapshotClientObj: []runtime.Object{
|
||||
&backupVSWithVSC,
|
||||
&backupVSC,
|
||||
},
|
||||
expected: `begin diagnose CSI exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-6, message message-6
|
||||
PVC velero/fake-backup, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
VS velero/fake-backup, bind to fake-vsc, readyToUse false, errMessage fake-vs-message
|
||||
VS event reason reason-4, message message-4
|
||||
VSC fake-vsc, readyToUse false, errMessage fake-vsc-message, handle
|
||||
end diagnose CSI exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -287,8 +287,13 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
 		diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -298,7 +303,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
 		}
 
 	if pvc != nil {
-		diag += kube.DiagnosePVC(pvc)
+		diag += kube.DiagnosePVC(pvc, events)
 
 		if pvc.Spec.VolumeName != "" {
 			if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}); err != nil {
@@ -549,6 +549,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -574,6 +575,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -602,6 +604,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -620,6 +623,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-restore",
|
||||
UID: "fake-pvc-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: restore.APIVersion,
|
||||
@@ -758,6 +762,60 @@ Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&restorePodWithNodeName,
|
||||
&restorePVCWithVolumeName,
|
||||
&restorePV,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-5"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose restore exposer
|
||||
Pod velero/fake-restore, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-5, message message-5
|
||||
PVC velero/fake-restore, phase Pending, binding to fake-pv
|
||||
PVC event reason reason-3, message message-3
|
||||
PV fake-pv, phase Pending, reason , message fake-pv-message
|
||||
end diagnose restore exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -251,8 +251,13 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
 		diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
 	}
 
+	events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		diag += fmt.Sprintf("error listing events, err: %v\n", err)
+	}
+
 	if pod != nil {
-		diag += kube.DiagnosePod(pod)
+		diag += kube.DiagnosePod(pod, events)
 
 		if pod.Spec.NodeName != "" {
 			if err := nodeagent.KbClientIsRunningInNode(ctx, ownerObject.Namespace, pod.Spec.NodeName, e.kubeClient); err != nil {
@@ -466,6 +466,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -491,6 +492,7 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: velerov1.DefaultNamespace,
|
||||
Name: "fake-backup",
|
||||
UID: "fake-pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: backup.APIVersion,
|
||||
@@ -587,6 +589,48 @@ end diagnose pod volume exposer`,
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
{
|
||||
name: "with events",
|
||||
ownerBackup: backup,
|
||||
kubeClientObj: []runtime.Object{
|
||||
&backupPodWithNodeName,
|
||||
&nodeAgentPod,
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "other-namespace", Name: "event-3"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
&corev1api.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: velerov1.DefaultNamespace, Name: "event-4"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
},
|
||||
expected: `begin diagnose pod volume exposer
|
||||
Pod velero/fake-backup, phase Pending, node name fake-node
|
||||
Pod condition Initialized, status True, reason , message fake-pod-message
|
||||
Pod event reason reason-2, message message-2
|
||||
Pod event reason reason-4, message message-4
|
||||
end diagnose pod volume exposer`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -143,6 +143,10 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
 		return nil, errors.Errorf("data is not available in config map %s", configName)
 	}
 
+	if len(cm.Data) > 1 {
+		return nil, errors.Errorf("more than one keys are found in ConfigMap %s's data. only expect one", configName)
+	}
+
 	jsonString := ""
 	for _, v := range cm.Data {
 		jsonString = v
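The new guard means a node-agent ConfigMap must carry the whole JSON document under exactly one data key. A sketch of creating such a ConfigMap with client-go; the key name and field values are illustrative and mirror the test fixtures in the next hunk:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One key only: GetConfigs now rejects ConfigMaps with multiple data keys.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "node-agent-config"},
		Data: map[string]string{
			"node-agent-config.json": `{"loadConcurrency":{"globalConfig":5},"priorityClassName":"high-priority"}`,
		},
	}
	created, err := client.CoreV1().ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created", created.Name)
}
```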
@@ -249,6 +249,7 @@ func TestGetConfigs(t *testing.T) {
|
||||
cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"loadConcurrency\":{\"globalConfig\": 5}}").Result()
|
||||
cmWithPriorityClass := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"high-priority\"}").Result()
|
||||
cmWithPriorityClassAndOther := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key", "{\"priorityClassName\": \"low-priority\", \"loadConcurrency\":{\"globalConfig\": 3}}").Result()
|
||||
cmWithMultipleKeysInData := builder.ForConfigMap("fake-ns", "node-agent-config").Data("fake-key-1", "{}", "fake-key-2", "{}").Result()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -331,6 +332,14 @@ func TestGetConfigs(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ConfigMap's Data has more than one key",
|
||||
namespace: "fake-ns",
|
||||
kubeClientObj: []runtime.Object{
|
||||
cmWithMultipleKeysInData,
|
||||
},
|
||||
expectErr: "more than one keys are found in ConfigMap node-agent-config's data. only expect one",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,11 +17,11 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,11 +17,11 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
proto "github.com/vmware-tanzu/velero/pkg/plugin/generated"
|
||||
|
||||
@@ -17,10 +17,9 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
@@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
|
||||
@@ -92,18 +92,12 @@ func newRestorer(
 
 	_, _ = pvrInformer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
-			UpdateFunc: func(oldObj, newObj any) {
-				pvr := newObj.(*velerov1api.PodVolumeRestore)
-				pvrOld := oldObj.(*velerov1api.PodVolumeRestore)
-
+			UpdateFunc: func(_, obj any) {
+				pvr := obj.(*velerov1api.PodVolumeRestore)
 				if pvr.GetLabels()[velerov1api.RestoreUIDLabel] != string(restore.UID) {
 					return
 				}
 
-				if pvr.Status.Phase == pvrOld.Status.Phase {
-					return
-				}
-
 				if pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseCompleted || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseFailed || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseCanceled {
 					r.resultsLock.Lock()
 					defer r.resultsLock.Unlock()
@@ -32,11 +32,13 @@ import (
 	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	velerolabel "github.com/vmware-tanzu/velero/pkg/label"
 	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
 	"github.com/vmware-tanzu/velero/pkg/util"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -68,11 +70,22 @@ func GenerateJobName(repo string) string {
 }
 
 // DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs
-func DeleteOldJobs(cli client.Client, repo string, keep int, logger logrus.FieldLogger) error {
+func DeleteOldJobs(cli client.Client, repo velerov1api.BackupRepository, keep int, logger logrus.FieldLogger) error {
 	logger.Infof("Start to delete old maintenance jobs. %d jobs will be kept.", keep)
 	// Get the maintenance job list by label
 	jobList := &batchv1api.JobList{}
-	err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
+	err := cli.List(
+		context.TODO(),
+		jobList,
+		&client.ListOptions{
+			Namespace: repo.Namespace,
+			LabelSelector: labels.SelectorFromSet(
+				map[string]string{
+					RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
+				},
+			),
+		},
+	)
 	if err != nil {
 		return err
 	}
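velerolabel.GetValidName comes from pkg/label; as used here it keeps the repository-name label within the 63-character limit imposed on Kubernetes label values, so BackupRepository names that are too long still produce listable, valid labels. A rough sketch of the idea, not the upstream implementation:

```go
package label

import (
	"crypto/sha256"
	"fmt"
)

// getValidNameSketch illustrates the behavior relied on by this diff: a value
// longer than 63 characters is truncated and suffixed with a short hash so it
// stays a valid Kubernetes label value while remaining stable for the same
// input. The exact truncation and hash length are assumptions.
func getValidNameSketch(value string) string {
	const maxLen = 63
	if len(value) <= maxLen {
		return value
	}
	sum := sha256.Sum256([]byte(value))
	suffix := fmt.Sprintf("%x", sum)[:6]
	return value[:maxLen-len(suffix)] + suffix
}
```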
@@ -339,10 +352,17 @@ func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string,
|
||||
// and then return the maintenance jobs' status in the range of limit
|
||||
func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) {
|
||||
jobList := &batchv1api.JobList{}
|
||||
err := cli.List(context.TODO(), jobList, &client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
},
|
||||
client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name}),
|
||||
err := cli.List(
|
||||
context.TODO(),
|
||||
jobList,
|
||||
&client.ListOptions{
|
||||
Namespace: repo.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
map[string]string{
|
||||
RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
|
||||
},
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -558,7 +578,7 @@ func buildJob(
 	}
 
 	podLabels := map[string]string{
-		RepositoryNameLabel: repo.Name,
+		RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
 	}
 
 	for _, k := range util.ThirdPartyLabels {
@@ -588,7 +608,7 @@ func buildJob(
 			Name:      GenerateJobName(repo.Name),
 			Namespace: repo.Namespace,
 			Labels: map[string]string{
-				RepositoryNameLabel: repo.Name,
+				RepositoryNameLabel: velerolabel.GetValidName(repo.Name),
 			},
 		},
 		Spec: batchv1api.JobSpec{
@@ -40,6 +40,7 @@ import (
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
velerolabel "github.com/vmware-tanzu/velero/pkg/label"
|
||||
"github.com/vmware-tanzu/velero/pkg/repository/provider"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
@@ -48,7 +49,7 @@ import (
|
||||
"github.com/vmware-tanzu/velero/pkg/util/logging"
|
||||
)
|
||||
|
||||
func TestGenerateJobName1(t *testing.T) {
|
||||
func TestGenerateJobName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
repo string
|
||||
expectedStart string
|
||||
@@ -82,59 +83,62 @@ func TestGenerateJobName1(t *testing.T) {
|
||||
}
|
||||
func TestDeleteOldJobs(t *testing.T) {
|
||||
// Set up test repo and keep value
|
||||
repo := "test-repo"
|
||||
keep := 2
|
||||
|
||||
// Create some maintenance jobs for testing
|
||||
var objs []client.Object
|
||||
// Create a newer job
|
||||
newerJob := &batchv1api.Job{
|
||||
repo := &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{RepositoryNameLabel: repo},
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
},
|
||||
}
|
||||
keep := 1
|
||||
|
||||
jobArray := []client.Object{
|
||||
&batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-0",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
},
|
||||
&batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-1",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
},
|
||||
}
|
||||
|
||||
newJob := &batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-new",
|
||||
Namespace: velerov1api.DefaultNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
}
|
||||
objs = append(objs, newerJob)
|
||||
// Create older jobs
|
||||
for i := 2; i <= 3; i++ {
|
||||
olderJob := &batchv1api.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("job%d", i),
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{RepositoryNameLabel: repo},
|
||||
CreationTimestamp: metav1.Time{
|
||||
Time: metav1.Now().Add(time.Duration(-24*i) * time.Hour),
|
||||
},
|
||||
},
|
||||
Spec: batchv1api.JobSpec{},
|
||||
}
|
||||
objs = append(objs, olderJob)
|
||||
}
|
||||
// Create a fake Kubernetes client
|
||||
|
||||
// Create a fake Kubernetes client with 2 jobs.
|
||||
scheme := runtime.NewScheme()
|
||||
_ = batchv1api.AddToScheme(scheme)
|
||||
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
|
||||
cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(jobArray...).Build()
|
||||
|
||||
// Create a new job
|
||||
require.NoError(t, cli.Create(context.TODO(), newJob))
|
||||
|
||||
// Call the function
|
||||
err := DeleteOldJobs(cli, repo, keep, velerotest.NewLogger())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, DeleteOldJobs(cli, *repo, keep, velerotest.NewLogger()))
|
||||
|
||||
// Get the remaining jobs
|
||||
jobList := &batchv1api.JobList{}
|
||||
err = cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cli.List(t.Context(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo.Name})))
|
||||
|
||||
// We expect the number of jobs to be equal to 'keep'
|
||||
assert.Len(t, jobList.Items, keep)
|
||||
|
||||
// We expect that the oldest jobs were deleted
|
||||
// Job3 should not be present in the remaining list
|
||||
assert.NotContains(t, jobList.Items, objs[2])
|
||||
|
||||
// Job2 should also not be present in the remaining list
|
||||
assert.NotContains(t, jobList.Items, objs[1])
|
||||
// Only the new created job should be left.
|
||||
assert.Equal(t, jobList.Items[0].Name, newJob.Name)
|
||||
}
|
||||
|
||||
func TestWaitForJobComplete(t *testing.T) {
|
||||
@@ -571,7 +575,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
repo := &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: veleroNamespace,
|
||||
Name: "fake-repo",
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
},
|
||||
Spec: velerov1api.BackupRepositorySpec{
|
||||
BackupStorageLocation: "default",
|
||||
@@ -595,7 +599,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now},
|
||||
},
|
||||
}
|
||||
@@ -604,7 +608,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job1",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -624,7 +628,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job2",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -645,7 +649,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job3",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 2)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -665,7 +669,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job4",
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{RepositoryNameLabel: "fake-repo"},
|
||||
Labels: map[string]string{RepositoryNameLabel: velerolabel.GetValidName(repo.Name)},
|
||||
CreationTimestamp: metav1.Time{Time: now.Add(time.Hour * 3)},
|
||||
},
|
||||
Status: batchv1api.JobStatus{
|
||||
@@ -698,7 +702,7 @@ func TestWaitAllJobsComplete(t *testing.T) {
|
||||
{
|
||||
name: "list job error",
|
||||
runtimeScheme: schemeFail,
|
||||
expectedError: "error listing maintenance job for repo fake-repo: no kind is registered for the type v1.JobList in scheme",
|
||||
expectedError: "error listing maintenance job for repo label with more than 63 characters should be modified: no kind is registered for the type v1.JobList in scheme",
|
||||
},
|
||||
{
|
||||
name: "job not exist",
|
||||
@@ -943,6 +947,7 @@ func TestBuildJob(t *testing.T) {
|
||||
expectedSecurityContext *corev1api.SecurityContext
|
||||
expectedPodSecurityContext *corev1api.PodSecurityContext
|
||||
expectedImagePullSecrets []corev1api.LocalObjectReference
|
||||
backupRepository *velerov1api.BackupRepository
|
||||
}{
|
||||
{
|
||||
name: "Valid maintenance job without third party labels",
|
||||
@@ -1060,6 +1065,64 @@ func TestBuildJob(t *testing.T) {
|
||||
expectedJobName: "",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
|
||||
m: &velerotypes.JobConfigs{
|
||||
PodResources: &kube.PodResources{
|
||||
CPURequest: "100m",
|
||||
MemoryRequest: "128Mi",
|
||||
CPULimit: "200m",
|
||||
MemoryLimit: "256Mi",
|
||||
},
|
||||
},
|
||||
deploy: deploy2,
|
||||
logLevel: logrus.InfoLevel,
|
||||
logFormat: logging.NewFormatFlag(),
|
||||
expectedError: false,
|
||||
expectedEnv: []corev1api.EnvVar{
|
||||
{
|
||||
Name: "test-name",
|
||||
Value: "test-value",
|
||||
},
|
||||
},
|
||||
expectedEnvFrom: []corev1api.EnvFromSource{
|
||||
{
|
||||
ConfigMapRef: &corev1api.ConfigMapEnvSource{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "test-configmap",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
SecretRef: &corev1api.SecretEnvSource{
|
||||
LocalObjectReference: corev1api.LocalObjectReference{
|
||||
Name: "test-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPodLabel: map[string]string{
|
||||
RepositoryNameLabel: velerolabel.GetValidName("label with more than 63 characters should be modified"),
|
||||
"azure.workload.identity/use": "fake-label-value",
|
||||
},
|
||||
expectedSecurityContext: nil,
|
||||
expectedPodSecurityContext: nil,
|
||||
expectedImagePullSecrets: []corev1api.LocalObjectReference{
|
||||
{
|
||||
Name: "imagePullSecret1",
|
||||
},
|
||||
},
|
||||
backupRepository: &velerov1api.BackupRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "velero",
|
||||
Name: "label with more than 63 characters should be modified",
|
||||
},
|
||||
Spec: velerov1api.BackupRepositorySpec{
|
||||
VolumeNamespace: "test-123",
|
||||
RepositoryType: "kopia",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
param := provider.RepoParam{
|
||||
@@ -1083,6 +1146,10 @@ func TestBuildJob(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.backupRepository != nil {
|
||||
param.BackupRepo = tc.backupRepository
|
||||
}
|
||||
|
||||
// Create a fake clientset with resources
|
||||
objs := []runtime.Object{param.BackupLocation, param.BackupRepo}
|
||||
|
||||
|
||||
@@ -185,8 +185,8 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 			securityContextSet = true
 		}
 	}
-	// if securityContext configmap is unavailable but first container in pod has a SecurityContext set, then copy this security context
-	if !securityContextSet && len(pod.Spec.Containers) != 0 && pod.Spec.Containers[0].SecurityContext != nil {
+	// if first container in pod has a SecurityContext set, then copy this security context
+	if len(pod.Spec.Containers) != 0 && pod.Spec.Containers[0].SecurityContext != nil {
 		securityContext = *pod.Spec.Containers[0].SecurityContext.DeepCopy()
 		securityContextSet = true
 	}
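To make the new precedence concrete: with the `!securityContextSet` guard removed, the first container's SecurityContext now wins over a value configured through the plugin ConfigMap. A minimal sketch of the resulting behavior; the helper name is illustrative, the real logic lives inline in PodVolumeRestoreAction.Execute:

```go
package actions

import (
	corev1api "k8s.io/api/core/v1"
)

// chooseRestoreHelperSecurityContext sketches the effective rule after this
// change: if the pod's first container declares a SecurityContext, a deep copy
// of it is used for the restore-helper init container, taking precedence over
// whatever the ConfigMap provided; otherwise the ConfigMap value (if any) is kept.
func chooseRestoreHelperSecurityContext(pod *corev1api.Pod, fromConfigMap *corev1api.SecurityContext) *corev1api.SecurityContext {
	if len(pod.Spec.Containers) != 0 && pod.Spec.Containers[0].SecurityContext != nil {
		return pod.Spec.Containers[0].SecurityContext.DeepCopy()
	}
	return fromConfigMap
}
```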
@@ -17,20 +17,15 @@ limitations under the License.
|
||||
package actions
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/kuberesource"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util"
|
||||
)
|
||||
@@ -91,46 +86,13 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if pvc.Annotations == nil {
|
||||
pvc.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
log := p.logger.WithFields(map[string]any{
|
||||
"kind": pvc.Kind,
|
||||
"namespace": pvc.Namespace,
|
||||
"name": pvc.Name,
|
||||
})
|
||||
|
||||
// Handle selected node annotation
|
||||
node, ok := pvc.Annotations[AnnSelectedNode]
|
||||
if ok {
|
||||
// fetch node mapping from configMap
|
||||
newNode, err := getNewNodeFromConfigMap(p.configMapClient, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(newNode) != 0 {
|
||||
// Check whether the mapped node exists first.
|
||||
exists, err := isNodeExist(p.nodeClient, newNode)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking %s's mapped node %s existence", node, newNode)
|
||||
}
|
||||
if !exists {
|
||||
log.Warnf("Selected-node's mapped node doesn't exist: source: %s, dest: %s. Please check the ConfigMap with label velero.io/change-pvc-node-selector.", node, newNode)
|
||||
}
|
||||
|
||||
// set node selector
|
||||
// We assume that node exist for node-mapping
|
||||
pvc.Annotations[AnnSelectedNode] = newNode
|
||||
log.Infof("Updating selected-node to %s from %s", newNode, node)
|
||||
} else {
|
||||
log.Info("Clearing PVC selected-node annotation")
|
||||
delete(pvc.Annotations, AnnSelectedNode)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove other annotations
|
||||
// Remove PVC annotations
|
||||
removePVCAnnotations(
|
||||
&pvc,
|
||||
[]string{
|
||||
@@ -138,6 +100,7 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
AnnBoundByController,
|
||||
AnnStorageProvisioner,
|
||||
AnnBetaStorageProvisioner,
|
||||
AnnSelectedNode,
|
||||
velerov1api.VolumeSnapshotLabel,
|
||||
velerov1api.DataUploadNameAnnotation,
|
||||
},
|
||||
@@ -167,34 +130,6 @@ func (p *PVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func getNewNodeFromConfigMap(client corev1client.ConfigMapInterface, node string) (string, error) {
|
||||
// fetch node mapping from configMap
|
||||
config, err := common.GetPluginConfig(common.PluginKindRestoreItemAction, "velero.io/change-pvc-node-selector", client)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
// there is no node mapping defined for change-pvc-node
|
||||
// so we will return empty new node
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return config.Data[node], nil
|
||||
}
|
||||
|
||||
// isNodeExist check if node resource exist or not
|
||||
func isNodeExist(nodeClient corev1client.NodeInterface, name string) (bool, error) {
|
||||
_, err := nodeClient.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func removePVCAnnotations(pvc *corev1api.PersistentVolumeClaim, remove []string) {
|
||||
for k := range pvc.Annotations {
|
||||
if util.Contains(remove, k) {
|
||||
|
||||
@@ -17,11 +17,9 @@ limitations under the License.
|
||||
package actions
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
@@ -42,105 +40,57 @@ import (
|
||||
// desired result.
|
||||
func TestPVCActionExecute(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
configMap *corev1api.ConfigMap
|
||||
node *corev1api.Node
|
||||
newNode *corev1api.Node
|
||||
want *corev1api.PersistentVolumeClaim
|
||||
wantErr error
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
want *corev1api.PersistentVolumeClaim
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
name: "a valid mapping for a persistent volume claim is applied correctly",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
newNode: builder.ForNode("dest-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "dest-node"),
|
||||
).Result(),
|
||||
},
|
||||
{
|
||||
name: "when no config map exists for the plugin, the item is returned without node selector",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/some-other-plugin", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when no node-mappings exist in the plugin config map, the item is returned without node selector",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when persistent volume claim has no node selector, the item is returned as-is",
|
||||
name: "a persistent volume claim with no annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node", "dest-node").
|
||||
Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
},
|
||||
{
|
||||
name: "when persistent volume claim's node-selector has no mapping in the config map, the item is returned without node selector",
|
||||
name: "a persistent volume claim with selected-node annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
configMap: builder.ForConfigMap("velero", "change-pvc-node").
|
||||
ObjectMeta(builder.WithLabels("velero.io/plugin-config", "", "velero.io/change-pvc-node-selector", "RestoreItemAction")).
|
||||
Data("source-node-1", "dest-node").
|
||||
Result(),
|
||||
node: builder.ForNode("source-node").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(builder.WithAnnotationsMap(map[string]string{})).Result(),
|
||||
},
|
||||
{
|
||||
name: "a persistent volume claim with other annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
|
||||
).Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
|
||||
builder.WithAnnotations("other-anno-1", "other-value-1", "other-anno-2", "other-value-2"),
|
||||
).Result(),
|
||||
},
|
||||
{
|
||||
name: "a persistent volume claim with other annotation and selected-node annotation",
|
||||
pvc: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").
|
||||
ObjectMeta(
|
||||
builder.WithAnnotations("other-anno", "other-value", "volume.kubernetes.io/selected-node", "source-node"),
|
||||
).Result(),
|
||||
want: builder.ForPersistentVolumeClaim("source-ns", "pvc-1").ObjectMeta(
|
||||
builder.WithAnnotations("other-anno", "other-value"),
|
||||
).Result(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
clientset := fake.NewSimpleClientset()
|
||||
logger := logrus.StandardLogger()
|
||||
buf := bytes.Buffer{}
|
||||
logrus.SetOutput(&buf)
|
||||
|
||||
a := NewPVCAction(
|
||||
logger,
|
||||
velerotest.NewLogger(),
|
||||
clientset.CoreV1().ConfigMaps("velero"),
|
||||
clientset.CoreV1().Nodes(),
|
||||
)
|
||||
|
||||
// set up test data
|
||||
if tc.configMap != nil {
|
||||
_, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(t.Context(), tc.configMap, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if tc.node != nil {
|
||||
_, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.node, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tc.newNode != nil {
|
||||
_, err := clientset.CoreV1().Nodes().Create(t.Context(), tc.newNode, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -156,10 +106,6 @@ func TestPVCActionExecute(t *testing.T) {
|
||||
// execute method under test
|
||||
res, err := a.Execute(input)
|
||||
|
||||
// Make sure mapped selected-node exists.
|
||||
logOutput := buf.String()
|
||||
assert.NotContains(t, logOutput, "Selected-node's mapped node doesn't exist")
|
||||
|
||||
// validate for both error and non-error cases
|
||||
switch {
|
||||
case tc.wantErr != nil:
|
||||
|
||||
@@ -69,9 +69,8 @@ type Request struct {
 }
 
 type restoredItemStatus struct {
-	action      string
-	itemExists  bool
-	createdName string // Actual name assigned by K8s for GenerateName resources
+	action     string
+	itemExists bool
 }
 
 // GetItemOperationsList returns ItemOperationsList, initializing it if necessary
@@ -88,15 +87,9 @@ func (r *Request) GetItemOperationsList() *[]*itemoperation.RestoreOperation {
 func (r *Request) RestoredResourceList() map[string][]string {
 	resources := map[string][]string{}
 	for i, item := range r.RestoredItems {
-		// Use createdName if available (GenerateName case), otherwise itemKey.name
-		name := i.name
-		if item.createdName != "" {
-			name = item.createdName
-		}
-
-		entry := name
+		entry := i.name
 		if i.namespace != "" {
-			entry = fmt.Sprintf("%s/%s", i.namespace, name)
+			entry = fmt.Sprintf("%s/%s", i.namespace, i.name)
 		}
 		entry = fmt.Sprintf("%s(%s)", entry, item.action)
 		resources[i.resource] = append(resources[i.resource], entry)
@@ -741,7 +741,7 @@ func (ctx *restoreContext) processSelectedResource(
|
||||
namespace: ns.Namespace,
|
||||
name: ns.Name,
|
||||
}
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{action: ItemRestoreResultCreated, itemExists: true, createdName: ns.Name}
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{action: ItemRestoreResultCreated, itemExists: true}
|
||||
}
|
||||
|
||||
// Keep track of namespaces that we know exist so we don't
|
||||
@@ -1142,7 +1142,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
||||
namespace: nsToEnsure.Namespace,
|
||||
name: nsToEnsure.Name,
|
||||
}
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{action: ItemRestoreResultCreated, itemExists: true, createdName: nsToEnsure.Name}
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{action: ItemRestoreResultCreated, itemExists: true}
|
||||
}
|
||||
} else {
|
||||
if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
|
||||
@@ -1514,11 +1514,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
||||
createdObj, restoreErr = resourceClient.Create(obj)
|
||||
if restoreErr == nil {
|
||||
itemExists = true
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{
|
||||
action: ItemRestoreResultCreated,
|
||||
itemExists: itemExists,
|
||||
createdName: createdObj.GetName(),
|
||||
}
|
||||
ctx.restoredItems[itemKey] = restoredItemStatus{action: ItemRestoreResultCreated, itemExists: itemExists}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1703,21 +1699,21 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
||||
createdObj.SetManagedFields(obj.GetManagedFields())
|
||||
patchBytes, err := generatePatch(withoutManagedFields, createdObj)
|
||||
if err != nil {
|
||||
restoreLogger.Errorf("error generating patch for managed fields %s: %s", kube.NamespaceAndName(createdObj), err.Error())
|
||||
restoreLogger.Errorf("error generating patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs, itemExists
|
||||
}
|
||||
if patchBytes != nil {
|
||||
if _, err = resourceClient.Patch(createdObj.GetName(), patchBytes); err != nil {
|
||||
if _, err = resourceClient.Patch(obj.GetName(), patchBytes); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
restoreLogger.Errorf("error patch for managed fields %s: %s", kube.NamespaceAndName(createdObj), err.Error())
|
||||
restoreLogger.Errorf("error patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs, itemExists
|
||||
}
|
||||
restoreLogger.Warnf("item not found when patching managed fields %s: %s", kube.NamespaceAndName(createdObj), err.Error())
|
||||
restoreLogger.Warnf("item not found when patching managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
|
||||
warnings.Add(namespace, err)
|
||||
} else {
|
||||
restoreLogger.Infof("the managed fields for %s is patched", kube.NamespaceAndName(createdObj))
|
||||
restoreLogger.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1091,8 +1091,8 @@ func TestRestoreItems(t *testing.T) {
|
||||
),
|
||||
},
|
||||
expectedRestoreItems: map[itemKey]restoredItemStatus{
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true, createdName: "ns-1"},
|
||||
{resource: "v1/Pod", namespace: "ns-1", name: "pod-1"}: {action: "created", itemExists: true, createdName: "pod-1"},
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
|
||||
{resource: "v1/Pod", namespace: "ns-1", name: "pod-1"}: {action: "created", itemExists: true},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1201,7 +1201,7 @@ func TestRestoreItems(t *testing.T) {
|
||||
test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()),
|
||||
},
|
||||
expectedRestoreItems: map[itemKey]restoredItemStatus{
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true, createdName: "ns-1"},
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
|
||||
{resource: "v1/ServiceAccount", namespace: "ns-1", name: "sa-1"}: {action: "skipped", itemExists: true},
|
||||
},
|
||||
},
|
||||
@@ -1220,7 +1220,7 @@ func TestRestoreItems(t *testing.T) {
|
||||
test.Secrets(builder.ForSecret("ns-1", "sa-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Data(map[string][]byte{"key-1": []byte("value-1")}).Result()),
|
||||
},
|
||||
expectedRestoreItems: map[itemKey]restoredItemStatus{
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true, createdName: "ns-1"},
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
|
||||
{resource: "v1/Secret", namespace: "ns-1", name: "sa-1"}: {action: "updated", itemExists: true},
|
||||
},
|
||||
},
|
||||
@@ -1239,7 +1239,7 @@ func TestRestoreItems(t *testing.T) {
|
||||
test.Secrets(builder.ForSecret("ns-1", "sa-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Data(map[string][]byte{"key-1": []byte("value-1")}).Result()),
|
||||
},
|
||||
expectedRestoreItems: map[itemKey]restoredItemStatus{
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true, createdName: "ns-1"},
|
||||
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
|
||||
{resource: "v1/Secret", namespace: "ns-1", name: "sa-1"}: {action: "updated", itemExists: true},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -447,8 +447,13 @@ func GetVolumeSnapshotClassForStorageClass(
|
||||
return &vsClass, nil
|
||||
}
|
||||
return nil, fmt.Errorf(
|
||||
"failed to get VolumeSnapshotClass for provisioner %s, ensure that the desired VolumeSnapshot class has the %s label or %s annotation",
|
||||
provisioner, velerov1api.VolumeSnapshotClassSelectorLabel, velerov1api.VolumeSnapshotClassKubernetesAnnotation)
|
||||
"failed to get VolumeSnapshotClass for provisioner %s: "+
|
||||
"ensure that the desired VolumeSnapshotClass has the %s label or %s annotation, "+
|
||||
"and that its driver matches the StorageClass provisioner",
|
||||
provisioner,
|
||||
velerov1api.VolumeSnapshotClassSelectorLabel,
|
||||
velerov1api.VolumeSnapshotClassKubernetesAnnotation,
|
||||
)
|
||||
}
|
||||
|
||||
// IsVolumeSnapshotClassHasListerSecret returns whether a volumesnapshotclass has a snapshotlister secret
|
||||
@@ -684,7 +689,7 @@ func WaitUntilVSCHandleIsReady(
|
||||
return vsc, nil
|
||||
}
|
||||
|
||||
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
|
||||
func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot, events *corev1api.EventList) string {
|
||||
vscName := ""
|
||||
readyToUse := false
|
||||
errMessage := ""
|
||||
@@ -705,6 +710,14 @@ func DiagnoseVS(vs *snapshotv1api.VolumeSnapshot) string {
|
||||
|
||||
diag := fmt.Sprintf("VS %s/%s, bind to %s, readyToUse %v, errMessage %s\n", vs.Namespace, vs.Name, vscName, readyToUse, errMessage)
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == vs.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("VS event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
|
||||
@@ -1699,6 +1699,7 @@ func TestDiagnoseVS(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
vs *snapshotv1api.VolumeSnapshot
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
@@ -1781,11 +1782,81 @@ func TestDiagnoseVS(t *testing.T) {
|
||||
},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage fake-message\n",
|
||||
},
|
||||
{
|
||||
name: "VS with VSC and empty event",
|
||||
vs: &snapshotv1api.VolumeSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-vs",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotStatus{
|
||||
BoundVolumeSnapshotContentName: &vscName,
|
||||
ReadyToUse: &readyToUse,
|
||||
Error: &snapshotv1api.VolumeSnapshotError{},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \n",
|
||||
},
|
||||
{
|
||||
name: "VS with VSC and events",
|
||||
vs: &snapshotv1api.VolumeSnapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-vs",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-vs-uid",
|
||||
},
|
||||
Status: &snapshotv1api.VolumeSnapshotStatus{
|
||||
BoundVolumeSnapshotContentName: &vscName,
|
||||
ReadyToUse: &readyToUse,
|
||||
Error: &snapshotv1api.VolumeSnapshotError{},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-vs-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "VS fake-ns/fake-vs, bind to fake-vsc, readyToUse true, errMessage \nVS event reason reason-3, message message-3\nVS event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnoseVS(tc.vs)
|
||||
diag := DiagnoseVS(tc.vs, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -17,12 +17,12 @@ limitations under the License.
|
||||
package kube
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
@@ -268,13 +268,21 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity {
|
||||
return nil
|
||||
}
|
||||
|
||||
func DiagnosePod(pod *corev1api.Pod) string {
|
||||
func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
|
||||
diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
|
||||
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
|
||||
}
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
|
||||
@@ -896,10 +896,11 @@ func TestDiagnosePod(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pod *corev1api.Pod
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "pod with all info",
|
||||
name: "pod with all info but no events",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
@@ -928,11 +929,111 @@ func TestDiagnosePod(t *testing.T) {
|
||||
},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
|
||||
},
|
||||
{
|
||||
name: "pod with all info and empty event list",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
NodeName: "fake-node",
|
||||
},
|
||||
Status: corev1api.PodStatus{
|
||||
Phase: corev1api.PodPending,
|
||||
Conditions: []corev1api.PodCondition{
|
||||
{
|
||||
Type: corev1api.PodInitialized,
|
||||
Status: corev1api.ConditionTrue,
|
||||
Reason: "fake-reason-1",
|
||||
Message: "fake-message-1",
|
||||
},
|
||||
{
|
||||
Type: corev1api.PodScheduled,
|
||||
Status: corev1api.ConditionFalse,
|
||||
Reason: "fake-reason-2",
|
||||
Message: "fake-message-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\n",
|
||||
},
|
||||
{
|
||||
name: "pod with all info and events",
|
||||
pod: &corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pod",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-pod-uid",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
NodeName: "fake-node",
|
||||
},
|
||||
Status: corev1api.PodStatus{
|
||||
Phase: corev1api.PodPending,
|
||||
Conditions: []corev1api.PodCondition{
|
||||
{
|
||||
Type: corev1api.PodInitialized,
|
||||
Status: corev1api.ConditionTrue,
|
||||
Reason: "fake-reason-1",
|
||||
Message: "fake-message-1",
|
||||
},
|
||||
{
|
||||
Type: corev1api.PodScheduled,
|
||||
Status: corev1api.ConditionFalse,
|
||||
Reason: "fake-reason-2",
|
||||
Message: "fake-message-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pod-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "Pod fake-ns/fake-pod, phase Pending, node name fake-node\nPod condition Initialized, status True, reason fake-reason-1, message fake-message-1\nPod condition PodScheduled, status False, reason fake-reason-2, message fake-message-2\nPod event reason reason-3, message message-3\nPod event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnosePod(tc.pod)
|
||||
diag := DiagnosePod(tc.pod, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -463,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
|
||||
func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string {
|
||||
diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName)
|
||||
|
||||
if events != nil {
|
||||
for _, e := range events.Items {
|
||||
if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning {
|
||||
diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diag
|
||||
}
|
||||
|
||||
func DiagnosePV(pv *corev1api.PersistentVolume) string {
|
||||
|
||||
@@ -1593,10 +1593,11 @@ func TestDiagnosePVC(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvc *corev1api.PersistentVolumeClaim
|
||||
events *corev1api.EventList
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "pvc with all info",
|
||||
name: "pvc with all info but no events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
@@ -1611,11 +1612,83 @@ func TestDiagnosePVC(t *testing.T) {
|
||||
},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
|
||||
},
|
||||
{
|
||||
name: "pvc with all info and empty events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
Namespace: "fake-ns",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "fake-pv",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\n",
|
||||
},
|
||||
{
|
||||
name: "pvc with all info and events",
|
||||
pvc: &corev1api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-pvc",
|
||||
Namespace: "fake-ns",
|
||||
UID: "fake-pvc-uid",
|
||||
},
|
||||
Spec: corev1api.PersistentVolumeClaimSpec{
|
||||
VolumeName: "fake-pv",
|
||||
},
|
||||
Status: corev1api.PersistentVolumeClaimStatus{
|
||||
Phase: corev1api.ClaimPending,
|
||||
},
|
||||
},
|
||||
events: &corev1api.EventList{Items: []corev1api.Event{
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-1"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-1",
|
||||
Message: "message-1",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-uid-2"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-2",
|
||||
Message: "message-2",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-3",
|
||||
Message: "message-3",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-4",
|
||||
Message: "message-4",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeNormal,
|
||||
Reason: "reason-5",
|
||||
Message: "message-5",
|
||||
},
|
||||
{
|
||||
InvolvedObject: corev1api.ObjectReference{UID: "fake-pvc-uid"},
|
||||
Type: corev1api.EventTypeWarning,
|
||||
Reason: "reason-6",
|
||||
Message: "message-6",
|
||||
},
|
||||
}},
|
||||
expected: "PVC fake-ns/fake-pvc, phase Pending, binding to fake-pv\nPVC event reason reason-3, message message-3\nPVC event reason reason-6, message message-6\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
diag := DiagnosePVC(tc.pvc)
|
||||
diag := DiagnosePVC(tc.pvc, tc.events)
|
||||
assert.Equal(t, tc.expected, diag)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -23,6 +23,8 @@ By default, `velero install` does not install Velero's [File System Backup][3].
|
||||
|
||||
If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add the file system backup to your existing install.
|
||||
|
||||
Note that for some use cases (including installation on OpenShift clusters) the fs-backup pods must run in a Privileged security context. This is configured through the node-agent configmap (see below) by setting `privilegedFsBackup` to `true` in the configmap.
|
||||
|
||||
## CSI Snapshot Data Movement
|
||||
|
||||
Velero node-agent is required by [CSI Snapshot Data Movement][12] when Velero built-in data mover is used. By default, `velero install` does not install Velero's node-agent. To enable it, specify the `--use-node-agent` flag.
|
||||
|
||||
@@ -15,7 +15,7 @@ Note: If less resources are assigned to data mover pods, the data movement activ
|
||||
Refer to [Performance Guidance][3] for guidance on performance vs. resource usage; it is highly recommended that you perform your own testing to find the best resource limits for your data.
|
||||
|
||||
Velero introduces a new section in the node-agent configMap, called ```podResources```, through which you can set customized resource configurations for data mover pods.
|
||||
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-config```.
|
||||
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
|
||||
The node-agent server reads these configurations at startup time. Therefore, you can edit this configMap at any time, but for the changes to take effect, the node-agent server needs to be restarted.
|
||||
|
||||
### Sample
|
||||
@@ -39,19 +39,19 @@ To create the configMap, save something like the above sample to a json file and
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
|
||||
1. Open the node-agent daemonset spec
|
||||
```
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
|
||||
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
|
||||
```
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-config=<configMap name>
|
||||
- --node-agent-configmap=<configMap name>
|
||||
```
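For reference, the new `nodeagentconfig` e2e test added later in this diff builds the equivalent ConfigMap programmatically. A minimal sketch of that pattern, using the `velerotypes` and `builder` packages shown in that test (the namespace, ConfigMap name/key, and priority class value are illustrative, not prescribed):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/builder"
	velerotypes "github.com/vmware-tanzu/velero/pkg/types"
)

func main() {
	// Typed node-agent configuration. Only priorityClassName is set here;
	// the other sections described in this document (podResources, etc.)
	// are populated the same way before marshalling.
	cfg := velerotypes.NodeAgentConfigs{
		PriorityClassName: "data-mover",
	}

	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}

	// Wrap the JSON document in a ConfigMap object. It can then be created
	// with any Kubernetes client and referenced from the daemonset via
	// --node-agent-configmap=<configMap name>.
	cm := builder.ForConfigMap("velero", "node-agent-config").
		Data("node-agent-config", string(raw)).Result()

	fmt.Printf("ConfigMap %s/%s data: %s\n", cm.Namespace, cm.Name, raw)
}
```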
|
||||
|
||||
### Priority Class
|
||||
@@ -126,4 +126,4 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
|
||||
[4]: performance-guidance.md
|
||||
[4]: performance-guidance.md
|
||||
|
||||
@@ -27,22 +27,22 @@ To create the configMap, save something like the above sample to a json file and
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-config``` argument to the spec:
|
||||
To provide the configMap to node-agent, edit the node-agent daemonset and add the ```- --node-agent-configmap``` argument to the spec:
|
||||
1. Open the node-agent daemonset spec
|
||||
```
|
||||
kubectl edit ds node-agent -n velero
|
||||
```
|
||||
2. Add ```- --node-agent-config``` to ```spec.template.spec.containers```
|
||||
2. Add ```- --node-agent-configmap``` to ```spec.template.spec.containers```
|
||||
```
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --node-agent-config=<configMap name>
|
||||
- --node-agent-configmap=<configMap name>
|
||||
```
|
||||
|
||||
[1]: csi-snapshot-data-movement.md
|
||||
[2]: file-system-backup.md
|
||||
[3]: node-agent-concurrency.md
|
||||
[4]: data-movement-node-selection.md
|
||||
[4]: data-movement-node-selection.md
|
||||
|
||||
@@ -215,37 +215,9 @@ data:
|
||||
|
||||
### PVC selected-node
|
||||
|
||||
Velero by default removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
|
||||
Velero removes PVC's `volume.kubernetes.io/selected-node` annotation during restore, so that the restored PVC could be provisioned appropriately according to ```WaitForFirstConsumer``` rules, storage topologies and the restored pod's schedule result, etc.
|
||||
|
||||
For more information of how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
|
||||
|
||||
When you provide the selected-node configuration, Velero sets the annotation to the node given in the configuration; if that node doesn't exist in the cluster, the annotation is removed instead.
|
||||
Note: This feature is deprecated as of Velero 1.15, following the Velero deprecation policy. It is primarily used to remedy problems in old Kubernetes versions as described [here](https://github.com/vmware-tanzu/velero/pull/2377), and it may not work with newer features of Kubernetes and Velero. For more information, see issue https://github.com/vmware-tanzu/velero/issues/9053.
|
||||
To configure a selected-node, create a config map in the Velero namespace like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
# any name can be used; Velero uses the labels (below)
|
||||
# to identify it rather than the name
|
||||
name: change-pvc-node-selector-config
|
||||
# must be in the velero namespace
|
||||
namespace: velero
|
||||
# the below labels should be used verbatim in your
|
||||
# ConfigMap.
|
||||
labels:
|
||||
# this value-less label identifies the ConfigMap as
|
||||
# config for a plugin (i.e. the built-in restore item action plugin)
|
||||
velero.io/plugin-config: ""
|
||||
# this label identifies the name and kind of plugin
|
||||
# that this ConfigMap is for.
|
||||
velero.io/change-pvc-node-selector: RestoreItemAction
|
||||
data:
|
||||
# add 1+ key-value pairs here, where the key is the old
|
||||
# node name and the value is the new node name.
|
||||
<old-node-name>: <new-node-name>
|
||||
```
|
||||
For more information on how this selected-node annotation matters to PVC restore, see issue https://github.com/vmware-tanzu/velero/issues/9053.
|
||||
|
||||
## Restoring into a different namespace
|
||||
|
||||
|
||||
@@ -23,8 +23,6 @@ By default, `velero install` does not install Velero's [File System Backup][3].
|
||||
|
||||
If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add the file system backup to your existing install.
|
||||
|
||||
Note that for some use cases (including installation on OpenShift clusters) the fs-backup pods must run in a Privileged security context. This is configured through the node-agent configmap (see below) by setting `privilegedFsBackup` to `true` in the configmap.
|
||||
|
||||
## CSI Snapshot Data Movement
|
||||
|
||||
Velero node-agent is required by [CSI Snapshot Data Movement][12] when Velero built-in data mover is used. By default, `velero install` does not install Velero's node-agent. To enable it, specify the `--use-node-agent` flag.
|
||||
|
||||
@@ -39,10 +39,12 @@ import (
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/basic/resources-check"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/bsl-mgmt"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/migration"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/nodeagentconfig"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/parallelfilesdownload"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/parallelfilesupload"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/privilegesmgmt"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/pv-backup"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/repomaintenance"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/resource-filtering"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/resourcemodifiers"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/resourcepolicies"
|
||||
@@ -660,6 +662,24 @@ var _ = Describe(
|
||||
ParallelFilesDownloadTest,
|
||||
)
|
||||
|
||||
var _ = Describe(
|
||||
"Test Repository Maintenance Job Configuration's global part",
|
||||
Label("RepoMaintenance", "LongTime"),
|
||||
GlobalRepoMaintenanceTest,
|
||||
)
|
||||
|
||||
var _ = Describe(
|
||||
"Test Repository Maintenance Job Configuration's specific part",
|
||||
Label("RepoMaintenance", "LongTime"),
|
||||
SpecificRepoMaintenanceTest,
|
||||
)
|
||||
|
||||
var _ = Describe(
|
||||
"Test node agent config's LoadAffinity part",
|
||||
Label("NodeAgentConfig", "LoadAffinity"),
|
||||
LoadAffinities,
|
||||
)
|
||||
|
||||
func GetKubeConfigContext() error {
|
||||
var err error
|
||||
var tcDefault, tcStandby k8s.TestClient
|
||||
@@ -740,6 +760,12 @@ var _ = BeforeSuite(func() {
|
||||
).To(Succeed())
|
||||
}
|
||||
|
||||
By("Install PriorityClasses for E2E.")
|
||||
Expect(veleroutil.CreatePriorityClasses(
|
||||
context.Background(),
|
||||
test.VeleroCfg.ClientToInstallVelero.Kubebuilder,
|
||||
)).To(Succeed())
|
||||
|
||||
if test.InstallVelero {
|
||||
By("Install test resources before testing")
|
||||
Expect(
|
||||
@@ -764,6 +790,8 @@ var _ = AfterSuite(func() {
|
||||
test.StorageClassName,
|
||||
),
|
||||
).To(Succeed())
|
||||
|
||||
By("Delete PriorityClasses created by E2E")
|
||||
Expect(
|
||||
k8s.DeleteStorageClass(
|
||||
ctx,
|
||||
@@ -783,6 +811,11 @@ var _ = AfterSuite(func() {
|
||||
).To(Succeed())
|
||||
}
|
||||
|
||||
Expect(veleroutil.DeletePriorityClasses(
|
||||
ctx,
|
||||
test.VeleroCfg.ClientToInstallVelero.Kubebuilder,
|
||||
)).To(Succeed())
|
||||
|
||||
// If the Velero is installed during test, and the FailFast is not enabled,
|
||||
// uninstall Velero. If not, either Velero is not installed, or kept it for debug on failure.
|
||||
if test.InstallVelero && (testSuitePassed || !test.VeleroCfg.FailFast) {
|
||||
|
||||
@@ -342,6 +342,12 @@ func (m *migrationE2E) Restore() error {
|
||||
Expect(veleroutil.InstallStorageClasses(
|
||||
m.VeleroCfg.StandbyClusterCloudProvider)).To(Succeed())
|
||||
|
||||
By("Install PriorityClass for E2E.")
|
||||
Expect(veleroutil.CreatePriorityClasses(
|
||||
context.Background(),
|
||||
test.VeleroCfg.StandbyClient.Kubebuilder,
|
||||
)).To(Succeed())
|
||||
|
||||
if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
|
||||
m.VeleroCfg.UseVolumeSnapshots {
|
||||
By("Install VolumeSnapshotClass for E2E.")
|
||||
@@ -447,6 +453,7 @@ func (m *migrationE2E) Clean() error {
|
||||
|
||||
Expect(k8sutil.KubectlConfigUseContext(
|
||||
m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
|
||||
|
||||
m.VeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
|
||||
m.VeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName
|
||||
|
||||
@@ -459,7 +466,6 @@ func (m *migrationE2E) Clean() error {
|
||||
fmt.Println("Fail to delete StorageClass1: ", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := k8sutil.DeleteStorageClass(
|
||||
m.Ctx,
|
||||
*m.VeleroCfg.ClientToInstallVelero,
|
||||
@@ -469,6 +475,12 @@ func (m *migrationE2E) Clean() error {
|
||||
return
|
||||
}
|
||||
|
||||
By("Delete PriorityClasses created by E2E")
|
||||
Expect(veleroutil.DeletePriorityClasses(
|
||||
m.Ctx,
|
||||
m.VeleroCfg.ClientToInstallVelero.Kubebuilder,
|
||||
)).To(Succeed())
|
||||
|
||||
if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
|
||||
m.VeleroCfg.UseVolumeSnapshots {
|
||||
By("Delete VolumeSnapshotClass created by E2E")
|
||||
|
||||
347
test/e2e/nodeagentconfig/node-agent-config.go
Normal file
@@ -0,0 +1,347 @@
|
||||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeagentconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/pkg/errors"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
velerokubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
"github.com/vmware-tanzu/velero/test"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/test"
|
||||
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
|
||||
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
|
||||
)
|
||||
|
||||
type NodeAgentConfigTestCase struct {
|
||||
TestCase
|
||||
nodeAgentConfigs velerotypes.NodeAgentConfigs
|
||||
nodeAgentConfigMapName string
|
||||
}
|
||||
|
||||
var LoadAffinities func() = TestFunc(&NodeAgentConfigTestCase{
|
||||
nodeAgentConfigs: velerotypes.NodeAgentConfigs{
|
||||
LoadAffinity: []*kube.LoadAffinity{
|
||||
{
|
||||
NodeSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
},
|
||||
},
|
||||
StorageClass: test.StorageClassName,
|
||||
},
|
||||
{
|
||||
NodeSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/arch": "amd64",
|
||||
},
|
||||
},
|
||||
StorageClass: test.StorageClassName2,
|
||||
},
|
||||
},
|
||||
BackupPVCConfig: map[string]velerotypes.BackupPVC{
|
||||
test.StorageClassName: {
|
||||
StorageClass: test.StorageClassName2,
|
||||
},
|
||||
},
|
||||
RestorePVCConfig: &velerotypes.RestorePVC{
|
||||
IgnoreDelayBinding: true,
|
||||
},
|
||||
PriorityClassName: test.PriorityClassNameForDataMover,
|
||||
},
|
||||
nodeAgentConfigMapName: "node-agent-config",
|
||||
})
|
||||
|
||||
func (n *NodeAgentConfigTestCase) Init() error {
|
||||
// generate random number as UUIDgen and set one default timeout duration
|
||||
n.TestCase.Init()
|
||||
|
||||
// generate variable names based on CaseBaseName + UUIDgen
|
||||
n.CaseBaseName = "node-agent-config-" + n.UUIDgen
|
||||
n.BackupName = "backup-" + n.CaseBaseName
|
||||
n.RestoreName = "restore-" + n.CaseBaseName
|
||||
|
||||
// generate namespaces by NamespacesTotal
|
||||
n.NamespacesTotal = 1
|
||||
n.NSIncluded = &[]string{}
|
||||
for nsNum := 0; nsNum < n.NamespacesTotal; nsNum++ {
|
||||
createNSName := fmt.Sprintf("%s-%00000d", n.CaseBaseName, nsNum)
|
||||
*n.NSIncluded = append(*n.NSIncluded, createNSName)
|
||||
}
|
||||
|
||||
// assign values to the inner variable for specific case
|
||||
n.VeleroCfg.UseNodeAgent = true
|
||||
n.VeleroCfg.UseNodeAgentWindows = true
|
||||
|
||||
// Need to verify the data mover pod content, so don't wait until backup completion.
|
||||
n.BackupArgs = []string{
|
||||
"create", "--namespace", n.VeleroCfg.VeleroNamespace, "backup", n.BackupName,
|
||||
"--include-namespaces", strings.Join(*n.NSIncluded, ","),
|
||||
"--snapshot-volumes=true", "--snapshot-move-data",
|
||||
}
|
||||
|
||||
// Need to verify the data mover pod content, so don't wait until restore completion.
|
||||
n.RestoreArgs = []string{
|
||||
"create", "--namespace", n.VeleroCfg.VeleroNamespace, "restore", n.RestoreName,
|
||||
"--from-backup", n.BackupName,
|
||||
}
|
||||
|
||||
// Message output by ginkgo
|
||||
n.TestMsg = &TestMSG{
|
||||
Desc: "Validate Node Agent ConfigMap configuration",
|
||||
FailedMSG: "Failed to apply and / or validate configuration in VGDP pod.",
|
||||
Text: "Should be able to apply and validate configuration in VGDP pod.",
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NodeAgentConfigTestCase) InstallVelero() error {
|
||||
// Because this test needs to use customized Node Agent ConfigMap,
|
||||
// need to uninstall and reinstall Velero.
|
||||
|
||||
fmt.Println("Start to uninstall Velero")
|
||||
if err := veleroutil.VeleroUninstall(n.Ctx, n.VeleroCfg); err != nil {
|
||||
fmt.Printf("Fail to uninstall Velero: %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
result, err := json.Marshal(n.nodeAgentConfigs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodeAgentConfigMap := builder.ForConfigMap(n.VeleroCfg.VeleroNamespace, n.nodeAgentConfigMapName).
|
||||
Data("node-agent-config", string(result)).Result()
|
||||
|
||||
n.VeleroCfg.NodeAgentConfigMap = n.nodeAgentConfigMapName
|
||||
|
||||
return veleroutil.PrepareVelero(
|
||||
n.Ctx,
|
||||
n.CaseBaseName,
|
||||
n.VeleroCfg,
|
||||
nodeAgentConfigMap,
|
||||
)
|
||||
}
|
||||
|
||||
func (n *NodeAgentConfigTestCase) CreateResources() error {
|
||||
for _, ns := range *n.NSIncluded {
|
||||
if err := k8sutil.CreateNamespace(n.Ctx, n.Client, ns); err != nil {
|
||||
fmt.Printf("Fail to create ns %s: %s\n", ns, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
pvc, err := k8sutil.CreatePVC(n.Client, ns, "volume-1", test.StorageClassName, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("Fail to create PVC %s: %s\n", "volume-1", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
vols := k8sutil.CreateVolumes(pvc.Name, []string{"volume-1"})
|
||||
|
||||
deployment := k8sutil.NewDeployment(
|
||||
n.CaseBaseName,
|
||||
(*n.NSIncluded)[0],
|
||||
1,
|
||||
map[string]string{"app": "test"},
|
||||
n.VeleroCfg.ImageRegistryProxy,
|
||||
n.VeleroCfg.WorkerOS,
|
||||
).WithVolume(vols).Result()
|
||||
|
||||
deployment, err = k8sutil.CreateDeployment(n.Client.ClientGo, ns, deployment)
|
||||
if err != nil {
|
||||
fmt.Printf("Fail to create deployment %s: %s \n", deployment.Name, err.Error())
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to create deployment: %s", err.Error()))
|
||||
}
|
||||
|
||||
if err := k8sutil.WaitForReadyDeployment(n.Client.ClientGo, deployment.Namespace, deployment.Name); err != nil {
|
||||
fmt.Printf("Fail to create deployment %s: %s\n", n.CaseBaseName, err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NodeAgentConfigTestCase) Backup() error {
|
||||
if err := veleroutil.VeleroCmdExec(n.Ctx, n.VeleroCfg.VeleroCLI, n.BackupArgs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
backupPodList := new(corev1api.PodList)
|
||||
|
||||
wait.PollUntilContextTimeout(n.Ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
|
||||
duList := new(velerov2alpha1api.DataUploadList)
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(
|
||||
n.Ctx,
|
||||
duList,
|
||||
&client.ListOptions{Namespace: n.VeleroCfg.VeleroNamespace},
|
||||
); err != nil {
|
||||
fmt.Printf("Fail to list DataUpload: %s\n", err.Error())
|
||||
return false, fmt.Errorf("Fail to list DataUpload: %w", err)
|
||||
} else {
|
||||
if len(duList.Items) <= 0 {
|
||||
fmt.Println("No DataUpload found yet. Continue polling.")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(
|
||||
n.Ctx,
|
||||
backupPodList,
|
||||
&client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{
|
||||
velerov1api.DataUploadLabel: duList.Items[0].Name,
|
||||
}),
|
||||
}); err != nil {
|
||||
fmt.Printf("Fail to list backupPod %s\n", err.Error())
|
||||
return false, errors.Wrapf(err, "error to list backup pods")
|
||||
} else {
|
||||
if len(backupPodList.Items) <= 0 {
|
||||
fmt.Println("No backupPod found yet. Continue polling.")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
fmt.Println("Start to verify backupPod content.")
|
||||
|
||||
Expect(backupPodList.Items[0].Spec.PriorityClassName).To(Equal(n.nodeAgentConfigs.PriorityClassName))
|
||||
|
||||
// In backup, only the second element of LoadAffinity array should be used.
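// (Presumably because backupPVCConfig above maps test.StorageClassName to
// test.StorageClassName2, so the backup PVC is created with StorageClassName2
// and the affinity entry bound to that storage class is the one that applies.)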
|
||||
expectedAffinity := velerokubeutil.ToSystemAffinity(n.nodeAgentConfigs.LoadAffinity[1:])
|
||||
|
||||
Expect(backupPodList.Items[0].Spec.Affinity).To(Equal(expectedAffinity))
|
||||
|
||||
fmt.Println("backupPod content verification completed successfully.")
|
||||
|
||||
wait.PollUntilContextTimeout(n.Ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
|
||||
backup := new(velerov1api.Backup)
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.Get(
|
||||
n.Ctx,
|
||||
client.ObjectKey{Namespace: n.VeleroCfg.VeleroNamespace, Name: n.BackupName},
|
||||
backup,
|
||||
); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if backup.Status.Phase != velerov1api.BackupPhaseCompleted &&
|
||||
backup.Status.Phase != velerov1api.BackupPhaseFailed &&
|
||||
backup.Status.Phase != velerov1api.BackupPhasePartiallyFailed {
|
||||
fmt.Printf("backup status is %s. Continue polling until backup reach to a final state.\n", backup.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NodeAgentConfigTestCase) Restore() error {
|
||||
if err := veleroutil.VeleroCmdExec(n.Ctx, n.VeleroCfg.VeleroCLI, n.RestoreArgs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
restorePodList := new(corev1api.PodList)
|
||||
|
||||
wait.PollUntilContextTimeout(n.Ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
|
||||
ddList := new(velerov2alpha1api.DataDownloadList)
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(
|
||||
n.Ctx,
|
||||
ddList,
|
||||
&client.ListOptions{Namespace: n.VeleroCfg.VeleroNamespace},
|
||||
); err != nil {
|
||||
fmt.Printf("Fail to list DataDownload: %s\n", err.Error())
|
||||
return false, fmt.Errorf("Fail to list DataDownload %w", err)
|
||||
} else {
|
||||
if len(ddList.Items) <= 0 {
|
||||
fmt.Println("No DataDownload found yet. Continue polling.")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(
|
||||
n.Ctx,
|
||||
restorePodList,
|
||||
&client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{
|
||||
velerov1api.DataDownloadLabel: ddList.Items[0].Name,
|
||||
}),
|
||||
}); err != nil {
|
||||
fmt.Printf("Fail to list restorePod %s\n", err.Error())
|
||||
return false, errors.Wrapf(err, "error to list restore pods")
|
||||
} else {
|
||||
if len(restorePodList.Items) <= 0 {
|
||||
fmt.Println("No restorePod found yet. Continue polling.")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
fmt.Println("Start to verify restorePod content.")
|
||||
|
||||
Expect(restorePodList.Items[0].Spec.PriorityClassName).To(Equal(n.nodeAgentConfigs.PriorityClassName))
|
||||
|
||||
// In restore, only the first element of LoadAffinity array should be used.
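// (Presumably because the restored PVC keeps the original test.StorageClassName,
// so the affinity entry bound to that storage class is the one that applies.)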
|
||||
expectedAffinity := velerokubeutil.ToSystemAffinity(n.nodeAgentConfigs.LoadAffinity[:1])
|
||||
|
||||
Expect(restorePodList.Items[0].Spec.Affinity).To(Equal(expectedAffinity))
|
||||
|
||||
fmt.Println("restorePod content verification completed successfully.")
|
||||
|
||||
wait.PollUntilContextTimeout(n.Ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
|
||||
restore := new(velerov1api.Restore)
|
||||
if err := n.VeleroCfg.ClientToInstallVelero.Kubebuilder.Get(
|
||||
n.Ctx,
|
||||
client.ObjectKey{Namespace: n.VeleroCfg.VeleroNamespace, Name: n.RestoreName},
|
||||
restore,
|
||||
); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if restore.Status.Phase != velerov1api.RestorePhaseCompleted &&
|
||||
restore.Status.Phase != velerov1api.RestorePhaseFailed &&
|
||||
restore.Status.Phase != velerov1api.RestorePhasePartiallyFailed {
|
||||
fmt.Printf("restore status is %s. Continue polling until restore reach to a final state.\n", restore.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
247
test/e2e/repomaintenance/repo_maintenance_config.go
Normal file
@@ -0,0 +1,247 @@
|
||||
/*
|
||||
Copyright 2021 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package repomaintenance
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/pkg/errors"
|
||||
batchv1api "k8s.io/api/batch/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
velerokubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
"github.com/vmware-tanzu/velero/test"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/test"
|
||||
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
|
||||
veleroutil "github.com/vmware-tanzu/velero/test/util/velero"
|
||||
)
|
||||
|
||||
type RepoMaintenanceTestCase struct {
|
||||
TestCase
|
||||
repoMaintenanceConfigMapName string
|
||||
repoMaintenanceConfigKey string
|
||||
jobConfigs velerotypes.JobConfigs
|
||||
}
|
||||
|
||||
var keepJobNum = 1
|
||||
|
||||
var GlobalRepoMaintenanceTest func() = TestFunc(&RepoMaintenanceTestCase{
|
||||
repoMaintenanceConfigKey: "global",
|
||||
repoMaintenanceConfigMapName: "global",
|
||||
jobConfigs: velerotypes.JobConfigs{
|
||||
KeepLatestMaintenanceJobs: &keepJobNum,
|
||||
PodResources: &velerokubeutil.PodResources{
|
||||
CPURequest: "100m",
|
||||
MemoryRequest: "100Mi",
|
||||
CPULimit: "200m",
|
||||
MemoryLimit: "200Mi",
|
||||
},
|
||||
PriorityClassName: test.PriorityClassNameForRepoMaintenance,
|
||||
},
|
||||
})
|
||||
|
||||
var SpecificRepoMaintenanceTest func() = TestFunc(&RepoMaintenanceTestCase{
|
||||
repoMaintenanceConfigKey: "",
|
||||
repoMaintenanceConfigMapName: "specific",
|
||||
jobConfigs: velerotypes.JobConfigs{
|
||||
KeepLatestMaintenanceJobs: &keepJobNum,
|
||||
PodResources: &velerokubeutil.PodResources{
|
||||
CPURequest: "100m",
|
||||
MemoryRequest: "100Mi",
|
||||
CPULimit: "200m",
|
||||
MemoryLimit: "200Mi",
|
||||
},
|
||||
PriorityClassName: test.PriorityClassNameForRepoMaintenance,
|
||||
},
|
||||
})
|
||||
|
||||
func (r *RepoMaintenanceTestCase) Init() error {
|
||||
// generate random number as UUIDgen and set one default timeout duration
|
||||
r.TestCase.Init()
|
||||
|
||||
// generate variable names based on CaseBaseName + UUIDgen
|
||||
r.CaseBaseName = "repo-maintenance-" + r.UUIDgen
|
||||
r.BackupName = "backup-" + r.CaseBaseName
|
||||
r.RestoreName = "restore-" + r.CaseBaseName
|
||||
|
||||
// generate namespaces by NamespacesTotal
|
||||
r.NamespacesTotal = 1
|
||||
r.NSIncluded = &[]string{}
|
||||
for nsNum := 0; nsNum < r.NamespacesTotal; nsNum++ {
|
||||
createNSName := fmt.Sprintf("%s-%00000d", r.CaseBaseName, nsNum)
|
||||
*r.NSIncluded = append(*r.NSIncluded, createNSName)
|
||||
}
|
||||
|
||||
// If repoMaintenanceConfigKey is not set, it means testing the specific repo case.
|
||||
// Need to assemble the BackupRepository name. The format is "volumeNamespace-bslName-uploaderName"
|
||||
if r.repoMaintenanceConfigKey == "" {
|
||||
r.repoMaintenanceConfigKey = (*r.NSIncluded)[0] + "-" + "default" + "-" + test.UploaderTypeKopia
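// e.g. "repo-maintenance-12345678-0-default-kopia" for the first namespace
// created in Init (the UUID part is illustrative).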
|
||||
}
|
||||
|
||||
// assign values to the inner variable for specific case
|
||||
r.VeleroCfg.UseNodeAgent = true
|
||||
r.VeleroCfg.UseNodeAgentWindows = true
|
||||
|
||||
r.BackupArgs = []string{
|
||||
"create", "--namespace", r.VeleroCfg.VeleroNamespace, "backup", r.BackupName,
|
||||
"--include-namespaces", strings.Join(*r.NSIncluded, ","),
|
||||
"--snapshot-volumes=true", "--snapshot-move-data", "--wait",
|
||||
}
|
||||
|
||||
// Message output by ginkgo
|
||||
r.TestMsg = &TestMSG{
|
||||
Desc: "Validate Repository Maintenance Job configuration",
|
||||
FailedMSG: "Failed to apply and / or validate configuration in repository maintenance jobs.",
|
||||
Text: "Should be able to apply and validate configuration in repository maintenance jobs.",
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RepoMaintenanceTestCase) InstallVelero() error {
|
||||
// Because this test needs to use customized repository maintenance ConfigMap,
|
||||
// need to uninstall and reinstall Velero.
|
||||
|
||||
fmt.Println("Start to uninstall Velero")
|
||||
if err := veleroutil.VeleroUninstall(r.Ctx, r.VeleroCfg); err != nil {
|
||||
fmt.Printf("Fail to uninstall Velero: %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
result, err := json.Marshal(r.jobConfigs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
repoMaintenanceConfig := builder.ForConfigMap(r.VeleroCfg.VeleroNamespace, r.repoMaintenanceConfigMapName).
|
||||
Data(r.repoMaintenanceConfigKey, string(result)).Result()
|
||||
|
||||
r.VeleroCfg.RepoMaintenanceJobConfigMap = r.repoMaintenanceConfigMapName
|
||||
|
||||
return veleroutil.PrepareVelero(
|
||||
r.Ctx,
|
||||
r.CaseBaseName,
|
||||
r.VeleroCfg,
|
||||
repoMaintenanceConfig,
|
||||
)
|
||||
}
|
||||
|
||||
func (r *RepoMaintenanceTestCase) CreateResources() error {
|
||||
for _, ns := range *r.NSIncluded {
|
||||
if err := k8sutil.CreateNamespace(r.Ctx, r.Client, ns); err != nil {
|
||||
fmt.Printf("Fail to create ns %s: %s\n", ns, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
pvc, err := k8sutil.CreatePVC(r.Client, ns, "volume-1", test.StorageClassName, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("Fail to create PVC %s: %s\n", "volume-1", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
vols := k8sutil.CreateVolumes(pvc.Name, []string{"volume-1"})
|
||||
|
||||
deployment := k8sutil.NewDeployment(
|
||||
r.CaseBaseName,
|
||||
(*r.NSIncluded)[0],
|
||||
1,
|
||||
map[string]string{"app": "test"},
|
||||
r.VeleroCfg.ImageRegistryProxy,
|
||||
r.VeleroCfg.WorkerOS,
|
||||
).WithVolume(vols).Result()
|
||||
|
||||
deployment, err = k8sutil.CreateDeployment(r.Client.ClientGo, ns, deployment)
|
||||
if err != nil {
|
||||
fmt.Printf("Fail to create deployment %s: %s \n", deployment.Name, err.Error())
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to create deployment: %s", err.Error()))
|
||||
}
|
||||
|
||||
if err := k8sutil.WaitForReadyDeployment(r.Client.ClientGo, deployment.Namespace, deployment.Name); err != nil {
|
||||
fmt.Printf("Fail to create deployment %s: %s\n", r.CaseBaseName, err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RepoMaintenanceTestCase) Verify() error {
|
||||
// Reduce the MaintenanceFrequency to 1 minute.
|
||||
backupRepositoryList := new(velerov1api.BackupRepositoryList)
|
||||
if err := r.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(
|
||||
r.Ctx,
|
||||
backupRepositoryList,
|
||||
&client.ListOptions{
|
||||
Namespace: r.VeleroCfg.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{velerov1api.VolumeNamespaceLabel: (*r.NSIncluded)[0]}),
|
||||
},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(backupRepositoryList.Items) <= 0 {
|
||||
return fmt.Errorf("fail list BackupRepository. no item is returned")
|
||||
}
|
||||
|
||||
backupRepository := backupRepositoryList.Items[0]
|
||||
|
||||
updated := backupRepository.DeepCopy()
|
||||
updated.Spec.MaintenanceFrequency = metav1.Duration{Duration: time.Minute}
|
||||
if err := r.VeleroCfg.ClientToInstallVelero.Kubebuilder.Patch(r.Ctx, updated, client.MergeFrom(&backupRepository)); err != nil {
|
||||
fmt.Printf("failed to patch BackupRepository %q: %s", backupRepository.GetName(), err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// The minimal time unit of Repository Maintenance is 5 minutes.
|
||||
// Wait for more than one cycle to make sure the result is valid.
|
||||
time.Sleep(6 * time.Minute)
|
||||
|
||||
jobList := new(batchv1api.JobList)
|
||||
if err := r.VeleroCfg.ClientToInstallVelero.Kubebuilder.List(r.Ctx, jobList, &client.ListOptions{
|
||||
Namespace: r.VeleroCfg.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{"velero.io/repo-name": backupRepository.Name}),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resources, err := kube.ParseResourceRequirements(
|
||||
r.jobConfigs.PodResources.CPURequest,
|
||||
r.jobConfigs.PodResources.MemoryRequest,
|
||||
r.jobConfigs.PodResources.CPULimit,
|
||||
r.jobConfigs.PodResources.MemoryLimit,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse resource requirements for maintenance job")
|
||||
}
|
||||
|
||||
Expect(jobList.Items[0].Spec.Template.Spec.Containers[0].Resources).To(Equal(resources))
|
||||
|
||||
Expect(jobList.Items).To(HaveLen(*r.jobConfigs.KeepLatestMaintenanceJobs))
|
||||
|
||||
Expect(jobList.Items[0].Spec.Template.Spec.PriorityClassName).To(Equal(r.jobConfigs.PriorityClassName))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -41,6 +41,7 @@ depends on your test patterns.
|
||||
*/
|
||||
type VeleroBackupRestoreTest interface {
|
||||
Init() error
|
||||
InstallVelero() error
|
||||
CreateResources() error
|
||||
Backup() error
|
||||
Destroy() error
|
||||
@@ -109,6 +110,10 @@ func (t *TestCase) GenerateUUID() string {
|
||||
return fmt.Sprintf("%08d", rand.IntN(100000000))
|
||||
}
|
||||
|
||||
func (t *TestCase) InstallVelero() error {
|
||||
return PrepareVelero(context.Background(), t.GetTestCase().CaseBaseName, t.GetTestCase().VeleroCfg)
|
||||
}
|
||||
|
||||
func (t *TestCase) CreateResources() error {
|
||||
return nil
|
||||
}
|
||||
@@ -221,7 +226,8 @@ func RunTestCase(test VeleroBackupRestoreTest) error {
|
||||
fmt.Printf("Running test case %s %s\n", test.GetTestMsg().Desc, time.Now().Format("2006-01-02 15:04:05"))
|
||||
|
||||
if InstallVelero {
|
||||
Expect(PrepareVelero(context.Background(), test.GetTestCase().CaseBaseName, test.GetTestCase().VeleroCfg)).To(Succeed())
|
||||
fmt.Printf("Install Velero for test case %s: %s", test.GetTestCase().CaseBaseName, time.Now().Format("2006-01-02 15:04:05"))
|
||||
Expect(test.InstallVelero()).To(Succeed())
|
||||
}
|
||||
|
||||
defer test.Clean()
|
||||
|
||||
@@ -57,6 +57,11 @@ const (
|
||||
BackupRepositoryConfigName = "backup-repository-config"
|
||||
)
|
||||
|
||||
const (
|
||||
PriorityClassNameForDataMover = "data-mover"
|
||||
PriorityClassNameForRepoMaintenance = "repo-maintenance"
|
||||
)
|
||||
|
||||
var PublicCloudProviders = []string{AWS, Azure, GCP, Vsphere}
|
||||
var LocalCloudProviders = []string{Kind, VanillaZFS}
|
||||
var CloudProviders = append(PublicCloudProviders, LocalCloudProviders...)
|
||||
|
||||
@@ -17,12 +17,12 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
|
||||
|
||||
@@ -17,11 +17,12 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
|
||||
)
|
||||
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -25,6 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
@@ -17,12 +17,12 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
@@ -17,13 +17,13 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
|
||||
)
|
||||
|
||||
@@ -17,11 +17,11 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
appsv1api "k8s.io/api/apps/v1"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
@@ -17,12 +17,12 @@ limitations under the License.
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.