Compare commits


1 Commit

Author: Xun Jiang
SHA1: c05e6d21a3
Message: Dump the github context content
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
Date: 2025-09-30 17:14:54 +08:00
270 changed files with 1368 additions and 8739 deletions

View File

@@ -21,7 +21,7 @@ jobs:
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -112,7 +112,7 @@ jobs:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -185,7 +185,7 @@ jobs:
timeout-minutes: 30
- name: Upload debug bundle
if: ${{ failure() }}
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
name: DebugBundle
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*

View File

@@ -16,8 +16,13 @@ jobs:
outputs:
version: ${{ steps.pick-version.outputs.version }}
steps:
- name: Dump github context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT"
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- id: pick-version
run: |

View File

@@ -19,7 +19,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master

View File

@@ -12,7 +12,7 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Changelog check
if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}

View File

@@ -14,7 +14,7 @@ jobs:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6

View File

@@ -8,7 +8,7 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Codespell
uses: codespell-project/actions-codespell@master

View File

@@ -13,7 +13,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
name: Checkout
- name: Set up QEMU

View File

@@ -14,7 +14,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
name: Checkout
- name: Verify .goreleaser.yml and try a dryrun release.

View File

@@ -18,7 +18,7 @@ jobs:
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -26,7 +26,7 @@ jobs:
go-version: ${{ needs.get-go-version.outputs.version }}
- name: Linter check
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v8
with:
version: v2.5.0
version: v2.1.1
args: --verbose

View File

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
# The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
# there are at least two commits: the first one is the merge commit and the second one is the real commit

View File

@@ -12,7 +12,7 @@ jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.ref_name }}
ref: ${{ github.ref }}
build:
name: Build
@@ -20,7 +20,7 @@ jobs:
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6

View File

@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the latest code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Automatic Rebase

View File

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10.1.1
- uses: actions/stale@v10.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
ARG GOPROXY
ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
ARG GOPROXY
ARG BIN

View File

@@ -15,7 +15,7 @@
ARG OS_VERSION=1809
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
ARG GOPROXY
ARG BIN

View File

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.25 as tilt-helper
FROM golang:1.24 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1 +0,0 @@
Add `--apply` flag to `install` command, allowing usage of Kubernetes apply to make changes to existing installs

View File

@@ -1 +0,0 @@
Fix issue #7725, add design for backup repo cache configuration

View File

@@ -1,10 +0,0 @@
Implement wildcard namespace pattern expansion for backup namespace includes/excludes.
This change adds support for wildcard patterns (*, ?, [abc], {a,b,c}) in namespace includes and excludes during backup operations.
When wildcard patterns are detected, they are expanded against the list of active namespaces in the cluster before the backup proceeds.
Key features:
- Wildcard patterns in namespace includes/excludes are automatically detected and expanded
- Pattern validation ensures unsupported patterns (regex, consecutive asterisks) are rejected
- Empty wildcard results (e.g., "invalid*" matching no namespaces) correctly result in empty backups
- Exact namespace names and "*" continue to work as before (no expansion needed)

View File

@@ -1 +0,0 @@
Fix issue #7904, remove the code and doc for PVC node selection

View File

@@ -1 +0,0 @@
Fix issue #9193, don't connect repo in repo controller

View File

@@ -1 +0,0 @@
Fix issue #9267, add events to data mover prepare diagnostic

View File

@@ -1 +0,0 @@
Concurrent backup processing

View File

@@ -1 +0,0 @@
Fix typos in documentation

View File

@@ -1 +0,0 @@
Fix issue #9332, add bytesDone for cache files

View File

@@ -1 +0,0 @@
Add cache configuration to VGDP

View File

@@ -1 +0,0 @@
Fix the Job build error when the BackupRepository name is longer than 63 characters.

View File

@@ -1 +0,0 @@
Add cache dir configuration for udmrepo

View File

@@ -1 +0,0 @@
Add snapshotSize for DataDownload, PodVolumeRestore

View File

@@ -1 +0,0 @@
Add incrementalSize to DU/PVB for reporting new/changed size

View File

@@ -1 +0,0 @@
Support cache volume for generic restore exposer and pod volume exposer

View File

@@ -1 +0,0 @@
Fix managed fields patch for resources using GenerateName

View File

@@ -1 +0,0 @@
Track actual resource names for GenerateName in restore status

View File

@@ -1 +0,0 @@
Add cache volume configuration

View File

@@ -1 +0,0 @@
Fix issue #9365, prevent fake completion notification due to multiple update of single PVR

View File

@@ -1 +0,0 @@
Refactor repo provider interface for static configuration

View File

@@ -1 +0,0 @@
don't copy securitycontext from first container if configmap found

View File

@@ -1 +0,0 @@
Cache volume support for DataDownload

View File

@@ -1 +0,0 @@
Cache volume for PVR

View File

@@ -1 +0,0 @@
Fix issue #9400, connect repo first time after creation so that init params could be written

View File

@@ -1 +0,0 @@
Add Prometheus metrics for maintenance jobs

View File

@@ -1 +0,0 @@
Fix issue #9276, add doc for cache volume support

View File

@@ -1 +0,0 @@
Apply volume policies to VolumeGroupSnapshot PVC filtering

View File

@@ -1 +0,0 @@
Fix issue #9194, add doc for GOMAXPROCS behavior change

View File

@@ -1 +0,0 @@
Remove VolumeSnapshotClass from CSI B/R process.

View File

@@ -594,8 +594,6 @@ spec:
description: Phase is the current state of the Backup.
enum:
- New
- Queued
- ReadyToStart
- FailedValidation
- InProgress
- WaitingForPluginOperations
@@ -627,11 +625,6 @@ spec:
filters that happen as items are processed.
type: integer
type: object
queuePosition:
description: |-
QueuePosition is the position of the backup in the queue.
Only relevant when Phase is "Queued"
type: integer
startTimestamp:
description: |-
StartTimestamp records the time a backup was started.

View File

@@ -33,12 +33,6 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -195,11 +189,6 @@ spec:
format: date-time
nullable: true
type: string
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the pod volume backup's status.
type: string

View File

@@ -133,10 +133,6 @@ spec:
snapshotID:
description: SnapshotID is the ID of the volume snapshot to be restored.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: SourceNamespace is the original namespace for namaspace
mapping.

File diff suppressed because one or more lines are too long

View File

@@ -108,10 +108,6 @@ spec:
description: SnapshotID is the ID of the Velero backup snapshot to
be restored from.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: |-
SourceNamespace is the original namespace where the volume is backed up from.

View File

@@ -33,12 +33,6 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -179,11 +173,6 @@ spec:
as a result of the DataUpload.
nullable: true
type: object
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the DataUpload's status.
type: string

File diff suppressed because one or more lines are too long

View File

@@ -1,70 +0,0 @@
# Apply flag for install command
## Abstract
Add an `--apply` flag to the install command that applies resources, updating existing ones instead of attempting to create them. This can be useful as part of the upgrade process for existing installations.
## Background
The current Velero install command creates resources but doesn't provide a direct way to apply updates to an existing installation.
Users attempting to run the install command on an existing installation receive "already exists" messages.
Upgrade steps for existing installs typically involve a three (or more) step process: applying updated CRDs (using `--dry-run` and piping to `kubectl apply`) and then updating/setting images on the Velero deployment and node-agent.
## Goals
- Provide a simple flag to enable applying resources on an existing Velero installation.
- Use server-side apply to update existing resources rather than attempting to create them.
- Maintain consistency with the regular install flow.
## Non Goals
- Implement special logic for specific version-to-version upgrades (i.e. resource deletion, etc).
- Add complex upgrade validation or pre/post-upgrade hooks.
- Provide rollback capabilities.
## High-Level Design
The `--apply` flag will be added to the Velero install command.
When this flag is set, the installation process will use server-side apply to update existing resources instead of using create on new resources.
This flag can be used as _part_ of the upgrade process, but will not always fully handle an upgrade.
## Detailed Design
The implementation adds a new boolean flag `--apply` to the install command.
This flag will be passed through to the underlying install functions where the resource creation logic resides.
When the flag is set to true:
- The `createOrApplyResource` function will use server-side apply with field manager "velero-cli" and `force=true` to update resources.
- Resources will be applied in the same order as they would be created during installation.
- Custom Resource Definitions will still be processed first, and the system will wait for them to be established before continuing.
The server-side apply approach with `force=true` ensures that resources are updated even if there are conflicts with the last applied state.
This provides a best-effort mechanism to apply resources that follows the same flow as installation but updates resources instead of creating them.
No special handling is added for specific versions or resource structures, making this a general-purpose mechanism for applying resources.
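As an illustration of the flow above, here is a minimal sketch of a `createOrApplyResource`-style helper using the controller-runtime client; the function shape and flag wiring are assumptions for illustration, not the exact Velero code.
```go
package install

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrApplyResource creates the object as before, or, when apply is true,
// server-side applies it with the "velero-cli" field manager and forced
// ownership so conflicting fields from earlier managers do not block the apply.
// The object is expected to carry its apiVersion/kind (e.g., an unstructured
// object), as required for apply patches.
func createOrApplyResource(ctx context.Context, kbClient client.Client, obj client.Object, apply bool) error {
    if !apply {
        return kbClient.Create(ctx, obj)
    }
    return kbClient.Patch(ctx, obj, client.Apply,
        client.FieldOwner("velero-cli"), client.ForceOwnership)
}
```
Resources would still be applied in the same order as during installation, with CRDs processed first and waited on before continuing.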
## Alternatives Considered
1. Creating a separate `upgrade` command that would duplicate much of the install command logic.
- Rejected due to code duplication and maintenance overhead.
2. Implementing version-specific upgrade logic to handle breaking changes between versions.
- Rejected as overly complex and difficult to maintain across multiple version paths.
- This could be considered again in the future, but is not in the scope of the current design.
3. Adding automatic detection of existing resources and switching to apply mode.
- Rejected as it could lead to unexpected behavior and confusion if users unintentionally apply changes to existing resources.
## Security Considerations
The apply flag maintains the same security profile as the install command.
No additional permissions are required beyond what is needed for resource creation.
The use of `force=true` with server-side apply could potentially override manual changes made to resources, but this is a necessary trade-off to ensure apply is successful.
## Compatibility
This enhancement is compatible with all existing Velero installations as it is a new opt-in flag.
It does not change any resource formats or API contracts.
The apply process is best-effort and does not guarantee compatibility between arbitrary versions of Velero.
Users should still consult release notes for any breaking changes that may require manual intervention.
This flag could be adopted by the helm chart, specifically for CRD updates, to simplify the CRD update job.
## Implementation
The implementation involves:
1. Adding support for `Apply` to the existing Kubernetes client code.
1. Adding the `--apply` flag to the install command options.
1. Changing `createResource` to `createOrApplyResource` and updating it to use server-side apply when the `apply` boolean is set.
The implementation is straightforward and follows existing code patterns.
No migration of state or special handling of specific resources is required.

View File

@@ -1,231 +0,0 @@
# Backup Repository Cache Volume Design
## Glossary & Abbreviation
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
**Backup Repository**: The backup repository is layered between BR data movers and Backup Storage to provide BR related features, as introduced in [Unified Repository design][1].
**Velero Generic Data Path (VGDP)**: VGDP is the collection of modules introduced in [Unified Repository design][1]. Velero uses these modules to perform data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include the uploaders and the backup repository.
**Data Mover Pods**: Intermediate pods which host VGDP and carry out the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3] for details.
**Repository Maintenance Pods**: Pods for [Repository Maintenance Jobs][4], which host VGDP to run repository maintenance.
## Background
According to the [Unified Repository design][1], Velero uses selectable backup repositories for various backup/restore methods, i.e., fs-backup, volume snapshot data movement, etc. Some backup repositories may need to cache data on the client side for various repository operations, so as to accelerate execution.
In the existing [Backup Repository Configuration][5], we allow users to configure the cache data size (`cacheLimitMB`). However, the cache data is still stored in the root file system of the data mover pods/repository maintenance pods, and therefore in the root file system of the node. This is not good enough, for the following reasons:
- In many distributions, the node's system disk size is predefined, non-configurable and limited; e.g., the system disk may be 20G or less
- Velero supports concurrent data movements on each node. The cache in each of the concurrent data mover pods could quickly exhaust the system disk and cause problems like pod eviction, failure of pod creation, degradation of Kubernetes QoS, etc.
We need to allow users to prepare a dedicated location, e.g., a dedicated volume, for the cache.
Not all backup repositories, or all backup repository operations, require a cache, so we need to define when and how the cache is used.
## Goals
- Create a mechanism for users to configure cache volumes for various pods running VGDP
- Design the workflow to assign the cache volume pod path to backup repositories
- Describe when and how the cache volume is used
## Non-Goals
- The solution is based on [Unified Repository design][1], [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3]; legacy data paths are not supported. E.g., when a pod volume restore (PVR) runs with the legacy Restic path, if any data is cached, the cache still resides in the root file system.
## Solution
### Cache Data
Depending on the backup repository, cache data may include payload data or repository metadata, e.g., indexes to the payload data chunks.
Payload data is highly related to the backup data, and normally takes up the majority of the repository data as well as the cache data.
Repository metadata is related to the backup repository's chunking algorithm, data chunk mapping method, etc., so its size is not proportional to the backup data size.
On the other hand, for some backup repositories, in extreme cases, the repository metadata may be significantly large. E.g., Kopia's indexes are per chunk; if there is a huge number of small files in the repository, Kopia's index data may be on the same level as, or even larger than, the payload data.
However, in cases where repository metadata becomes the majority, other bottlenecks may emerge and the concurrency of data movers may be significantly constrained, so the requirement for cache volumes may go away.
Therefore, for now we only consider the cache volume requirement for payload data, and leave the consideration for metadata as a future enhancement.
### Scenarios
The backup repository cache varies by backup repository and by the repository operation performed during a VGDP run. Below are the scenarios in which VGDP runs:
- Data Upload for Backup: this is the process to upload/write the backup data into the backup repository, e.g., DataUpload or PodVolumeBackup. The pieces of data are written almost directly to the repository, sometimes with a small group staying briefly in the local place. That is to say, no large-scale data should be cached in this scenario, so we don't prepare a dedicated cache for it.
- Repository Maintenance: Repository maintenance most often visits the backup repository's metadata, and sometimes it needs to visit the file system directories from the backed up data. On the other hand, it is not practical to run concurrent maintenance jobs on one node. So the cache data is neither large nor does it affect the root file system too much. Therefore, we don't need to prepare a dedicated cache for this scenario.
- Data Download for Restore: this is the process to download/read the backup data from the backup repository during restore, e.g., DataDownload or PodVolumeRestore. For backup repositories whose data is stored in remote backup storage (e.g., a Kopia repository stores data in remote object stores), large amounts of data are cached locally to accelerate the restore. Therefore, we need dedicated cache volumes for this scenario.
- Backup Deletion: In this scenario, the backup repository is connected and metadata is enumerated to find the repository snapshot representing the backup data. That is to say, only metadata is cached, if anything. Therefore, dedicated cache volumes are not required in this scenario.
The above analyses are based on the common behavior of backup repositories and do not consider the case where backup repository metadata takes a majority or a significant proportion of the cache data.
As a conclusion of the analyses, we will create dedicated cache volumes for restore scenarios.
For other scenarios, cache volumes can be added as future changes/requirements demand. The mechanism to expose and connect the cache volumes should work for all scenarios. E.g., if we need to consider the backup repository metadata case, we may need cache volumes for backup and repository maintenance as well; then we can simply reuse the same cache volume provisioning and connection mechanism for the backup and repository maintenance scenarios.
### Cache Data and Lifecycle
If available, one cache volume is exclusively assigned to one data mover pod. That is, the cached data is destroyed when the data mover pod completes. The backup repository instance also closes at that point.
Cache data is fully managed by the specific backup repository, so the backup repository may also have its own way to GC the cache data.
That is to say, cache data GC may be launched by the backup repository instance while the data mover pod is running; the remaining data is then automatically destroyed when the data mover pod and the cache PVC are destroyed (the cache PVC's `reclaimPolicy` is always `Delete`, so once the cache PVC is destroyed, the volume is also destroyed). So no special logic is needed for cache data GC.
### Data Size
Cache volumes take storage space and cluster resources (PVC, PV); therefore, cache volumes should be created only when necessary, and they should be sized reasonably based on the cache data size:
- It is not worthwhile to create cache volumes for small backups; small backups will use the resident cache location (the cache location in the root file system)
- The cache data size has a limit; the existing `cacheLimitMB` is used for this purpose. E.g., it could be set to 1024 for a 1TB backup, which means 1GB of data is cached and old cache data exceeding this size is cleared. Therefore, it is meaningless to make the cache volume size much larger than `cacheLimitMB`
### Cache Volume Size
The cache volume size is calculated from the factors below (for Restore scenarios):
- **Limit**: The limit of the cache data, represented by `cacheLimitMB`; the default value is 5GB
- **backupSize**: The size of the backup, used as a reference to evaluate whether to create a cache volume. It doesn't mean the backup data always determines the cache data; it is just a reference for the scale of the backup, and small-scale backups may need only small cache data. Sometimes backupSize is irrelevant to the size of the cache data; in this case, ResidentThreshold should not be set and Limit is used directly. It is unlikely that backupSize is unavailable, but once that happens, ResidentThreshold is ignored and Limit is used directly.
- **ResidentThreshold**: The minimum backup size for which a cache volume is created
- **InflationPercentage**: Considering the overhead of the file system and the possible delay of cache cleanup, there should be some inflation of the final volume size over the logical size; otherwise, the cache volume may be overrun. This inflation percentage is hardcoded, e.g., 20%.
A formula is as below:
```
cacheVolumeSize = ((backupSize != 0 ? (backupSize > residentThreshold ? limit : 0) : limit) * (100 + inflationPercentage)) / 100
```
Finally, `cacheVolumeSize` is rounded up to a whole GiB for UX, storage, and management friendliness.
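For clarity, below is a minimal Go sketch of the sizing rule above; the function name and the byte-based units are illustrative assumptions, not the exact implementation.
```go
const gib = int64(1) << 30

// cacheVolumeSize applies the formula above: skip the cache volume for backups
// under the resident threshold, fall back to the limit when the backup size is
// unknown, inflate for FS overhead/cleanup delay, and round up to whole GiB.
func cacheVolumeSize(backupSize, residentThreshold, limit, inflationPercentage int64) int64 {
    base := limit
    if backupSize != 0 && backupSize <= residentThreshold {
        base = 0 // small backup: use the resident (root file system) cache
    }
    if base == 0 {
        return 0 // no dedicated cache volume
    }
    inflated := base * (100 + inflationPercentage) / 100
    return (inflated + gib - 1) / gib * gib // round up to GiB
}
```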
### PVC/PV
The PVC for a cache volume is created in the Velero namespace, and a storage class is required for the cache PVC. The PVC's accessMode is `ReadWriteOnce` and volumeMode is `Filesystem`, so the storage class provided should support this specification. If the storage class doesn't support either of these specifications, the data mover pod may hang in the `Pending` state until a timeout configured for the data movement (e.g. `prepareTimeout`) expires, and the data movement will finally fail.
It is not expected that the cache volume is retained after the data mover pod is deleted, so the `reclaimPolicy` for the storage class must be `Delete`.
To detect problems with the storage class and fail earlier, a validation is applied to the storage class; if the validation fails, the cache configuration is ignored, so the data mover pod is created without a cache volume.
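A hedged sketch of such a validation, assuming a controller-runtime client; the function name and the exact checks are illustrative and not necessarily what Velero implements.
```go
package cache

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// validateCacheStorageClass verifies the configured storage class exists and
// deletes its volumes when the cache PVC is deleted. On error, the caller
// ignores the cache configuration and creates the data mover pod without a
// cache volume.
func validateCacheStorageClass(ctx context.Context, kbClient client.Client, name string) error {
    sc := &storagev1.StorageClass{}
    if err := kbClient.Get(ctx, client.ObjectKey{Name: name}, sc); err != nil {
        return fmt.Errorf("getting cache storage class %q: %w", name, err)
    }
    if sc.ReclaimPolicy != nil && *sc.ReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
        return fmt.Errorf("cache storage class %q must use reclaimPolicy Delete", name)
    }
    return nil
}
```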
### Cache Volume Configurations
The configurations below are introduced:
- **residentThresholdMB**: the minimum data size (in MB) to be processed (if available) for which a cache volume is created
- **cacheStorageClass**: the name of the storage class used to provision the cache PVC
Unlike `cacheLimitMB`, which is set on and affects the backup repository, the above two configurations are actually data mover configurations describing how to create cache volumes for data mover pods; and the two configurations don't need to be per backup repository. So we add them to the node-agent configuration.
### Sample
Below are some examples of the node-agent configMap with the configurations:
Sample-1:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1",
        "residentThresholdMB": 1024
    }
}
```
Sample-2:
```json
{
    "cacheVolume": {
        "storageClass": "sc-1"
    }
}
```
Sample-3:
```json
{
    "cacheVolume": {
        "residentThresholdMB": 1024
    }
}
```
**sample-1**: This is a valid configuration. Restores with backup data size larger than 1G will be assigned a cache volume using storage class `sc-1`.
**sample-2**: This is a valid configuration. Data mover pods are always assigned a cache volume using storage class `sc-1`.
**sample-3**: This is not a valid configuration because the storage class is absent. Velero gives up creating a cache volume.
To create the configMap, users need to save something like the above sample to a JSON file and then run the below command:
```
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
```
The cache volume configurations are read by the node-agent server, so users also need to specify `--node-agent-configmap` in the `velero node-agent` parameters.
## Detailed Design
### Backup and Restore
The restore needs to know the backup size so as to calculate the cache volume size, so new fields are added to the DataDownload and PodVolumeRestore CRDs.
A `snapshotSize` field is added to the DataDownload and PodVolumeRestore `spec`:
```yaml
spec:
  snapshotID:
    description: SnapshotID is the ID of the Velero backup snapshot to
      be restored from.
    type: string
  snapshotSize:
    description: SnapshotSize is the logical size of the snapshot.
    format: int64
    type: integer
```
`snapshotSize` represents the total size of the backup; during restore, the value is transferred from the DataUpload/PodVolumeBackup's `Status.Progress.TotalBytes` to the DataDownload/PodVolumeRestore.
It is unlikely that `Status.Progress.TotalBytes` from the DataUpload/PodVolumeBackup is unavailable, but if that happens, then according to the above formula `residentThresholdMB` is ignored and the cache volume size is calculated directly from the cache limit for the corresponding backup repository.
### Exposer
Cache volume configurations are retrieved by node-agent and passed through the DataDownload/PodVolumeRestore to the GenericRestore exposer/PodVolume exposer.
The exposers are responsible for calculating the cache volume size, creating the cache PVCs, and mounting them to the restorePods.
If the calculated cache volume size is 0, or any of the critical parameters is missing (e.g., the cache volume storage class), the exposers ignore the cache volume configuration and continue creating the restorePods without cache volumes, so there is no impact on the result of the restore.
The exposers mount the cache volume to a predefined directory and pass the directory to the data mover pods through the `cache-volume-path` parameter.
The below data structures are added to the exposers' expose parameters:
```go
type GenericRestoreExposeParam struct {
    // RestoreSize specifies the data size for the volume to be restored
    RestoreSize int64

    // CacheVolume specifies the info for cache volumes
    CacheVolume *CacheVolumeInfo
}

type PodVolumeExposeParam struct {
    // RestoreSize specifies the data size for the volume to be restored
    RestoreSize int64

    // CacheVolume specifies the info for cache volumes
    CacheVolume *repocache.CacheConfigs
}

type CacheConfigs struct {
    // StorageClass specifies the storage class for cache volumes
    StorageClass string

    // Limit specifies the maximum size of the cache data
    Limit int64

    // ResidentThreshold specifies the minimum size of the cache data to create a cache volume
    ResidentThreshold int64
}
```
### Data Mover Pods
Data mover pods retrieve the cache volume directory from the `cache-volume-path` parameter and pass it to the Unified Repository.
If the directory is empty, the Unified Repository uses the resident location for the data cache, that is, the root file system.
### Kopia Repository
Kopia repository supports cache directory configuration for both metadata and data. The existing `SetupConnectOptions` is modified to customize the `CacheDirectory`:
```go
func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions {
    ...
    return repo.ConnectOptions{
        CachingOptions: content.CachingOptions{
            CacheDirectory: cacheDir,
            ...
        },
        ...
    }
}
```
[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
[2]: Implemented/vgdp-micro-service/vgdp-micro-service.md
[3]: Implemented/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md
[4]: Implemented/repo_maintenance_job_config.md
[5]: Implemented/backup-repo-config.md

View File

@@ -1,115 +0,0 @@
# Wildcard Namespace Support
## Abstract
Velero currently treats namespace patterns with glob characters as literal strings. This design adds wildcard expansion to support flexible namespace selection using patterns like `app-*` or `test-{dev,staging}`.
## Background
Requested in [#1874](https://github.com/vmware-tanzu/velero/issues/1874) for more flexible namespace selection.
## Goals
- Support glob pattern expansion in namespace includes/excludes
- Maintain backward compatibility with existing `*` behavior
## Non-Goals
- Complex regex patterns beyond basic globs
## High-Level Design
Wildcard expansion occurs early in both backup and restore flows, converting patterns to literal namespace lists before normal processing.
### Backup Flow
Expansion happens in `getResourceItems()` before namespace collection:
1. Check if wildcards exist using `ShouldExpandWildcards()`
2. Expand patterns against active cluster namespaces
3. Replace includes/excludes with expanded literal namespaces
4. Continue with normal backup processing
### Restore Flow
Expansion occurs in `execute()` after parsing backup contents:
1. Extract available namespaces from backup tar
2. Expand patterns against backup namespaces (not cluster namespaces)
3. Update restore context with expanded namespaces
4. Continue with normal restore processing
This ensures restore wildcards match actual backup contents, not current cluster state.
## Detailed Design
### Status Fields
Add wildcard expansion tracking to backup and restore CRDs:
```go
type WildcardNamespaceStatus struct {
    // IncludeWildcardMatches records namespaces that matched include patterns
    // +optional
    IncludeWildcardMatches []string `json:"includeWildcardMatches,omitempty"`

    // ExcludeWildcardMatches records namespaces that matched exclude patterns
    // +optional
    ExcludeWildcardMatches []string `json:"excludeWildcardMatches,omitempty"`

    // WildcardResult records final namespaces after wildcard processing
    // +optional
    WildcardResult []string `json:"wildcardResult,omitempty"`
}

// Added to both BackupStatus and RestoreStatus
type BackupStatus struct {
    // WildcardNamespaces contains wildcard expansion results
    // +optional
    WildcardNamespaces *WildcardNamespaceStatus `json:"wildcardNamespaces,omitempty"`
}
```
### Wildcard Expansion Package
New `pkg/util/wildcard/expand.go` package provides:
- `ShouldExpandWildcards()` - Skip expansion for simple "*" case
- `ExpandWildcards()` - Main expansion function using `github.com/gobwas/glob`
- Pattern validation rejecting unsupported regex symbols
**Supported patterns**: `*`, `?`, `[abc]`, `{a,b,c}`
**Unsupported**: `|()`, `**`
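A minimal sketch of the expansion step, assuming the `github.com/gobwas/glob` matcher and the pattern rules listed above; the helper name and signature are illustrative and may differ from the real `pkg/util/wildcard` API.
```go
package wildcard

import (
    "fmt"
    "strings"

    "github.com/gobwas/glob"
)

// expandPatterns matches each pattern against the candidate namespaces
// (cluster namespaces for backup, namespaces found in the backup tar for
// restore) and returns the literal names that matched, deduplicated.
func expandPatterns(patterns, namespaces []string) ([]string, error) {
    seen := map[string]bool{}
    matched := []string{}
    for _, p := range patterns {
        // Reject patterns outside the supported glob subset.
        if strings.Contains(p, "**") || strings.ContainsAny(p, "|()") {
            return nil, fmt.Errorf("unsupported namespace pattern %q", p)
        }
        g, err := glob.Compile(p)
        if err != nil {
            return nil, fmt.Errorf("invalid namespace pattern %q: %w", p, err)
        }
        for _, ns := range namespaces {
            if g.Match(ns) && !seen[ns] {
                seen[ns] = true
                matched = append(matched, ns)
            }
        }
    }
    return matched, nil
}
```
A pattern such as `invalid*` that matches nothing simply contributes no namespaces, which is how empty wildcard results lead to empty backups.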
### Implementation Details
#### Backup Integration (`pkg/backup/item_collector.go`)
Expansion in `getResourceItems()`:
- Call `wildcard.ExpandWildcards()` with cluster namespaces
- Update `NamespaceIncludesExcludes` with expanded results
- Populate status fields with expansion results
#### Restore Integration (`pkg/restore/restore.go`)
Expansion in `execute()`:
```go
if wildcard.ShouldExpandWildcards(includes, excludes) {
    availableNamespaces := extractNamespacesFromBackup(backupResources)
    expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
        availableNamespaces, includes, excludes)
    // Update context and status
}
```
## Alternatives Considered
1. **Client-side expansion**: Rejected because it wouldn't work for scheduled backups
2. **Expansion in `collectNamespaces`**: Rejected because these functions expect literal namespaces
## Compatibility
Maintains full backward compatibility - existing "*" behavior unchanged.
## Implementation
Target: Velero 1.18

go.mod (18 changed lines)
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.25.0
go 1.24.0
require (
cloud.google.com/go/storage v1.55.0
@@ -41,9 +41,10 @@ require (
github.com/stretchr/testify v1.10.0
github.com/vmware-tanzu/crash-diagnostics v0.3.7
go.uber.org/zap v1.27.0
golang.org/x/mod v0.29.0
golang.org/x/mod v0.26.0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0
golang.org/x/text v0.31.0
golang.org/x/text v0.27.0
google.golang.org/api v0.241.0
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
@@ -179,14 +180,13 @@ require (
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.34.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect

go.sum (32 changed lines)
View File

@@ -794,8 +794,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -833,8 +833,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -880,8 +880,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -908,8 +908,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -973,14 +973,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -990,8 +990,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1051,8 +1051,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$TARGETPLATFORM golang:1.25-bookworm
FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
ARG GOPROXY
@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
chmod +x /usr/bin/goreleaser
# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl

View File

@@ -103,14 +103,6 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
snapCont.ResourceVersion = ""
if snapCont.Spec.VolumeSnapshotClassName != nil {
// Delete VolumeSnapshotClass from the VolumeSnapshotContent.
// This is necessary to make the deletion independent of the VolumeSnapshotClass.
snapCont.Spec.VolumeSnapshotClassName = nil
p.log.Debugf("Deleted VolumeSnapshotClassName from VolumeSnapshotContent %s to make deletion independent of VolumeSnapshotClass",
snapCont.Name)
}
if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
}

View File

@@ -70,7 +70,7 @@ func TestVSCExecute(t *testing.T) {
},
{
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).VolumeSnapshotClassName("volumesnapshotclass").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: false,
function: func(
@@ -82,7 +82,7 @@ func TestVSCExecute(t *testing.T) {
},
},
{
name: "Error case, deletion fails",
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: true,

View File

@@ -170,9 +170,6 @@ type SnapshotDataMovementInfo struct {
// Moved snapshot data size.
Size int64 `json:"size"`
// Moved snapshot incremental size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The DataUpload's Status.Phase value
Phase velerov2alpha1.DataUploadPhase
}
@@ -220,9 +217,6 @@ type PodVolumeInfo struct {
// The snapshot corresponding volume size.
Size int64 `json:"size,omitempty"`
// The incremental snapshot size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
UploaderType string `json:"uploaderType"`
@@ -246,15 +240,14 @@ type PodVolumeInfo struct {
func newPodVolumeInfoFromPVB(pvb *velerov1api.PodVolumeBackup) *PodVolumeInfo {
return &PodVolumeInfo{
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
IncrementalSize: pvb.Status.IncrementalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
}
}

View File

@@ -288,7 +288,7 @@ const (
// BackupPhase is a string representation of the lifecycle phase
// of a Velero backup.
// +kubebuilder:validation:Enum=New;Queued;ReadyToStart;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting
type BackupPhase string
const (
@@ -296,12 +296,6 @@ const (
// yet processed by the BackupController.
BackupPhaseNew BackupPhase = "New"
// BackupPhaseQueued means the backup has been added to the queue and is waiting for the Queue to move it out of the queue.
BackupPhaseQueued BackupPhase = "Queued"
// BackupPhaseReadyToStart means the backup has been pulled from the queue and is ready to start.
BackupPhaseReadyToStart BackupPhase = "ReadyToStart"
// BackupPhaseFailedValidation means the backup has failed
// the controller's validations and therefore will not run.
BackupPhaseFailedValidation BackupPhase = "FailedValidation"
@@ -377,11 +371,6 @@ type BackupStatus struct {
// +optional
Phase BackupPhase `json:"phase,omitempty"`
// QueuePosition is the position of the backup in the queue.
// Only relevant when Phase is "Queued"
// +optional
QueuePosition int `json:"queuePosition,omitempty"`
// ValidationErrors is a slice of all validation errors (if
// applicable).
// +optional

View File

@@ -118,10 +118,6 @@ type PodVolumeBackupStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// AcceptedTimestamp records the time the pod volume backup is to be prepared.
// The server's time is used for AcceptedTimestamp
// +optional
@@ -138,7 +134,6 @@ type PodVolumeBackupStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"

View File

@@ -58,10 +58,6 @@ type PodVolumeRestoreSpec struct {
// Cancel indicates request to cancel the ongoing PodVolumeRestore. It can be set
// when the PodVolumeRestore is in InProgress phase
Cancel bool `json:"cancel,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.

View File

@@ -58,10 +58,6 @@ type DataDownloadSpec struct {
// NodeOS is OS of the node where the DataDownload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// TargetVolumeSpec is the specification for a target PVC.

View File

@@ -155,10 +155,6 @@ type DataUploadStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// Node is name of the node where the DataUpload is processed.
// +optional
Node string `json:"node,omitempty"`
@@ -189,7 +185,6 @@ type DataUploadStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"
@@ -249,8 +244,4 @@ type DataUploadResult struct {
// NodeOS is OS of the node where the DataUpload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}

View File

@@ -621,30 +621,8 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
}
// Filter PVCs by volume policy
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup)
if err != nil {
return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
}
// Warn if any PVCs were filtered out
if len(filteredPVCs) < len(groupedPVCs) {
for _, originalPVC := range groupedPVCs {
found := false
for _, filteredPVC := range filteredPVCs {
if originalPVC.Name == filteredPVC.Name {
found = true
break
}
}
if !found {
p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
}
}
}
// Determine the CSI driver for the grouped PVCs
driver, err := p.determineCSIDriver(filteredPVCs)
driver, err := p.determineCSIDriver(groupedPVCs)
if err != nil {
return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
}
@@ -665,7 +643,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
}
// Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
if err != nil {
return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
}
@@ -756,40 +734,6 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
return pvcList.Items, nil
}
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
pvcs []corev1api.PersistentVolumeClaim,
backup *velerov1api.Backup,
) ([]corev1api.PersistentVolumeClaim, error) {
var filteredPVCs []corev1api.PersistentVolumeClaim
for _, pvc := range pvcs {
// Convert PVC to unstructured for ShouldPerformSnapshotWithBackup
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
}
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
// Check if this PVC should be snapshotted according to volume policies
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
unstructuredPVC,
kuberesource.PersistentVolumeClaims,
*backup,
p.crClient,
p.log,
)
if err != nil {
return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
}
if shouldSnapshot {
filteredPVCs = append(filteredPVCs, pvc)
}
}
return filteredPVCs, nil
}
func (p *pvcBackupItemAction) determineCSIDriver(
pvcs []corev1api.PersistentVolumeClaim,
) (string, error) {
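The hunks above convert typed PVC objects to unstructured form before handing them to the volume-policy helper. Below is a minimal, standalone sketch of that conversion round trip using only apimachinery; the PVC here is a placeholder, not Velero's actual input.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "demo"},
	}

	// Typed -> unstructured: the shape generic helpers (such as
	// volume-policy checks) typically expect.
	obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
	if err != nil {
		panic(err)
	}
	u := &unstructured.Unstructured{Object: obj}
	fmt.Println(u.GetNamespace(), u.GetName())

	// Unstructured -> typed, for when the result must go back into
	// strongly typed code.
	var back corev1.PersistentVolumeClaim
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Name == pvc.Name)
}
```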

View File

@@ -586,280 +586,6 @@ func TestListGroupedPVCs(t *testing.T) {
}
}
func TestFilterPVCsByVolumePolicy(t *testing.T) {
tests := []struct {
name string
pvcs []corev1api.PersistentVolumeClaim
pvs []corev1api.PersistentVolume
volumePolicyStr string
expectCount int
expectError bool
}{
{
name: "All PVCs should be included when no volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-1",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-2",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
},
},
},
},
expectCount: 2,
},
{
name: "Filter out NFS PVC by volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-csi",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 1,
},
{
name: "All PVCs filtered out by volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs-1",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs-2",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export/1",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export/2",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 0,
},
{
name: "Filter out non-CSI PVCs from mixed driver group",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-linstor",
Namespace: "ns-1",
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-linstor",
StorageClassName: pointer.String("sc-linstor"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-nfs",
Namespace: "ns-1",
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
objs := []runtime.Object{}
for i := range tt.pvs {
objs = append(objs, &tt.pvs[i])
}
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
backup := &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
Spec: velerov1api.BackupSpec{},
}
// Add volume policy ConfigMap if specified
if tt.volumePolicyStr != "" {
cm := &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "volume-policy",
Namespace: "velero",
},
Data: map[string]string{
"volume-policy": tt.volumePolicyStr,
},
}
require.NoError(t, client.Create(t.Context(), cm))
backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{
Kind: "ConfigMap",
Name: "volume-policy",
}
}
action := &pvcBackupItemAction{
log: velerotest.NewLogger(),
crClient: client,
}
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup)
if tt.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Len(t, result, tt.expectCount)
// For mixed driver scenarios, verify filtered result can determine single CSI driver
if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 {
driver, err := action.determineCSIDriver(result)
require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error")
require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS")
}
}
})
}
}
func TestDetermineCSIDriver(t *testing.T) {
tests := []struct {
name string

View File

@@ -84,6 +84,17 @@ func (p *volumeSnapshotBackupItemAction) Execute(
return nil, nil, "", nil, errors.WithStack(err)
}
additionalItems := make([]velero.ResourceIdentifier, 0)
if vs.Spec.VolumeSnapshotClassName != nil {
additionalItems = append(
additionalItems,
velero.ResourceIdentifier{
GroupResource: kuberesource.VolumeSnapshotClasses,
Name: *vs.Spec.VolumeSnapshotClassName,
},
)
}
if backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
p.log.
@@ -94,24 +105,6 @@ func (p *volumeSnapshotBackupItemAction) Execute(
return item, nil, "", nil, nil
}
additionalItems := make([]velero.ResourceIdentifier, 0)
if vs.Spec.VolumeSnapshotClassName != nil {
// This is still needed to add the VolumeSnapshotClass to the backup.
// The secret with VolumeSnapshotClass is still relevant to backup.
additionalItems = append(
additionalItems,
velero.ResourceIdentifier{
GroupResource: kuberesource.VolumeSnapshotClasses,
Name: *vs.Spec.VolumeSnapshotClassName,
},
)
// Because async operation will update VolumeSnapshot during finalizing phase.
// No matter what we do, VolumeSnapshotClass cannot be deleted. So skip it.
// Just deleting VolumeSnapshotClass during restore and delete is enough.
}
p.log.Infof("Getting VolumesnapshotContent for Volumesnapshot %s/%s",
vs.Namespace, vs.Name)

View File

@@ -97,10 +97,6 @@ func (p *volumeSnapshotContentBackupItemAction) Execute(
})
}
// Because async operation will update VolumeSnapshotContent during finalizing phase.
// No matter what we do, VolumeSnapshotClass cannot be deleted. So skip it.
// Just deleting VolumeSnapshotClass during restore and delete is enough.
snapContMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&snapCont)
if err != nil {
return nil, nil, "", nil, errors.WithStack(err)

View File

@@ -42,7 +42,7 @@ func TestVSCExecute(t *testing.T) {
expectedItems []velero.ResourceIdentifier
}{
{
name: "Invalid VolumeSnapshotContent",
name: "Invalid VolumeSnapshotClass",
item: velerotest.UnstructuredOrDie(
`
{

View File

@@ -117,6 +117,7 @@ type kubernetesBackupper struct {
podCommandExecutor podexec.PodCommandExecutor
podVolumeBackupperFactory podvolume.BackupperFactory
podVolumeTimeout time.Duration
podVolumeContext context.Context
defaultVolumesToFsBackup bool
clientPageSize int
uploaderType string
@@ -167,39 +168,10 @@ func NewKubernetesBackupper(
}, nil
}
// getNamespaceIncludesExcludesAndArgoCDNamespaces returns an IncludesExcludes list containing which namespaces to
// include and exclude from the backup and a list of namespaces managed by ArgoCD.
func getNamespaceIncludesExcludesAndArgoCDNamespaces(backup *velerov1api.Backup, kbClient kbclient.Client) (*collections.NamespaceIncludesExcludes, []string, error) {
nsList := corev1api.NamespaceList{}
activeNamespaces := []string{}
nsManagedByArgoCD := []string{}
if err := kbClient.List(context.Background(), &nsList); err != nil {
return nil, nsManagedByArgoCD, err
}
for _, ns := range nsList.Items {
activeNamespaces = append(activeNamespaces, ns.Name)
}
// Set ActiveNamespaces first, then set includes/excludes
includesExcludes := collections.NewNamespaceIncludesExcludes().
ActiveNamespaces(activeNamespaces).
Includes(backup.Spec.IncludedNamespaces...).
Excludes(backup.Spec.ExcludedNamespaces...)
// Expand wildcards if needed
if err := includesExcludes.ExpandIncludesExcludes(); err != nil {
return nil, []string{}, err
}
// Check for ArgoCD managed namespaces in the namespaces that will be included
for _, ns := range nsList.Items {
nsLabels := ns.GetLabels()
if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 && includesExcludes.ShouldInclude(ns.Name) {
nsManagedByArgoCD = append(nsManagedByArgoCD, ns.Name)
}
}
return includesExcludes, nsManagedByArgoCD, nil
// getNamespaceIncludesExcludes returns an IncludesExcludes list containing which namespaces to
// include and exclude from the backup.
func getNamespaceIncludesExcludes(backup *velerov1api.Backup) *collections.IncludesExcludes {
return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
}
func getResourceHooks(hookSpecs []velerov1api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]hook.ResourceHook, error) {
@@ -273,35 +245,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
if err := kb.writeBackupVersion(tw); err != nil {
return errors.WithStack(err)
}
var err error
var nsManagedByArgoCD []string
backupRequest.NamespaceIncludesExcludes, nsManagedByArgoCD, err = getNamespaceIncludesExcludesAndArgoCDNamespaces(backupRequest.Backup, kb.kbClient)
if err != nil {
log.WithError(err).Errorf("error getting namespace includes/excludes")
return err
}
if backupRequest.NamespaceIncludesExcludes.IsWildcardExpanded() {
expandedIncludes := backupRequest.NamespaceIncludesExcludes.GetIncludes()
expandedExcludes := backupRequest.NamespaceIncludesExcludes.GetExcludes()
// Get the final namespace list after wildcard expansion
wildcardResult, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
if err != nil {
log.WithError(err).Errorf("error resolving namespace list")
return err
}
log.WithFields(logrus.Fields{
"expandedIncludes": expandedIncludes,
"expandedExcludes": expandedExcludes,
"wildcardResult": wildcardResult,
"includedCount": len(expandedIncludes),
"excludedCount": len(expandedExcludes),
"resultCount": len(wildcardResult),
}).Info("Successfully expanded wildcard patterns")
}
backupRequest.NamespaceIncludesExcludes = getNamespaceIncludesExcludes(backupRequest.Backup)
log.Infof("Including namespaces: %s", backupRequest.NamespaceIncludesExcludes.IncludesString())
log.Infof("Excluding namespaces: %s", backupRequest.NamespaceIncludesExcludes.ExcludesString())
@@ -309,8 +254,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
// We will check for the existence of a ArgoCD label in the includedNamespaces and add a warning
// so that users are at least aware about the existence of argoCD managed ns in their backup
// Related Issue: https://github.com/vmware-tanzu/velero/issues/7905
if len(nsManagedByArgoCD) > 0 {
log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
if len(backupRequest.Spec.IncludedNamespaces) > 0 {
nsManagedByArgoCD := getNamespacesManagedByArgoCD(kb.kbClient, backupRequest.Spec.IncludedNamespaces, log)
if len(nsManagedByArgoCD) > 0 {
log.Warnf("backup operation may encounter complications and potentially produce undesirable results due to the inclusion of namespaces %v managed by ArgoCD in the backup.", nsManagedByArgoCD)
}
}
if collections.UseOldResourceFilters(backupRequest.Spec) {
@@ -335,6 +284,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
log.Infof("Backing up all volumes using pod volume backup: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToFsBackup))
var err error
backupRequest.ResourceHooks, err = getResourceHooks(backupRequest.Spec.Hooks.Resources, kb.discoveryHelper)
if err != nil {
log.WithError(errors.WithStack(err)).Debugf("Error from getResourceHooks")
@@ -364,12 +314,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
}
var podVolumeCancelFunc context.CancelFunc
podVolumeContext, podVolumeCancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
kb.podVolumeContext, podVolumeCancelFunc = context.WithTimeout(context.Background(), podVolumeTimeout)
defer podVolumeCancelFunc()
var podVolumeBackupper podvolume.Backupper
if kb.podVolumeBackupperFactory != nil {
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(kb.podVolumeContext, log, backupRequest.Backup, kb.uploaderType)
if err != nil {
log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
return errors.WithStack(err)
@@ -415,7 +365,6 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
kbClient: kb.kbClient,
discoveryHelper: kb.discoveryHelper,
podVolumeBackupper: podVolumeBackupper,
podVolumeContext: podVolumeContext,
podVolumeSnapshotTracker: podvolume.NewTracker(),
volumeSnapshotterCache: NewVolumeSnapshotterCache(volumeSnapshotterGetter),
itemHookHandler: &hook.DefaultItemHookHandler{
@@ -597,7 +546,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
log.Infof("Backing Up Item Block including %s %s/%s (%v items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items))
wg.Add(1)
backupRequest.WorkerPool.GetInputChannel() <- ItemBlockInput{
backupRequest.ItemBlockChannel <- ItemBlockInput{
itemBlock: itemBlock,
returnChan: itemBlockReturn,
}
@@ -848,7 +797,7 @@ func (kb *kubernetesBackupper) handleItemBlockPostHooks(itemBlock *BackupItemBlo
log := itemBlock.Log
// the post hooks will not execute until all PVBs of the item block pods are processed
if err := kb.waitUntilPVBsProcessed(itemBlock.itemBackupper.podVolumeContext, log, itemBlock, hookPods); err != nil {
if err := kb.waitUntilPVBsProcessed(kb.podVolumeContext, log, itemBlock, hookPods); err != nil {
log.WithError(err).Error("failed to wait PVBs processed for the ItemBlock")
return
}
@@ -1249,7 +1198,6 @@ func updateVolumeInfos(
volumeInfos[index].SnapshotDataMovementInfo.SnapshotHandle = dataUpload.Status.SnapshotID
volumeInfos[index].SnapshotDataMovementInfo.RetainedSnapshot = dataUpload.Spec.CSISnapshot.VolumeSnapshot
volumeInfos[index].SnapshotDataMovementInfo.Size = dataUpload.Status.Progress.TotalBytes
volumeInfos[index].SnapshotDataMovementInfo.IncrementalSize = dataUpload.Status.IncrementalBytes
volumeInfos[index].SnapshotDataMovementInfo.Phase = dataUpload.Status.Phase
if dataUpload.Status.Phase == velerov2alpha1.DataUploadPhaseCompleted {
@@ -1307,3 +1255,26 @@ func putVolumeInfos(
return backupStore.PutBackupVolumeInfos(backupName, backupVolumeInfoBuf)
}
func getNamespacesManagedByArgoCD(kbClient kbclient.Client, includedNamespaces []string, log logrus.FieldLogger) []string {
var nsManagedByArgoCD []string
for _, nsName := range includedNamespaces {
ns := corev1api.Namespace{}
if err := kbClient.Get(context.Background(), kbclient.ObjectKey{Name: nsName}, &ns); err != nil {
// check for only those ns that exist and are included in backup
// here we ignore cases like "" or "*" specified under includedNamespaces
if apierrors.IsNotFound(err) {
continue
}
log.WithError(err).Errorf("error getting namespace %s", nsName)
continue
}
nsLabels := ns.GetLabels()
if len(nsLabels[ArgoCDManagedByNamespaceLabel]) > 0 {
nsManagedByArgoCD = append(nsManagedByArgoCD, nsName)
}
}
return nsManagedByArgoCD
}
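The backup flow above pushes item blocks to workers through a single input channel, whether it is reached as `ItemBlockChannel` or via `WorkerPool.GetInputChannel()`. A minimal standard-library sketch of that fan-out pattern follows; the `itemBlockInput` type and worker count are illustrative, not Velero's real types.

```go
package main

import (
	"fmt"
	"sync"
)

type itemBlockInput struct {
	name       string
	returnChan chan<- string
}

// startWorkers fans work out to n goroutines reading from one input channel.
func startWorkers(n int, in <-chan itemBlockInput) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for item := range in { // exits when the channel is closed
				item.returnChan <- fmt.Sprintf("worker %d processed %s", id, item.name)
			}
		}(i)
	}
	return &wg
}

func main() {
	in := make(chan itemBlockInput)
	results := make(chan string, 3)

	wg := startWorkers(2, in)
	for _, name := range []string{"block-a", "block-b", "block-c"} {
		in <- itemBlockInput{name: name, returnChan: results}
	}
	close(in) // signal the workers to stop once the queue drains
	wg.Wait()

	close(results)
	for r := range results {
		fmt.Println(r)
	}
}
```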

View File

@@ -79,7 +79,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: &h.itemBlockPool,
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
}
backupFile := bytes.NewBuffer([]byte{})
@@ -141,7 +141,7 @@ func TestBackupProgressIsUpdated(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: &h.itemBlockPool,
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
}
backupFile := bytes.NewBuffer([]byte{})
@@ -881,7 +881,7 @@ func TestBackupOldResourceFiltering(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1062,7 +1062,7 @@ func TestCRDInclusion(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1161,7 +1161,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1190,7 +1190,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: &h.itemBlockPool,
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
}
backup1File := bytes.NewBuffer([]byte{})
@@ -1206,7 +1206,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: &h.itemBlockPool,
ItemBlockChannel: h.itemBlockPool.GetInputChannel(),
}
backup2File := bytes.NewBuffer([]byte{})
@@ -1260,7 +1260,7 @@ func TestBackupResourceOrdering(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1381,7 +1381,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
Backup: defaultBackup().SnapshotVolumes(false).Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
resPolicies: &resourcepolicies.ResourcePolicies{
Version: "v1",
@@ -1428,8 +1428,8 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
},
includedPVs: map[string]struct{}{},
},
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
BackedUpItems: NewBackedUpItemsMap(),
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVCs(
@@ -1679,7 +1679,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1764,7 +1764,7 @@ func TestBackupWithInvalidActions(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -1918,7 +1918,7 @@ func TestBackupActionModifications(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -2178,7 +2178,7 @@ func TestBackupActionAdditionalItems(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -2439,7 +2439,7 @@ func TestItemBlockActionsRunForCorrectItems(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -2524,7 +2524,7 @@ func TestBackupWithInvalidItemBlockActions(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -2780,7 +2780,7 @@ func TestItemBlockActionRelatedItems(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -2948,7 +2948,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -2984,7 +2984,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3021,7 +3021,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3058,7 +3058,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3095,7 +3095,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3130,7 +3130,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3148,7 +3148,7 @@ func TestBackupWithSnapshots(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3169,7 +3169,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3188,7 +3188,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3210,7 +3210,7 @@ func TestBackupWithSnapshots(t *testing.T) {
},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.PVs(
@@ -3344,7 +3344,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.Pods(
@@ -3376,7 +3376,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.Pods(
@@ -3408,7 +3408,7 @@ func TestBackupWithAsyncOperations(t *testing.T) {
Backup: defaultBackup().Result(),
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
},
apiResources: []*test.APIResource{
test.Pods(
@@ -3494,7 +3494,7 @@ func TestBackupWithInvalidHooks(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -3968,7 +3968,7 @@ func TestBackupWithHooks(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
podCommandExecutor = new(test.MockPodCommandExecutor)
@@ -4193,7 +4193,7 @@ func TestBackupWithPodVolume(t *testing.T) {
SnapshotLocations: []*velerov1.VolumeSnapshotLocation{tc.vsl},
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -5312,7 +5312,7 @@ func TestBackupNewResourceFiltering(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -5477,7 +5477,7 @@ func TestBackupNamespaces(t *testing.T) {
Backup: tc.backup,
SkippedPVTracker: NewSkipPVTracker(),
BackedUpItems: NewBackedUpItemsMap(),
WorkerPool: itemBlockPool,
ItemBlockChannel: itemBlockPool.GetInputChannel(),
}
backupFile = bytes.NewBuffer([]byte{})
)
@@ -5578,7 +5578,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseFailed).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5604,7 +5603,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseFailed,
},
},
@@ -5618,7 +5616,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5644,7 +5641,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseCompleted,
},
},
@@ -5659,7 +5655,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").

View File

@@ -69,7 +69,6 @@ type itemBackupper struct {
kbClient kbClient.Client
discoveryHelper discovery.Helper
podVolumeBackupper podvolume.Backupper
podVolumeContext context.Context
podVolumeSnapshotTracker *podvolume.Tracker
kubernetesBackupper *kubernetesBackupper
volumeSnapshotterCache *VolumeSnapshotterCache

View File

@@ -71,7 +71,7 @@ type itemCollector struct {
type nsTracker struct {
singleLabelSelector labels.Selector
orLabelSelector []labels.Selector
namespaceFilter *collections.NamespaceIncludesExcludes
namespaceFilter *collections.IncludesExcludes
logger logrus.FieldLogger
namespaceMap map[string]bool
@@ -103,7 +103,7 @@ func (nt *nsTracker) init(
unstructuredNSs []unstructured.Unstructured,
singleLabelSelector labels.Selector,
orLabelSelector []labels.Selector,
namespaceFilter *collections.NamespaceIncludesExcludes,
namespaceFilter *collections.IncludesExcludes,
logger logrus.FieldLogger,
) {
if nt.namespaceMap == nil {
@@ -635,7 +635,7 @@ func coreGroupResourcePriority(resource string) int {
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
func getNamespacesToList(ie *collections.NamespaceIncludesExcludes) []string {
func getNamespacesToList(ie *collections.IncludesExcludes) []string {
if ie == nil {
return []string{""}
}
@@ -753,28 +753,21 @@ func (r *itemCollector) collectNamespaces(
}
unstructuredList, err := resourceClient.List(metav1.ListOptions{})
activeNamespacesHashSet := make(map[string]bool)
for _, namespace := range unstructuredList.Items {
activeNamespacesHashSet[namespace.GetName()] = true
}
if err != nil {
log.WithError(errors.WithStack(err)).Error("error list namespaces")
return nil, errors.WithStack(err)
}
// Change to look at the struct includes/excludes
// In case wildcards are expanded, we need to look at the struct includes/excludes
for _, includedNSName := range r.backupRequest.NamespaceIncludesExcludes.GetIncludes() {
for _, includedNSName := range r.backupRequest.Backup.Spec.IncludedNamespaces {
nsExists := false
// Skip checking the namespace existing when it's "*".
if includedNSName == "*" {
continue
}
if _, ok := activeNamespacesHashSet[includedNSName]; ok {
nsExists = true
for _, unstructuredNS := range unstructuredList.Items {
if unstructuredNS.GetName() == includedNSName {
nsExists = true
}
}
if !nsExists {
@@ -816,18 +809,17 @@ func (r *itemCollector) collectNamespaces(
var items []*kubernetesResource
for index := range unstructuredList.Items {
nsName := unstructuredList.Items[index].GetName()
path, err := r.writeToFile(&unstructuredList.Items[index])
if err != nil {
log.WithError(err).Errorf("Error writing item %s to file", nsName)
log.WithError(err).Errorf("Error writing item %s to file",
unstructuredList.Items[index].GetName())
continue
}
items = append(items, &kubernetesResource{
groupResource: gr,
preferredGVR: preferredGVR,
name: nsName,
name: unstructuredList.Items[index].GetName(),
path: path,
kind: resource.Kind,
})
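The namespace-existence check above trades a nested loop over the listed namespaces for a pre-built hash set. A tiny illustrative sketch of the set-lookup variant, with placeholder namespace names:

```go
package main

import "fmt"

func main() {
	listed := []string{"default", "velero", "app-1"}

	// Build the set once: O(n) space, O(1) membership checks afterwards,
	// instead of re-scanning the list for every included namespace.
	active := make(map[string]struct{}, len(listed))
	for _, ns := range listed {
		active[ns] = struct{}{}
	}

	for _, included := range []string{"app-1", "missing", "*"} {
		if included == "*" {
			continue // wildcard: nothing to verify
		}
		if _, ok := active[included]; !ok {
			fmt.Printf("namespace %q is included in the backup but does not exist\n", included)
		}
	}
}
```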

View File

@@ -153,7 +153,7 @@ func TestFilterNamespaces(t *testing.T) {
func TestItemCollectorBackupNamespaces(t *testing.T) {
tests := []struct {
name string
ie *collections.NamespaceIncludesExcludes
ie *collections.IncludesExcludes
namespaces []*corev1api.Namespace
backup *velerov1api.Backup
expectedTrackedNS []string
@@ -162,7 +162,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{
name: "ns filter by namespace IE filter",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1"),
ie: collections.NewIncludesExcludes().Includes("ns1"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -174,7 +174,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -186,7 +186,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -198,7 +198,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1"),
ie: collections.NewIncludesExcludes().Excludes("ns1"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -210,7 +210,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewNamespaceIncludesExcludes().Excludes("ns1", "ns2"),
ie: collections.NewIncludesExcludes().Excludes("ns1", "ns2"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -221,7 +221,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{
name: "No ns filters",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("*"),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -231,7 +231,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{
name: "ns specified by the IncludeNamespaces cannot be found",
backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1", "invalid", "*"),
ie: collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Phase(corev1api.NamespaceActive).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),
@@ -242,7 +242,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{
name: "terminating ns should not be tracked",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewNamespaceIncludesExcludes().Includes("ns1", "ns2"),
ie: collections.NewIncludesExcludes().Includes("ns1", "ns2"),
namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").Phase(corev1api.NamespaceTerminating).Result(),
builder.ForNamespace("ns2").Phase(corev1api.NamespaceActive).Result(),

View File

@@ -57,7 +57,7 @@ type Request struct {
*velerov1api.Backup
StorageLocation *velerov1api.BackupStorageLocation
SnapshotLocations []*velerov1api.VolumeSnapshotLocation
NamespaceIncludesExcludes *collections.NamespaceIncludesExcludes
NamespaceIncludesExcludes *collections.IncludesExcludes
ResourceIncludesExcludes collections.IncludesExcludesInterface
ResourceHooks []hook.ResourceHook
ResolvedActions []framework.BackupItemResolvedActionV2
@@ -69,7 +69,7 @@ type Request struct {
ResPolicies *resourcepolicies.Policies
SkippedPVTracker *skipPVTracker
VolumesInformation volume.BackupVolumesInformation
WorkerPool *ItemBlockWorkerPool
ItemBlockChannel chan ItemBlockInput
}
// BackupVolumesInformation contains the information needs by generating
@@ -103,7 +103,3 @@ func (r *Request) FillVolumesInformation() {
r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
r.VolumesInformation.BackupName = r.Backup.Name
}
func (r *Request) StopWorkerPool() {
r.WorkerPool.Stop()
}

View File

@@ -222,12 +222,6 @@ func (b *BackupBuilder) Phase(phase velerov1api.BackupPhase) *BackupBuilder {
return b
}
// QueuePosition sets the Backup's queue position.
func (b *BackupBuilder) QueuePosition(queuePos int) *BackupBuilder {
b.object.Status.QueuePosition = queuePos
return b
}
// StorageLocation sets the Backup's storage location.
func (b *BackupBuilder) StorageLocation(location string) *BackupBuilder {
b.object.Spec.StorageLocation = location

View File

@@ -145,12 +145,6 @@ func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress)
return d
}
// IncrementalBytes sets the DataUpload's IncrementalBytes.
func (d *DataUploadBuilder) IncrementalBytes(incrementalBytes int64) *DataUploadBuilder {
d.object.Status.IncrementalBytes = incrementalBytes
return d
}
// Node sets the DataUpload's Node.
func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
d.object.Status.Node = node
@@ -186,9 +180,3 @@ func (d *DataUploadBuilder) Message(msg string) *DataUploadBuilder {
d.object.Status.Message = msg
return d
}
// TotalBytes sets the DataUpload's TotalBytes.
func (d *DataUploadBuilder) TotalBytes(size int64) *DataUploadBuilder {
d.object.Status.Progress.TotalBytes = size
return d
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package builder
import (
corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -88,9 +87,3 @@ func (b *StorageClassBuilder) Provisioner(provisioner string) *StorageClassBuild
b.object.Provisioner = provisioner
return b
}
// ReclaimPolicy sets StorageClass's reclaimPolicy.
func (b *StorageClassBuilder) ReclaimPolicy(policy corev1api.PersistentVolumeReclaimPolicy) *StorageClassBuilder {
b.object.ReclaimPolicy = &policy
return b
}

View File

@@ -102,11 +102,6 @@ type StatusUpdater interface {
UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error)
}
// Applier applies changes to an object using server-side apply
type Applier interface {
Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error)
}
// Dynamic contains client methods that Velero needs for backing up and restoring resources.
type Dynamic interface {
Creator
@@ -116,7 +111,6 @@ type Dynamic interface {
Patcher
Deletor
StatusUpdater
Applier
}
// dynamicResourceClient implements Dynamic.
@@ -142,10 +136,6 @@ func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstr
return d.resourceClient.Get(context.TODO(), name, opts)
}
func (d *dynamicResourceClient) Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) {
return d.resourceClient.Apply(context.TODO(), name, obj, opts)
}
func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) {
return d.resourceClient.Patch(context.TODO(), name, types.MergePatchType, data, metav1.PatchOptions{})
}
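The removed `Applier` interface wraps the dynamic client's server-side apply. Below is a hedged sketch of calling `Apply` on a dynamic resource client, assuming a client-go release that exposes it (roughly v0.25 and later); the GVR, object, and field manager are examples only.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func applyConfigMap(ctx context.Context, kubeconfig string) (*unstructured.Unstructured, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}

	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
		"data":       map[string]interface{}{"key": "value"},
	}}

	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}

	// Server-side apply: the API server merges this manifest with any
	// existing object, attributing the fields to the given field manager.
	return client.Resource(gvr).Namespace("default").Apply(
		ctx, "demo", obj, metav1.ApplyOptions{FieldManager: "example-manager", Force: true},
	)
}

func main() {
	if _, err := applyConfigMap(context.Background(), clientcmd.RecommendedHomeFile); err != nil {
		fmt.Println("apply failed:", err)
	}
}
```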

View File

@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
if err != nil {

View File

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -91,7 +91,7 @@ func TestNewDownloadCommand(t *testing.T) {
assert.NoError(t, e)
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -63,7 +63,7 @@ func TestNewGetCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestNewGetCommand(t *testing.T) {
e = d.Execute()
require.NoError(t, e)
cmd = exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd = exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err = veleroexec.RunCommand(cmd)
require.NoError(t, err)

View File

@@ -66,7 +66,7 @@ func TestNewDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -50,7 +50,7 @@ func TestNewGetCommand(t *testing.T) {
c.Execute()
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -99,7 +99,7 @@ func TestSetCommand_Execute(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -18,7 +18,6 @@ package bug
import (
"bytes"
"context"
"errors"
"fmt"
"net/url"
@@ -148,7 +147,7 @@ func getKubectlVersion() (string, error) {
return "", errors.New("kubectl not found on PATH")
}
kubectlCmd := exec.CommandContext(context.Background(), "kubectl", "version")
kubectlCmd := exec.Command("kubectl", "version")
var outbuf bytes.Buffer
kubectlCmd.Stdout = &outbuf
if err := kubectlCmd.Start(); err != nil {
@@ -208,17 +207,16 @@ func renderToString(bugInfo *VeleroBugInfo) (string, error) {
// a platform specific binary.
func showIssueInBrowser(body string) error {
url := issueURL + "?body=" + url.QueryEscape(body)
ctx := context.Background()
switch runtime.GOOS {
case "darwin":
return exec.CommandContext(ctx, "open", url).Start()
return exec.Command("open", url).Start()
case "linux":
if cmdExistsOnPath("xdg-open") {
return exec.CommandContext(ctx, "xdg-open", url).Start()
return exec.Command("xdg-open", url).Start()
}
return fmt.Errorf("velero can't open a browser window using the command '%s'", "xdg-open")
case "windows":
return exec.CommandContext(ctx, "rundll32", "url.dll,FileProtocolHandler", url).Start()
return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
default:
return fmt.Errorf("velero can't open a browser window on platform %s", runtime.GOOS)
}
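Several hunks above swap `exec.CommandContext` for plain `exec.Command`. The practical difference is that the context-aware variant kills the child process once the context is cancelled or times out. A small standard-library sketch, assuming a Unix-like environment with `echo` and `sleep` on PATH:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// exec.Command: the process runs until it exits on its own.
	out, err := exec.Command("echo", "plain command").CombinedOutput()
	fmt.Printf("%s err=%v\n", out, err)

	// exec.CommandContext: the process is killed if ctx is done first.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	err = exec.CommandContext(ctx, "sleep", "5").Run()
	fmt.Println("sleep with 100ms timeout:", err) // typically "signal: killed"
}
```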

View File

@@ -53,7 +53,6 @@ type dataMoverRestoreConfig struct {
volumePath string
volumeMode string
ddName string
cacheDir string
resourceTimeout time.Duration
}
@@ -90,7 +89,6 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be restored")
command.Flags().StringVar(&config.ddName, "data-download", config.ddName, "The data download name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -290,5 +288,5 @@ func (s *dataMoverRestore) createDataPathService() (dataPathService, error) {
return datamover.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.ddName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeMode(s.config.volumeMode),
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.config.cacheDir, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.logger), nil
}
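The restore command above binds its options to pflag flags on a cobra command and marks the mandatory ones as required. A minimal sketch of that pattern, assuming `github.com/spf13/cobra`; the flag names and defaults are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

type restoreConfig struct {
	volumePath      string
	resourceTimeout time.Duration
}

func newRestoreCommand() *cobra.Command {
	config := restoreConfig{resourceTimeout: 10 * time.Minute}

	cmd := &cobra.Command{
		Use:   "restore",
		Short: "Example command showing flag bindings",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Printf("restoring %s (timeout %s)\n", config.volumePath, config.resourceTimeout)
			return nil
		},
	}

	// Each flag is bound directly to a struct field.
	cmd.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
	cmd.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes")

	// MarkFlagRequired makes cobra fail fast when the flag is missing.
	_ = cmd.MarkFlagRequired("volume-path")

	return cmd
}

func main() {
	if err := newRestoreCommand().Execute(); err != nil {
		fmt.Println(err)
	}
}
```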

View File

@@ -89,10 +89,8 @@ type Options struct {
RepoMaintenanceJobConfigMap string
NodeAgentConfigMap string
ItemBlockWorkerCount int
ConcurrentBackups int
NodeAgentDisableHostPath bool
kubeletRootDir string
Apply bool
ServerPriorityClassName string
NodeAgentPriorityClassName string
}
@@ -103,7 +101,6 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.BucketName, "bucket", o.BucketName, "Name of the object storage bucket where backups should be stored")
flags.StringVar(&o.SecretFile, "secret-file", o.SecretFile, "File containing credentials for backup and volume provider. If not specified, --no-secret must be used for confirmation. Optional.")
flags.BoolVar(&o.NoSecret, "no-secret", o.NoSecret, "Flag indicating if a secret should be created. Must be used as confirmation if --secret-file is not provided. Optional.")
flags.BoolVar(&o.Apply, "apply", o.Apply, "Flag indicating if resources should be applied instead of created. This can be used for updating existing resources.")
flags.BoolVar(&o.NoDefaultBackupLocation, "no-default-backup-location", o.NoDefaultBackupLocation, "Flag indicating if a default backup location should be created. Must be used as confirmation if --bucket or --provider are not provided. Optional.")
flags.StringVar(&o.Image, "image", o.Image, "Image to use for the Velero and node agent pods. Optional.")
flags.StringVar(&o.Prefix, "prefix", o.Prefix, "Prefix under which all Velero data should be stored within the bucket. Optional.")
@@ -199,12 +196,6 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
o.ItemBlockWorkerCount,
"Number of worker threads to process ItemBlocks. Default is one. Optional.",
)
flags.IntVar(
&o.ConcurrentBackups,
"concurrent-backups",
o.ConcurrentBackups,
"Number of backups to process concurrently. Default is one. Optional.",
)
flags.StringVar(
&o.ServerPriorityClassName,
"server-priority-class-name",
@@ -322,7 +313,6 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
RepoMaintenanceJobConfigMap: o.RepoMaintenanceJobConfigMap,
NodeAgentConfigMap: o.NodeAgentConfigMap,
ItemBlockWorkerCount: o.ItemBlockWorkerCount,
ConcurrentBackups: o.ConcurrentBackups,
KubeletRootDir: o.kubeletRootDir,
NodeAgentDisableHostPath: o.NodeAgentDisableHostPath,
ServerPriorityClassName: o.ServerPriorityClassName,
@@ -418,7 +408,7 @@ func (o *Options) Run(c *cobra.Command, f client.Factory) error {
errorMsg := fmt.Sprintf("\n\nError installing Velero. Use `kubectl logs deploy/velero -n %s` to check the deploy logs", o.Namespace)
err = install.Install(dynamicFactory, kbClient, resources, os.Stdout, o.Apply)
err = install.Install(dynamicFactory, kbClient, resources, os.Stdout)
if err != nil {
return errors.Wrap(err, errorMsg)
}

View File

@@ -60,7 +60,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -85,7 +84,6 @@ type nodeAgentServerConfig struct {
resourceTimeout time.Duration
dataMoverPrepareTimeout time.Duration
nodeAgentConfig string
backupRepoConfig string
}
func NewServerCommand(f client.Factory) *cobra.Command {
@@ -123,7 +121,6 @@ func NewServerCommand(f client.Factory) *cobra.Command {
command.Flags().DurationVar(&config.dataMoverPrepareTimeout, "data-mover-prepare-timeout", config.dataMoverPrepareTimeout, "How long to wait for preparing a DataUpload/DataDownload. Default is 30 minutes.")
command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics")
command.Flags().StringVar(&config.nodeAgentConfig, "node-agent-configmap", config.nodeAgentConfig, "The name of ConfigMap containing node-agent configurations.")
command.Flags().StringVar(&config.backupRepoConfig, "backup-repository-configmap", config.backupRepoConfig, "The name of ConfigMap containing backup repository configurations.")
return command
}
@@ -143,9 +140,7 @@ type nodeAgentServer struct {
csiSnapshotClient *snapshotv1client.Clientset
dataPathMgr *datapath.Manager
dataPathConfigs *velerotypes.NodeAgentConfigs
backupRepoConfigs map[string]string
vgdpCounter *exposer.VgdpCounter
repoConfigMgr repository.ConfigManager
}
func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, config nodeAgentServerConfig) (*nodeAgentServer, error) {
@@ -239,7 +234,6 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
namespace: factory.Namespace(),
nodeName: nodeName,
metricsAddress: config.metricsAddress,
repoConfigMgr: repository.NewConfigManager(logger),
}
// the cache isn't initialized yet when "validatePodVolumesHostPath" is called, the client returned by the manager cannot
@@ -260,11 +254,6 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
if err := s.getDataPathConfigs(); err != nil {
return nil, err
}
if err := s.getBackupRepoConfigs(); err != nil {
return nil, err
}
s.dataPathMgr = datapath.NewManager(s.getDataPathConcurrentNum(defaultDataPathConcurrentNum))
return s, nil
@@ -340,30 +329,12 @@ func (s *nodeAgentServer) run() {
}
}
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
if err := s.validateCachePVCConfig(*s.dataPathConfigs.CachePVCConfig); err != nil {
s.logger.WithError(err).Warnf("Ignore cache config %v", s.dataPathConfigs.CachePVCConfig)
} else {
s.logger.Infof("Using cache volume configs %v", s.dataPathConfigs.CachePVCConfig)
}
}
var cachePVCConfig *velerotypes.CachePVC
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
cachePVCConfig = s.dataPathConfigs.CachePVCConfig
s.logger.Infof("Using customized cachePVC config %v", cachePVCConfig)
}
if s.backupRepoConfigs != nil {
s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
}
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, s.backupRepoConfigs, cachePVCConfig, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup, s.repoConfigMgr)
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -407,15 +378,12 @@ func (s *nodeAgentServer) run() {
s.vgdpCounter,
loadAffinity,
restorePVCConfig,
s.backupRepoConfigs,
cachePVCConfig,
podResources,
s.nodeName,
s.config.dataMoverPrepareTimeout,
s.logger,
s.metrics,
dataMovePriorityClass,
s.repoConfigMgr,
)
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
@@ -589,32 +557,14 @@ func (s *nodeAgentServer) getDataPathConfigs() error {
configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient, s.config.nodeAgentConfig)
if err != nil {
return errors.Wrapf(err, "error getting node agent configs from configMap %s", s.config.nodeAgentConfig)
s.logger.WithError(err).Errorf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig)
return err
}
s.dataPathConfigs = configs
return nil
}
func (s *nodeAgentServer) getBackupRepoConfigs() error {
if s.config.backupRepoConfig == "" {
s.logger.Info("No backup repo configMap is specified")
return nil
}
cm, err := s.kubeClient.CoreV1().ConfigMaps(s.namespace).Get(s.ctx, s.config.backupRepoConfig, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "error getting backup repo configs from configMap %s", s.config.backupRepoConfig)
}
if cm.Data == nil {
return errors.Errorf("no data is in the backup repo configMap %s", s.config.backupRepoConfig)
}
s.backupRepoConfigs = cm.Data
return nil
}
func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
configs := s.dataPathConfigs
@@ -670,20 +620,3 @@ func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
return concurrentNum
}
func (s *nodeAgentServer) validateCachePVCConfig(config velerotypes.CachePVC) error {
if config.StorageClass == "" {
return errors.New("storage class is absent")
}
sc, err := s.kubeClient.StorageV1().StorageClasses().Get(s.ctx, config.StorageClass, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "error getting storage class %s", config.StorageClass)
}
if sc.ReclaimPolicy != nil && *sc.ReclaimPolicy != corev1api.PersistentVolumeReclaimDelete {
return errors.Errorf("unexpected storage class reclaim policy %v", *sc.ReclaimPolicy)
}
return nil
}
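The removed helper reads repository settings from a ConfigMap through the typed client and rejects an empty Data map. Below is a hedged sketch of that read path, exercised against the fake clientset that the tests in the next file rely on; the ConfigMap name and keys are examples.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// readRepoConfig returns the Data of the named ConfigMap, or an error if it
// is missing or empty.
func readRepoConfig(ctx context.Context, client kubernetes.Interface, ns, name string) (map[string]string, error) {
	cm, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("getting configMap %s/%s: %w", ns, name, err)
	}
	if len(cm.Data) == 0 {
		return nil, fmt.Errorf("configMap %s/%s has no data", ns, name)
	}
	return cm.Data, nil
}

func main() {
	// fake.NewSimpleClientset gives an in-memory typed client, handy for tests.
	client := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "backup-repo-config", Namespace: "velero"},
		Data:       map[string]string{"cacheLimit": "100"},
	})

	data, err := readRepoConfig(context.Background(), client, "velero", "backup-repo-config")
	fmt.Println(data, err)
}
```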

View File

@@ -24,7 +24,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -35,8 +34,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/nodeagent"
testutil "github.com/vmware-tanzu/velero/pkg/test"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
func Test_validatePodVolumesHostPath(t *testing.T) {
@@ -145,10 +142,11 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error)
configMapName string
expectConfigs *velerotypes.NodeAgentConfigs
expectedErr string
expectLog string
}{
{
name: "no config specified",
name: "no config specified",
expectLog: "No node-agent configMap is specified",
},
{
name: "failed to get configs",
@@ -156,7 +154,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-get-error")
},
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-get-error",
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignoring it",
},
{
name: "configs cm not found",
@@ -164,7 +162,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-not-found-error")
},
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-not-found-error",
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignoring it",
},
{
@@ -179,21 +177,23 @@ func Test_getDataPathConfigs(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logBuffer := ""
s := &nodeAgentServer{
config: nodeAgentServerConfig{
nodeAgentConfig: test.configMapName,
},
logger: testutil.NewLogger(),
logger: testutil.NewSingleLogger(&logBuffer),
}
getConfigsFunc = test.getFunc
err := s.getDataPathConfigs()
if test.expectedErr == "" {
require.NoError(t, err)
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
s.getDataPathConfigs()
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
if test.expectLog == "" {
assert.Empty(t, logBuffer)
} else {
require.EqualError(t, err, test.expectedErr)
assert.Contains(t, logBuffer, test.expectLog)
}
})
}
@@ -416,117 +416,3 @@ func Test_getDataPathConcurrentNum(t *testing.T) {
})
}
}
func TestGetBackupRepoConfigs(t *testing.T) {
cmNoData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Result()
cmWithData := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Data("cacheLimit", "100").Result()
tests := []struct {
name string
configMapName string
kubeClientObj []runtime.Object
expectConfigs map[string]string
expectedErr string
}{
{
name: "no config specified",
},
{
name: "failed to get configs",
configMapName: "backup-repo-config",
expectedErr: "error getting backup repo configs from configMap backup-repo-config: configmaps \"backup-repo-config\" not found",
},
{
name: "configs data not found",
kubeClientObj: []runtime.Object{cmNoData},
configMapName: "backup-repo-config",
expectedErr: "no data is in the backup repo configMap backup-repo-config",
},
{
name: "succeed",
configMapName: "backup-repo-config",
kubeClientObj: []runtime.Object{cmWithData},
expectConfigs: map[string]string{"cacheLimit": "100"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
s := &nodeAgentServer{
namespace: velerov1api.DefaultNamespace,
kubeClient: fakeKubeClient,
config: nodeAgentServerConfig{
backupRepoConfig: test.configMapName,
},
logger: testutil.NewLogger(),
}
err := s.getBackupRepoConfigs()
if test.expectedErr == "" {
require.NoError(t, err)
require.Equal(t, test.expectConfigs, s.backupRepoConfigs)
} else {
require.EqualError(t, err, test.expectedErr)
}
})
}
}
func TestValidateCachePVCConfig(t *testing.T) {
scWithRetainPolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()
scWithDeletePolicy := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result()
scWithNoPolicy := builder.ForStorageClass("fake-storage-class").Result()
tests := []struct {
name string
config velerotypes.CachePVC
kubeClientObj []runtime.Object
expectedErr string
}{
{
name: "no storage class",
expectedErr: "storage class is absent",
},
{
name: "failed to get storage class",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
expectedErr: "error getting storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
},
{
name: "storage class reclaim policy is not expected",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithRetainPolicy},
expectedErr: "unexpected storage class reclaim policy Retain",
},
{
name: "storage class reclaim policy is delete",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithDeletePolicy},
},
{
name: "storage class with no reclaim policy",
config: velerotypes.CachePVC{StorageClass: "fake-storage-class"},
kubeClientObj: []runtime.Object{scWithNoPolicy},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
s := &nodeAgentServer{
kubeClient: fakeKubeClient,
}
err := s.validateCachePVCConfig(test.config)
if test.expectedErr == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, test.expectedErr)
}
})
}
}
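
Both tests above seed a fake clientset with prebuilt objects and then drive the server method under test against it. The same client-go fake works on its own outside a test; a minimal, hedged sketch (the StorageClass name is arbitrary and nothing here is Velero-specific):

package main

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Objects passed to NewSimpleClientset are served by the fake API
	// surface, so code under test can Get/List them without a cluster.
	sc := &storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "fake-storage-class"}}
	client := fake.NewSimpleClientset(sc)

	got, err := client.StorageV1().StorageClasses().Get(context.Background(), "fake-storage-class", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found storage class:", got.Name)
}
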

View File

@@ -51,7 +51,6 @@ import (
type podVolumeRestoreConfig struct {
volumePath string
pvrName string
cacheDir string
resourceTimeout time.Duration
}
@@ -87,7 +86,6 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.pvrName, "pod-volume-restore", config.pvrName, "The PVR name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -296,5 +294,5 @@ func (s *podVolumeRestore) createDataPathService() (dataPathService, error) {
return podvolume.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.pvrName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeFilesystem,
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.config.cacheDir, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.logger), nil
}
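
One side of the hunk above carries a --cache-volume-path flag on the node agent's pod-volume-restore subcommand, alongside --volume-path and --resource-timeout. To see the wiring pattern in isolation, here is a minimal, hedged cobra sketch that binds a required string flag and a duration flag into a config struct; the command name, defaults, and output are invented for the example:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
)

type restoreConfig struct {
	volumePath      string
	resourceTimeout time.Duration
}

func main() {
	config := restoreConfig{resourceTimeout: 10 * time.Minute}

	command := &cobra.Command{
		Use:   "pod-volume-restore",
		Short: "Run a pod volume restore data path (illustrative only)",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("restoring %s (resource timeout %s)\n", config.volumePath, config.resourceTimeout)
		},
	}

	// Flags are bound straight into the config struct, mirroring the style above.
	command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
	command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes")
	_ = command.MarkFlagRequired("volume-path")

	if err := command.Execute(); err != nil {
		os.Exit(1)
	}
}
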

View File

@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
if err != nil {

View File

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -62,7 +62,7 @@ func TestNewGetCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
require.NoError(t, err)

View File

@@ -47,13 +47,11 @@ const (
defaultDisableInformerCache = false
DefaultItemBlockWorkerCount = 1
DefaultConcurrentBackups = 1
)
var (
// DisableableControllers is a list of controllers that can be disabled
DisableableControllers = []string{
constant.ControllerBackupQueue,
constant.ControllerBackup,
constant.ControllerBackupOperations,
constant.ControllerBackupDeletion,
@@ -176,7 +174,6 @@ type Config struct {
BackupRepoConfig string
RepoMaintenanceJobConfig string
ItemBlockWorkerCount int
ConcurrentBackups int
}
func GetDefaultConfig() *Config {
@@ -209,7 +206,6 @@ func GetDefaultConfig() *Config {
ScheduleSkipImmediately: false,
CredentialsDirectory: credentials.DefaultStoreDirectory(),
ItemBlockWorkerCount: DefaultItemBlockWorkerCount,
ConcurrentBackups: DefaultConcurrentBackups,
}
return config
@@ -265,10 +261,4 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) {
c.ItemBlockWorkerCount,
"Number of worker threads to process ItemBlocks. Default is one. Optional.",
)
flags.IntVar(
&c.ConcurrentBackups,
"concurrent-backups",
c.ConcurrentBackups,
"Number of backups to process concurrently. Default is one. Optional.",
)
}
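
The flags bound in this file, item-block-worker-count and, on one side of the diff, concurrent-backups, are simple integer caps on how much work runs in parallel. Independent of Velero's actual scheduling code, a bound of that kind can be sketched with a buffered-channel semaphore; the item names and processing function below are illustrative only.

package main

import (
	"fmt"
	"sync"
)

// runBounded processes items with at most workerCount goroutines in
// flight at any time, which is the effect a worker-count flag is meant
// to have on a processing loop.
func runBounded(items []string, workerCount int, process func(string)) {
	sem := make(chan struct{}, workerCount)
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		sem <- struct{}{} // blocks once workerCount items are in flight
		go func(item string) {
			defer wg.Done()
			defer func() { <-sem }()
			process(item)
		}(it)
	}
	wg.Wait()
}

func main() {
	items := []string{"itemblock-1", "itemblock-2", "itemblock-3", "itemblock-4"}
	runBounded(items, 2, func(item string) { fmt.Println("processed", item) })
}
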

View File

@@ -581,7 +581,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
constant.ControllerSchedule: {},
constant.ControllerServerStatusRequest: {},
constant.ControllerRestoreFinalizer: {},
constant.ControllerBackupQueue: {},
}
if s.config.RestoreOnly {
@@ -669,7 +668,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.config.MaxConcurrentK8SConnections,
s.config.DefaultSnapshotMoveData,
s.config.ItemBlockWorkerCount,
s.config.ConcurrentBackups,
s.crClient,
).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackup)
@@ -758,7 +756,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.config.RepoMaintenanceJobConfig,
s.logLevel,
s.config.LogFormat,
s.metrics,
).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackupRepo)
}
@@ -912,18 +909,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
if _, ok := enabledRuntimeControllers[constant.ControllerBackupQueue]; ok {
if err := controller.NewBackupQueueReconciler(
s.mgr.GetClient(),
s.mgr.GetScheme(),
s.logger,
s.config.ConcurrentBackups,
backupTracker,
).SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackupQueue)
}
}
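
One side of the hunk above registers the BackupQueue controller only when it appears in the enabled-controllers set, then calls SetupWithManager against the shared manager. That registration shape is standard controller-runtime wiring; the hedged, self-contained sketch below shows it with an invented demoReconciler watching ConfigMaps, which is not Velero's controller and carries none of its logic.

package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// demoReconciler is a stand-in reconciler; it only logs the request it receives.
type demoReconciler struct {
	client.Client
}

func (r *demoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log.FromContext(ctx).Info("reconciling", "object", req.NamespacedName)
	return ctrl.Result{}, nil
}

func (r *demoReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).For(&corev1.ConfigMap{}).Complete(r)
}

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// Register the controller only when it is present in the enabled set,
	// mirroring the conditional registration pattern in the hunk above.
	enabled := map[string]struct{}{"demo": {}}
	if _, ok := enabled["demo"]; ok {
		if err := (&demoReconciler{Client: mgr.GetClient()}).SetupWithManager(mgr); err != nil {
			os.Exit(1)
		}
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
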
s.logger.Info("Server starting...")
if err := s.mgr.Start(s.ctx); err != nil {

View File

@@ -75,7 +75,6 @@ func DescribeBackup(
case velerov1api.BackupPhaseFinalizing, velerov1api.BackupPhaseFinalizingPartiallyFailed:
case velerov1api.BackupPhaseInProgress:
case velerov1api.BackupPhaseNew:
case velerov1api.BackupPhaseQueued, velerov1api.BackupPhaseReadyToStart:
}
logsNote := ""
@@ -84,9 +83,6 @@ func DescribeBackup(
}
d.Printf("Phase:\t%s%s\n", phaseString, logsNote)
if phase == velerov1api.BackupPhaseQueued {
d.Printf("Queue position:\t%v\n", backup.Status.QueuePosition)
}
if backup.Spec.ResourcePolicy != nil {
d.Println()
@@ -319,14 +315,8 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
}
// DescribeBackupStatus describes a backup status in human-readable format.
func DescribeBackupStatus(ctx context.Context,
kbClient kbclient.Client,
d *Describer,
backup *velerov1api.Backup,
details bool,
insecureSkipTLSVerify bool,
caCertPath string,
podVolumeBackups []velerov1api.PodVolumeBackup) {
func DescribeBackupStatus(ctx context.Context, kbClient kbclient.Client, d *Describer, backup *velerov1api.Backup, details bool,
insecureSkipTLSVerify bool, caCertPath string, podVolumeBackups []velerov1api.PodVolumeBackup) {
status := backup.Status
// Status.Version has been deprecated, use Status.FormatVersion
@@ -723,9 +713,6 @@ func describeDataMovement(d *Describer, details bool, info *volume.BackupVolumeI
d.Printf("\t\t\t\tData Mover: %s\n", dataMover)
d.Printf("\t\t\t\tUploader Type: %s\n", info.SnapshotDataMovementInfo.UploaderType)
d.Printf("\t\t\t\tMoved data Size (bytes): %d\n", info.SnapshotDataMovementInfo.Size)
if info.SnapshotDataMovementInfo.IncrementalSize > 0 {
d.Printf("\t\t\t\tIncremental data Size (bytes): %d\n", info.SnapshotDataMovementInfo.IncrementalSize)
}
d.Printf("\t\t\t\tResult: %s\n", info.Result)
} else {
d.Printf("\t\t\tData Movement: %s\n", "included, specify --details for more information")
@@ -848,7 +835,7 @@ func describePodVolumeBackups(d *Describer, details bool, podVolumeBackups []vel
backupsByPod := new(volumesByPod)
for _, backup := range backupsByPhase[phase] {
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress, backup.Status.IncrementalBytes)
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress)
}
d.Printf("\t\t%s:\n", phase)
@@ -898,8 +885,7 @@ type volumesByPod struct {
// Add adds a pod volume with the specified pod namespace, name
// and volume to the appropriate group.
// Used for both backup and restore
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress, incrementalBytes int64) {
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress) {
if v.volumesByPodMap == nil {
v.volumesByPodMap = make(map[string]*podVolumeGroup)
}
@@ -909,12 +895,6 @@ func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veler
// append backup progress percentage if backup is in progress
if phase == "In Progress" && progress.TotalBytes != 0 {
volume = fmt.Sprintf("%s (%.2f%%)", volume, float64(progress.BytesDone)/float64(progress.TotalBytes)*100)
} else if phase == string(velerov1api.PodVolumeBackupPhaseCompleted) && incrementalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v, incremental size: %v)", volume, progress.TotalBytes, incrementalBytes)
} else if (phase == string(velerov1api.PodVolumeBackupPhaseCompleted) ||
phase == string(velerov1api.PodVolumeRestorePhaseCompleted)) &&
progress.TotalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v)", volume, progress.TotalBytes)
}
if group, ok := v.volumesByPodMap[key]; !ok {
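
One side of the hunk above trims the completed-size and incremental-size annotations from the per-volume labels and keeps only the in-progress percentage. Isolated from the describe machinery, that remaining formatting step amounts to the short sketch below; the sample byte counts are made up.

package main

import "fmt"

// annotateVolume appends a completion percentage to the volume name
// while a pod volume backup is still in progress, matching the
// percentage formatting shown in the hunk above.
func annotateVolume(volume, phase string, bytesDone, totalBytes int64) string {
	if phase == "In Progress" && totalBytes != 0 {
		return fmt.Sprintf("%s (%.2f%%)", volume, float64(bytesDone)/float64(totalBytes)*100)
	}
	return volume
}

func main() {
	fmt.Println(annotateVolume("data", "In Progress", 512, 2048)) // data (25.00%)
	fmt.Println(annotateVolume("data", "Completed", 2048, 2048))  // data
}
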

Some files were not shown because too many files have changed in this diff.