Compare commits

..

1 Commits

Author SHA1 Message Date
Xun Jiang
4121808f38 Fix the Job build error when BackupReposiotry name longer than 63.
Fix the Job build error.
Consider the name length limitation change in job list code.

Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
2025-10-20 22:35:20 +08:00
205 changed files with 833 additions and 4787 deletions

View File

@@ -21,7 +21,7 @@ jobs:
minio-dockerfile-sha: ${{ steps.minio-version.outputs.dockerfile_sha }}
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -112,7 +112,7 @@ jobs:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -185,7 +185,7 @@ jobs:
timeout-minutes: 30
- name: Upload debug bundle
if: ${{ failure() }}
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: DebugBundle-k8s-${{ matrix.k8s }}-job-${{ strategy.job-index }}
name: DebugBundle
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*

View File

@@ -17,7 +17,7 @@ jobs:
version: ${{ steps.pick-version.outputs.version }}
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- id: pick-version
run: |

View File

@@ -19,7 +19,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master

View File

@@ -12,7 +12,7 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Changelog check
if: ${{ !(contains(github.event.pull_request.labels.*.name, 'kind/changelog-not-required') || contains(github.event.pull_request.labels.*.name, 'Design') || contains(github.event.pull_request.labels.*.name, 'Website') || contains(github.event.pull_request.labels.*.name, 'Documentation'))}}

View File

@@ -14,7 +14,7 @@ jobs:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6

View File

@@ -8,7 +8,7 @@ jobs:
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Codespell
uses: codespell-project/actions-codespell@master

View File

@@ -13,7 +13,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
name: Checkout
- name: Set up QEMU

View File

@@ -14,7 +14,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
name: Checkout
- name: Verify .goreleaser.yml and try a dryrun release.

View File

@@ -18,7 +18,7 @@ jobs:
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6
@@ -26,7 +26,7 @@ jobs:
go-version: ${{ needs.get-go-version.outputs.version }}
- name: Linter check
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v8
with:
version: v2.5.0
version: v2.1.1
args: --verbose

View File

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
# The default value is "1" which fetches only a single commit. If we merge PR without squash or rebase,
# there are at least two commits: the first one is the merge commit and the second one is the real commit

View File

@@ -12,7 +12,7 @@ jobs:
get-go-version:
uses: ./.github/workflows/get-go-version.yaml
with:
ref: ${{ github.ref_name }}
ref: ${{ github.ref }}
build:
name: Build
@@ -20,7 +20,7 @@ jobs:
needs: get-go-version
steps:
- name: Check out the code
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go version
uses: actions/setup-go@v6

View File

@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the latest code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Automatic Rebase

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
ARG GOPROXY
ARG BIN
@@ -49,7 +49,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Restic binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS restic-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS restic-builder
ARG GOPROXY
ARG BIN

View File

@@ -15,7 +15,7 @@
ARG OS_VERSION=1809
# Velero binary build section
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS velero-builder
FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS velero-builder
ARG GOPROXY
ARG BIN

View File

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.25 as tilt-helper
FROM golang:1.24 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -1 +0,0 @@
Fix issue #7725, add design for backup repo cache configuration

View File

@@ -1 +0,0 @@
Fix issue #9193, don't connect repo in repo controller

View File

@@ -1 +0,0 @@
Add cache configuration to VGDP

View File

@@ -1 +0,0 @@
Fix the Job build error when BackupReposiotry name longer than 63.

View File

@@ -1 +0,0 @@
Add cache dir configuration for udmrepo

View File

@@ -1 +0,0 @@
Add snapshotSize for DataDownload, PodVolumeRestore

View File

@@ -1 +0,0 @@
Add incrementalSize to DU/PVB for reporting new/changed size

View File

@@ -1 +0,0 @@
Support cache volume for generic restore exposer and pod volume exposer

View File

@@ -1 +0,0 @@
Fix managed fields patch for resources using GenerateName

View File

@@ -1 +0,0 @@
Track actual resource names for GenerateName in restore status

View File

@@ -1 +0,0 @@
Add cache volume configuration

View File

@@ -1 +0,0 @@
Fix issue #9365, prevent fake completion notification due to multiple update of single PVR

View File

@@ -1 +0,0 @@
Refactor repo provider interface for static configuration

View File

@@ -1 +0,0 @@
don't copy securitycontext from first container if configmap found

View File

@@ -1 +0,0 @@
Cache volume support for DataDownload

View File

@@ -1 +0,0 @@
Cache volume for PVR

View File

@@ -1 +0,0 @@
Fix issue #9400, connect repo first time after creation so that init params could be written

View File

@@ -1 +0,0 @@
Fix issue #9276, add doc for cache volume support

View File

@@ -1 +0,0 @@
Apply volume policies to VolumeGroupSnapshot PVC filtering

View File

@@ -1 +0,0 @@
Fix issue #9194, add doc for GOMAXPROCS behavior change

View File

@@ -33,12 +33,6 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -195,11 +189,6 @@ spec:
format: date-time
nullable: true
type: string
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the pod volume backup's status.
type: string

View File

@@ -133,10 +133,6 @@ spec:
snapshotID:
description: SnapshotID is the ID of the volume snapshot to be restored.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: SourceNamespace is the original namespace for namespace
mapping.

File diff suppressed because one or more lines are too long

View File

@@ -108,10 +108,6 @@ spec:
description: SnapshotID is the ID of the Velero backup snapshot to
be restored from.
type: string
snapshotSize:
description: SnapshotSize is the logical size in Bytes of the snapshot.
format: int64
type: integer
sourceNamespace:
description: |-
SourceNamespace is the original namespace where the volume is backed up from.

View File

@@ -33,12 +33,6 @@ spec:
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Incremental bytes
format: int64
jsonPath: .status.incrementalBytes
name: Incremental Bytes
priority: 10
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
@@ -179,11 +173,6 @@ spec:
as a result of the DataUpload.
nullable: true
type: object
incrementalBytes:
description: IncrementalBytes holds the number of bytes new or changed
since the last backup
format: int64
type: integer
message:
description: Message is a message about the DataUpload's status.
type: string

File diff suppressed because one or more lines are too long

View File

@@ -1,231 +0,0 @@
# Backup Repository Cache Volume Design
## Glossary & Abbreviation
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features that is introduced in [Unified Repository design][1].
**Velero Generic Data Path (VGDP)**: VGDP is the collective of modules that is introduced in [Unified Repository design][1]. Velero uses these modules to finish data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include uploaders and the backup repository.
**Data Mover Pods**: Intermediate pods which hold VGDP and complete the data transfer. See [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3] for details.
**Repository Maintenance Pods**: Pods for [Repository Maintenance Jobs][4], which holds VGDP to run repository maintenance.
## Background
According to the [Unified Repository design][1] Velero uses selectable backup repositories for various backup/restore methods, i.e., fs-backup, volume snapshot data movement, etc. Some backup repositories may need to cache data on the client side for various repository operation, so as to accelerate the execution.
In the existing [Backup Repository Configuration][5], we allow users to configure the cache data size (`cacheLimitMB`). However, the cache data is still stored in the root file system of data mover pods/repository maintenance pods, so stored in the root file system of the node. This is not good enough, reasons:
- In many distributions, the node's system disk size is predefined, non-configurable and limited, e.g., the system disk size may be 20G or less
- Velero supports concurrent data movements in each node. The cache in each of the concurrent data mover pods could quickly run out of the system disk and cause problems like pod eviction, failure of pod creation, degradation of Kubernetes QoS, etc.
We need to allow users to prepare a dedicated location, e.g., a dedicated volume, for the cache.
Not all backup repositories or not all backup repository operations require cache, we need to define the details when and how the cache is used.
## Goals
- Create a mechanism for users to configure cache volumes for various pods running VGDP
- Design the workflow to assign the cache volume pod path to backup repositories
- Describe when and how the cache volume is used
## Non-Goals
- The solution is based on [Unified Repository design][1], [VGDP Micro Service for Volume Snapshot Data Movement][2] and [VGDP Micro Service For fs-backup][3], legacy data paths are not supported. E.g., when a pod volume restore (PVR) runs with legacy Restic path, if any data is cached, the cache still resides in the root file system.
## Solution
### Cache Data
Varying on backup repositories, cache data may include payload data or repository metadata, e.g., indexes to the payload data chunks.
Payload data is highly related to the backup data, and normally take the majority of the repository data as well as the cache data.
Repository metadata is related to the backup repository's chunking algorithm, data chunk mapping method, etc, and so the size is not proportional to the backup data size.
On the other hand for some backup repository, in extreme cases, the repository metadata may be significantly large. E.g., Kopia's indexes are per chunks, if there are huge number of small files in the repository, Kopia's index data may be in the same level of or even larger than the payload data.
However, in the cases that repository metadata data become the majority, other bottlenecks may emerge and concurrency of data movers may be significantly constrained, so the requirement to cache volumes may go away.
Therefore, for now we only consider the cache volume requirement for payload data, and leave the consideration for metadata as a future enhancement.
### Scenarios
Backup repository cache varies on backup repositories and backup repository operation during VGDP runs. Below are the scenarios when VGDP runs:
- Data Upload for Backup: this is the process to upload/write the backup data into the backup repository, e.g., DataUpload or PodVolumeBackup. The pieces of data is almost directly written to the repository, sometimes with a small group staying shortly in the local place. That is to say, there should not be large scale data cached for this scenario, so we don't prepare dedicated cache for this scenario.
- Repository Maintenance: Repository maintenance most often visits the backup repository's metadata and sometimes it needs to visit the file system directories from the backed up data. On the other hand, it is not practical to run concurrent maintenance jobs in one node. So the cache data is neither large nor affect the root file system too much. Therefore, we don't need to prepare dedicated cache for this scenario.
- Data Download for Restore: this is the process to download/read the backup data from the backup repository during restore, e.g., DataDownload or PodVolumeRestore. For backup repositories for which data are stored in remote backup storages (e.g., Kopia repository stores data in remote object stores), large scale of data are cached locally to accelerate the restore. Therefore, we need dedicated cache volumes for this scenario.
- Backup Deletion: During this scenario, backup repository is connected, metadata is enumerated to find the repository snapshot representing the backup data. That is to say, only metadata is cached if any. Therefore, dedicated cache volumes are not required in this scenario.
The above analyses are based on the common behavior of backup repositories and they are not considering the case that backup repository metadata takes majority or significant proportion of the cache data.
As a conclusion of the analyses, we will create dedicated cache volumes for restore scenarios.
For other scenarios, we can add them regarded to the future changes/requirements. The mechanism to expose and connect the cache volumes should work for all scenarios. E.g., if we need to consider the backup repository metadata case, we may need cache volumes for backup and repository maintenance as well, then we can just reuse the same cache volume provision and connection mechanism to backup and repository maintenance scenarios.
### Cache Data and Lifecycle
If available, one cache volume is exclusively assigned to one data mover pod. That is, the cached data is destroyed when the data mover pod completes. Then the backup repository instance also closes.
Cache data are fully managed by the specific backup repository. So the backup repository may also have its own way to GC the cache data.
That is to say, cache data GC may be launched by the backup repository instance during the running of the data mover pod; then the remaining data is automatically destroyed when the data mover pod and the cache PVC are destroyed (the cache PVC's `reclaimPolicy` is always `Delete`, so once the cache PVC is destroyed, the volume will also be destroyed). So no special logic is needed for cache data GC.
### Data Size
Cache volumes take storage space and cluster resources (PVC, PV), therefore, cache volumes should be created only when necessary and the volumes should be with reasonable size based on the cache data size:
- It is not a good bargain to have cache volumes for small backups, small backups will use resident cache location (the cache location in the root file system)
- The cache data size has a limit, the existing `cacheLimitMB` is used for this purpose. E.g., it could be set as 1024 for a 1TB backup, which means 1GB of data is cached and the old cache data exceeding this size will be cleared. Therefore, it is meaningless to set the cache volume size much larger than `cacheLimitMB`
### Cache Volume Size
The cache volume size is calculated from below factors (for Restore scenarios):
- **Limit**: The limit of the cache data, that is represented by `cacheLimitMB`, the default value is 5GB
- **backupSize**: The size of the backup as a reference to evaluate whether to create a cache volume. It doesn't mean the backup data really decides the cache data all the time, it is just a reference to evaluate the scale of the backup, small scale backups may need small cache data. Sometimes, backupSize is irrelevant to the size of cache data; in this case, ResidentThreshold should not be set and Limit will be used directly. It is unlikely that backupSize is unavailable, but once that happens, ResidentThreshold is ignored and Limit will be used directly.
- **ResidentThreshold**: The minimum backup size that a cache volume is created
- **InflationPercentage**: Considering the overhead of the file system and the possible delay of the cache cleanup, there should be an inflation for the final volume size vs. the logical size, otherwise, the cache volume may be overrun. This inflation percentage is hardcoded, e.g., 20%.
A formula is as below:
```
cacheVolumeSize = ((backupSize != 0 ? (backupSize > residentThreshold ? limit : 0) : limit) * (100 + inflationPercentage)) / 100
```
Finally, the `cacheVolumeSize` will be rounded up to GiB considering the UX friendliness, storage friendliness and management friendliness.
### PVC/PV
The PVC for a cache volume is created in Velero namespace and a storage class is required for the cache PVC. The PVC's accessMode is `ReadWriteOnce` and volumeMode is `FileSystem`, so the storage class provided should support this specification. Otherwise, if the storageclass doesn't support either of the specifications, the data mover pod may hang in `Pending` state until a timeout set for the data movement (e.g. `prepareTimeout`) and the data movement will finally fail.
It is not expected that the cache volume is retained after data mover pod is deleted, so the `reclaimPolicy` for the storageclass must be `Delete`.
To detect the problems in the storageclass and fail earlier, a validation is applied to the storageclass and once the validation fails, the cache configuration will be ignored, so the data mover pod will be created without a cache volume.
### Cache Volume Configurations
Below configurations are introduced:
- **residentThresholdMB**: the minimum data size(in MB) to be processed (if available) that a cache volume is created
- **cacheStorageClass**: the name of the storage class to provision the cache PVC
Unlike `cacheLimitMB`, which is set to and affects the backup repository, the above two configurations are actually data mover configurations of how to create cache volumes for data mover pods; and the two configurations don't need to be per backup repository. So we add them to the node-agent Configuration.
### Sample
Below are some examples of the node-agent configMap with the configurations:
Sample-1:
```json
{
"cacheVolume": {
"storageClass": "sc-1",
"residentThresholdMB": 1024
}
}
```
Sample-2:
```json
{
"cacheVolume": {
"storageClass": "sc-1"
}
}
```
Sample-3:
```json
{
"cacheVolume": {
"residentThresholdMB": 1024
}
}
```
**sample-1**: This is a valid configuration. Restores with backup data size larger than 1G will be assigned a cache volume using storage class `sc-1`.
**sample-2**: This is a valid configuration. Data mover pods are always assigned a cache volume using storage class `sc-1`.
**sample-3**: This is not a valid configuration because the storage class is absent. Velero gives up creating a cache volume.
To create the configMap, users need to save something like the above sample to a json file and then run below command:
```
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
```
The cache volume configurations will be visited by node-agent server, so they also need to specify the `--node-agent-configmap` to the `velero node-agent` parameters.
## Detailed Design
### Backup and Restore
The restore needs to know the backup size so as to calculate the cache volume size, some new fields are added to the DataDownload and PodVolumeRestore CRDs.
`snapshotSize` field is also added to DataDownload and PodVolumeRestore's `spec`:
```yaml
spec:
snapshotID:
description: SnapshotID is the ID of the Velero backup snapshot to
be restored from.
type: string
snapshotSize:
description: SnapshotSize is the logical size of the snapshot.
format: int64
type: integer
```
`snapshotSize` represents the total size of the backup; during restore, the value is transferred from DataUpload/PodVolumeBackup's `Status.Progress.TotalBytes` to DataDownload/PodVolumeRestore.
It is unlikely that `Status.Progress.TotalBytes` from DataUpload/PodVolumeBackup is unavailable, but once it happens, according to the above formula, `residentThresholdMB` is ignored, cache volume size is calculated directly from cache limit for the corresponding backup repository.
### Exposer
Cache volume configurations are retrieved by node-agent and passed through DataDownload/PodVolumeRestore to GenericRestore exposer/PodVolume exposer.
The exposers are responsible to calculate cache volume size, create cache PVCs and mount them to the restorePods.
If the calculated cache volume size is 0, or any of the critical parameters is missing (e.g., cache volume storage class), the exposers ignore the cache volume configuration and continue with creating restorePods without cache volumes, so no impact to the result of the restore.
Exposers mount the cache volume to a predefined directory and pass the directory to the data mover pods through the `cache-volume-path` parameter.
Below data structure is added to the exposers' expose parameters:
```go
type GenericRestoreExposeParam struct {
// RestoreSize specifies the data size for the volume to be restored
RestoreSize int64
// CacheVolume specifies the info for cache volumes
CacheVolume *CacheVolumeInfo
}
type PodVolumeExposeParam struct {
// RestoreSize specifies the data size for the volume to be restored
RestoreSize int64
// CacheVolume specifies the info for cache volumes
CacheVolume *repocache.CacheConfigs
}
type CacheConfigs struct {
// StorageClass specifies the storage class for cache volumes
StorageClass string
// Limit specifies the maximum size of the cache data
Limit int64
// ResidentThreshold specifies the minimum size of the cache data to create a cache volume
ResidentThreshold int64
}
```
### Data Mover Pods
Data mover pods retrieve the cache volume directory from `cache-volume-path` parameter and pass it to Unified Repository.
If the directory is empty, Unified Repository uses the resident location for data cache, that is, the root file system.
### Kopia Repository
Kopia repository supports cache directory configuration for both metadata and data. The existing `SetupConnectOptions` is modified to customize the `CacheDirectory`:
```go
func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions {
...
return repo.ConnectOptions{
CachingOptions: content.CachingOptions{
CacheDirectory: cacheDir,
...
},
...
}
}
```
[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
[2]: Implemented/vgdp-micro-service/vgdp-micro-service.md
[3]: Implemented/vgdp-micro-service-for-fs-backup/vgdp-micro-service-for-fs-backup.md
[4]: Implemented/repo_maintenance_job_config.md
[5]: Implemented/backup-repo-config.md

View File

@@ -1,115 +0,0 @@
# Wildcard Namespace Support
## Abstract
Velero currently treats namespace patterns with glob characters as literal strings. This design adds wildcard expansion to support flexible namespace selection using patterns like `app-*` or `test-{dev,staging}`.
## Background
Requested in [#1874](https://github.com/vmware-tanzu/velero/issues/1874) for more flexible namespace selection.
## Goals
- Support glob pattern expansion in namespace includes/excludes
- Maintain backward compatibility with existing `*` behavior
## Non-Goals
- Complex regex patterns beyond basic globs
## High-Level Design
Wildcard expansion occurs early in both backup and restore flows, converting patterns to literal namespace lists before normal processing.
### Backup Flow
Expansion happens in `getResourceItems()` before namespace collection:
1. Check if wildcards exist using `ShouldExpandWildcards()`
2. Expand patterns against active cluster namespaces
3. Replace includes/excludes with expanded literal namespaces
4. Continue with normal backup processing
### Restore Flow
Expansion occurs in `execute()` after parsing backup contents:
1. Extract available namespaces from backup tar
2. Expand patterns against backup namespaces (not cluster namespaces)
3. Update restore context with expanded namespaces
4. Continue with normal restore processing
This ensures restore wildcards match actual backup contents, not current cluster state.
## Detailed Design
### Status Fields
Add wildcard expansion tracking to backup and restore CRDs:
```go
type WildcardNamespaceStatus struct {
// IncludeWildcardMatches records namespaces that matched include patterns
// +optional
IncludeWildcardMatches []string `json:"includeWildcardMatches,omitempty"`
// ExcludeWildcardMatches records namespaces that matched exclude patterns
// +optional
ExcludeWildcardMatches []string `json:"excludeWildcardMatches,omitempty"`
// WildcardResult records final namespaces after wildcard processing
// +optional
WildcardResult []string `json:"wildcardResult,omitempty"`
}
// Added to both BackupStatus and RestoreStatus
type BackupStatus struct {
// WildcardNamespaces contains wildcard expansion results
// +optional
WildcardNamespaces *WildcardNamespaceStatus `json:"wildcardNamespaces,omitempty"`
}
```
### Wildcard Expansion Package
New `pkg/util/wildcard/expand.go` package provides:
- `ShouldExpandWildcards()` - Skip expansion for simple "*" case
- `ExpandWildcards()` - Main expansion function using `github.com/gobwas/glob`
- Pattern validation rejecting unsupported regex symbols
**Supported patterns**: `*`, `?`, `[abc]`, `{a,b,c}`
**Unsupported**: `|()`, `**`
### Implementation Details
#### Backup Integration (`pkg/backup/item_collector.go`)
Expansion in `getResourceItems()`:
- Call `wildcard.ExpandWildcards()` with cluster namespaces
- Update `NamespaceIncludesExcludes` with expanded results
- Populate status fields with expansion results
#### Restore Integration (`pkg/restore/restore.go`)
Expansion in `execute()`:
```go
if wildcard.ShouldExpandWildcards(includes, excludes) {
availableNamespaces := extractNamespacesFromBackup(backupResources)
expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
availableNamespaces, includes, excludes)
// Update context and status
}
```
## Alternatives Considered
1. **Client-side expansion**: Rejected because it wouldn't work for scheduled backups
2. **Expansion in `collectNamespaces`**: Rejected because these functions expect literal namespaces
## Compatibility
Maintains full backward compatibility - existing "*" behavior unchanged.
## Implementation
Target: Velero 1.18

18
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.25.0
go 1.24.0
require (
cloud.google.com/go/storage v1.55.0
@@ -41,9 +41,10 @@ require (
github.com/stretchr/testify v1.10.0
github.com/vmware-tanzu/crash-diagnostics v0.3.7
go.uber.org/zap v1.27.0
golang.org/x/mod v0.29.0
golang.org/x/mod v0.26.0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0
golang.org/x/text v0.31.0
golang.org/x/text v0.27.0
google.golang.org/api v0.241.0
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
@@ -179,14 +180,13 @@ require (
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.34.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect

32
go.sum
View File

@@ -794,8 +794,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -833,8 +833,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -880,8 +880,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -908,8 +908,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -973,14 +973,14 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -990,8 +990,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1051,8 +1051,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$TARGETPLATFORM golang:1.25-bookworm
FROM --platform=$TARGETPLATFORM golang:1.24-bookworm
ARG GOPROXY
@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
chmod +x /usr/bin/goreleaser
# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl

View File

@@ -170,9 +170,6 @@ type SnapshotDataMovementInfo struct {
// Moved snapshot data size.
Size int64 `json:"size"`
// Moved snapshot incremental size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The DataUpload's Status.Phase value
Phase velerov2alpha1.DataUploadPhase
}
@@ -220,9 +217,6 @@ type PodVolumeInfo struct {
// The snapshot corresponding volume size.
Size int64 `json:"size,omitempty"`
// The incremental snapshot size.
IncrementalSize int64 `json:"incrementalSize,omitempty"`
// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
UploaderType string `json:"uploaderType"`
@@ -246,15 +240,14 @@ type PodVolumeInfo struct {
func newPodVolumeInfoFromPVB(pvb *velerov1api.PodVolumeBackup) *PodVolumeInfo {
return &PodVolumeInfo{
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
IncrementalSize: pvb.Status.IncrementalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
SnapshotHandle: pvb.Status.SnapshotID,
Size: pvb.Status.Progress.TotalBytes,
UploaderType: pvb.Spec.UploaderType,
VolumeName: pvb.Spec.Volume,
PodName: pvb.Spec.Pod.Name,
PodNamespace: pvb.Spec.Pod.Namespace,
NodeName: pvb.Spec.Node,
Phase: pvb.Status.Phase,
}
}

View File

@@ -118,10 +118,6 @@ type PodVolumeBackupStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// AcceptedTimestamp records the time the pod volume backup is to be prepared.
// The server's time is used for AcceptedTimestamp
// +optional
@@ -138,7 +134,6 @@ type PodVolumeBackupStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"

View File

@@ -58,10 +58,6 @@ type PodVolumeRestoreSpec struct {
// Cancel indicates request to cancel the ongoing PodVolumeRestore. It can be set
// when the PodVolumeRestore is in InProgress phase
Cancel bool `json:"cancel,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.

View File

@@ -58,10 +58,6 @@ type DataDownloadSpec struct {
// NodeOS is OS of the node where the DataDownload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}
// TargetVolumeSpec is the specification for a target PVC.

View File

@@ -155,10 +155,6 @@ type DataUploadStatus struct {
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// IncrementalBytes holds the number of bytes new or changed since the last backup
// +optional
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
// Node is name of the node where the DataUpload is processed.
// +optional
Node string `json:"node,omitempty"`
@@ -189,7 +185,6 @@ type DataUploadStatus struct {
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"
@@ -249,8 +244,4 @@ type DataUploadResult struct {
// NodeOS is OS of the node where the DataUpload is processed.
// +optional
NodeOS NodeOS `json:"nodeOS,omitempty"`
// SnapshotSize is the logical size in Bytes of the snapshot.
// +optional
SnapshotSize int64 `json:"snapshotSize,omitempty"`
}

View File

@@ -621,30 +621,8 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
}
// Filter PVCs by volume policy
filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup)
if err != nil {
return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
}
// Warn if any PVCs were filtered out
if len(filteredPVCs) < len(groupedPVCs) {
for _, originalPVC := range groupedPVCs {
found := false
for _, filteredPVC := range filteredPVCs {
if originalPVC.Name == filteredPVC.Name {
found = true
break
}
}
if !found {
p.log.Warnf("PVC %s/%s has VolumeGroupSnapshot label %s=%s but is excluded by volume policy", originalPVC.Namespace, originalPVC.Name, vgsLabelKey, group)
}
}
}
// Determine the CSI driver for the grouped PVCs
driver, err := p.determineCSIDriver(filteredPVCs)
driver, err := p.determineCSIDriver(groupedPVCs)
if err != nil {
return nil, errors.Wrapf(err, "failed to determine CSI driver for PVCs in VolumeGroupSnapshot group %q", group)
}
@@ -665,7 +643,7 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
}
// Wait for all the VS objects associated with the VGS to have status and VGS Name (VS readiness is checked in legacy flow) and get the PVC-to-VS map
vsMap, err := p.waitForVGSAssociatedVS(ctx, filteredPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
vsMap, err := p.waitForVGSAssociatedVS(ctx, groupedPVCs, newVGS, backup.Spec.CSISnapshotTimeout.Duration)
if err != nil {
return nil, errors.Wrapf(err, "timeout waiting for VolumeSnapshots to have status created via VolumeGroupSnapshot %s", newVGS.Name)
}
@@ -756,40 +734,6 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
return pvcList.Items, nil
}
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
pvcs []corev1api.PersistentVolumeClaim,
backup *velerov1api.Backup,
) ([]corev1api.PersistentVolumeClaim, error) {
var filteredPVCs []corev1api.PersistentVolumeClaim
for _, pvc := range pvcs {
// Convert PVC to unstructured for ShouldPerformSnapshotWithBackup
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
}
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}
// Check if this PVC should be snapshotted according to volume policies
shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
unstructuredPVC,
kuberesource.PersistentVolumeClaims,
*backup,
p.crClient,
p.log,
)
if err != nil {
return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)
}
if shouldSnapshot {
filteredPVCs = append(filteredPVCs, pvc)
}
}
return filteredPVCs, nil
}
func (p *pvcBackupItemAction) determineCSIDriver(
pvcs []corev1api.PersistentVolumeClaim,
) (string, error) {

View File

@@ -586,280 +586,6 @@ func TestListGroupedPVCs(t *testing.T) {
}
}
func TestFilterPVCsByVolumePolicy(t *testing.T) {
tests := []struct {
name string
pvcs []corev1api.PersistentVolumeClaim
pvs []corev1api.PersistentVolume
volumePolicyStr string
expectCount int
expectError bool
}{
{
name: "All PVCs should be included when no volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-1", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-1",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-2", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-2",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-2"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver-1"},
},
},
},
},
expectCount: 2,
},
{
name: "Filter out NFS PVC by volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-csi",
StorageClassName: pointer.String("sc-1"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 1,
},
{
name: "All PVCs filtered out by volume policy",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-1", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs-1",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs-2", Namespace: "ns-1"},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs-2",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-1"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export/1",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs-2"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export/2",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 0,
},
{
name: "Filter out non-CSI PVCs from mixed driver group",
pvcs: []corev1api.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-linstor",
Namespace: "ns-1",
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-linstor",
StorageClassName: pointer.String("sc-linstor"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-nfs",
Namespace: "ns-1",
Labels: map[string]string{"app.kubernetes.io/instance": "myapp"},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "pv-nfs",
StorageClassName: pointer.String("sc-nfs"),
},
Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
},
},
pvs: []corev1api.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-linstor"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{Driver: "linstor.csi.linbit.com"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
NFS: &corev1api.NFSVolumeSource{
Server: "nfs-server",
Path: "/export",
},
},
},
},
},
volumePolicyStr: `
version: v1
volumePolicies:
- conditions:
nfs: {}
action:
type: skip
`,
expectCount: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
objs := []runtime.Object{}
for i := range tt.pvs {
objs = append(objs, &tt.pvs[i])
}
client := velerotest.NewFakeControllerRuntimeClient(t, objs...)
backup := &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
Spec: velerov1api.BackupSpec{},
}
// Add volume policy ConfigMap if specified
if tt.volumePolicyStr != "" {
cm := &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "volume-policy",
Namespace: "velero",
},
Data: map[string]string{
"volume-policy": tt.volumePolicyStr,
},
}
require.NoError(t, client.Create(t.Context(), cm))
backup.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{
Kind: "ConfigMap",
Name: "volume-policy",
}
}
action := &pvcBackupItemAction{
log: velerotest.NewLogger(),
crClient: client,
}
result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup)
if tt.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Len(t, result, tt.expectCount)
// For mixed driver scenarios, verify filtered result can determine single CSI driver
if tt.name == "Filter out non-CSI PVCs from mixed driver group" && len(result) > 0 {
driver, err := action.determineCSIDriver(result)
require.NoError(t, err, "After filtering, determineCSIDriver should not fail with multiple drivers error")
require.Equal(t, "linstor.csi.linbit.com", driver, "Should have the Linstor driver after filtering out NFS")
}
}
})
}
}
func TestDetermineCSIDriver(t *testing.T) {
tests := []struct {
name string

View File

@@ -1198,7 +1198,6 @@ func updateVolumeInfos(
volumeInfos[index].SnapshotDataMovementInfo.SnapshotHandle = dataUpload.Status.SnapshotID
volumeInfos[index].SnapshotDataMovementInfo.RetainedSnapshot = dataUpload.Spec.CSISnapshot.VolumeSnapshot
volumeInfos[index].SnapshotDataMovementInfo.Size = dataUpload.Status.Progress.TotalBytes
volumeInfos[index].SnapshotDataMovementInfo.IncrementalSize = dataUpload.Status.IncrementalBytes
volumeInfos[index].SnapshotDataMovementInfo.Phase = dataUpload.Status.Phase
if dataUpload.Status.Phase == velerov2alpha1.DataUploadPhaseCompleted {

View File

@@ -5578,7 +5578,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseFailed).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5604,7 +5603,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseFailed,
},
},
@@ -5618,7 +5616,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").
@@ -5644,7 +5641,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
RetainedSnapshot: "vs-1",
SnapshotHandle: "snapshot-id",
Size: 1000,
IncrementalSize: 500,
Phase: velerov2alpha1.DataUploadPhaseCompleted,
},
},
@@ -5659,7 +5655,6 @@ func TestUpdateVolumeInfos(t *testing.T) {
CSISnapshot(&velerov2alpha1.CSISnapshotSpec{VolumeSnapshot: "vs-1"}).
SnapshotID("snapshot-id").
Progress(shared.DataMoveOperationProgress{TotalBytes: 1000}).
IncrementalBytes(500).
Phase(velerov2alpha1.DataUploadPhaseCompleted).
SourceNamespace("ns-1").
SourcePVC("pvc-1").

View File

@@ -145,12 +145,6 @@ func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress)
return d
}
// IncrementalBytes sets the DataUpload's IncrementalBytes.
func (d *DataUploadBuilder) IncrementalBytes(incrementalBytes int64) *DataUploadBuilder {
d.object.Status.IncrementalBytes = incrementalBytes
return d
}
// Node sets the DataUpload's Node.
func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
d.object.Status.Node = node
@@ -186,9 +180,3 @@ func (d *DataUploadBuilder) Message(msg string) *DataUploadBuilder {
d.object.Status.Message = msg
return d
}
// TotalBytes sets the DataUpload's TotalBytes.
func (d *DataUploadBuilder) TotalBytes(size int64) *DataUploadBuilder {
d.object.Status.Progress.TotalBytes = size
return d
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package builder
import (
corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -88,9 +87,3 @@ func (b *StorageClassBuilder) Provisioner(provisioner string) *StorageClassBuild
b.object.Provisioner = provisioner
return b
}
// ReclaimPolicy sets StorageClass's reclaimPolicy.
func (b *StorageClassBuilder) ReclaimPolicy(policy corev1api.PersistentVolumeReclaimPolicy) *StorageClassBuilder {
b.object.ReclaimPolicy = &policy
return b
}

View File

@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
if err != nil {

View File

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -91,7 +91,7 @@ func TestNewDownloadCommand(t *testing.T) {
assert.NoError(t, e)
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -63,7 +63,7 @@ func TestNewGetCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestNewGetCommand(t *testing.T) {
e = d.Execute()
require.NoError(t, e)
cmd = exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd = exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err = veleroexec.RunCommand(cmd)
require.NoError(t, err)

View File

@@ -66,7 +66,7 @@ func TestNewDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -50,7 +50,7 @@ func TestNewGetCommand(t *testing.T) {
c.Execute()
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -99,7 +99,7 @@ func TestSetCommand_Execute(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
_, stderr, err := veleroexec.RunCommand(cmd)

View File

@@ -18,7 +18,6 @@ package bug
import (
"bytes"
"context"
"errors"
"fmt"
"net/url"
@@ -148,7 +147,7 @@ func getKubectlVersion() (string, error) {
return "", errors.New("kubectl not found on PATH")
}
kubectlCmd := exec.CommandContext(context.Background(), "kubectl", "version")
kubectlCmd := exec.Command("kubectl", "version")
var outbuf bytes.Buffer
kubectlCmd.Stdout = &outbuf
if err := kubectlCmd.Start(); err != nil {
@@ -208,17 +207,16 @@ func renderToString(bugInfo *VeleroBugInfo) (string, error) {
// a platform specific binary.
func showIssueInBrowser(body string) error {
url := issueURL + "?body=" + url.QueryEscape(body)
ctx := context.Background()
switch runtime.GOOS {
case "darwin":
return exec.CommandContext(ctx, "open", url).Start()
return exec.Command("open", url).Start()
case "linux":
if cmdExistsOnPath("xdg-open") {
return exec.CommandContext(ctx, "xdg-open", url).Start()
return exec.Command("xdg-open", url).Start()
}
return fmt.Errorf("velero can't open a browser window using the command '%s'", "xdg-open")
case "windows":
return exec.CommandContext(ctx, "rundll32", "url.dll,FileProtocolHandler", url).Start()
return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
default:
return fmt.Errorf("velero can't open a browser window on platform %s", runtime.GOOS)
}

View File

@@ -53,7 +53,6 @@ type dataMoverRestoreConfig struct {
volumePath string
volumeMode string
ddName string
cacheDir string
resourceTimeout time.Duration
}
@@ -90,7 +89,6 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be restored")
command.Flags().StringVar(&config.ddName, "data-download", config.ddName, "The data download name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -290,5 +288,5 @@ func (s *dataMoverRestore) createDataPathService() (dataPathService, error) {
return datamover.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.ddName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeMode(s.config.volumeMode),
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.config.cacheDir, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, duInformer, s.logger), nil
}

View File

@@ -60,7 +60,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -85,7 +84,6 @@ type nodeAgentServerConfig struct {
resourceTimeout time.Duration
dataMoverPrepareTimeout time.Duration
nodeAgentConfig string
backupRepoConfig string
}
func NewServerCommand(f client.Factory) *cobra.Command {
@@ -123,7 +121,6 @@ func NewServerCommand(f client.Factory) *cobra.Command {
command.Flags().DurationVar(&config.dataMoverPrepareTimeout, "data-mover-prepare-timeout", config.dataMoverPrepareTimeout, "How long to wait for preparing a DataUpload/DataDownload. Default is 30 minutes.")
command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics")
command.Flags().StringVar(&config.nodeAgentConfig, "node-agent-configmap", config.nodeAgentConfig, "The name of ConfigMap containing node-agent configurations.")
command.Flags().StringVar(&config.backupRepoConfig, "backup-repository-configmap", config.backupRepoConfig, "The name of ConfigMap containing backup repository configurations.")
return command
}
@@ -143,9 +140,7 @@ type nodeAgentServer struct {
csiSnapshotClient *snapshotv1client.Clientset
dataPathMgr *datapath.Manager
dataPathConfigs *velerotypes.NodeAgentConfigs
backupRepoConfigs map[string]string
vgdpCounter *exposer.VgdpCounter
repoConfigMgr repository.ConfigManager
}
func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, config nodeAgentServerConfig) (*nodeAgentServer, error) {
@@ -239,7 +234,6 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
namespace: factory.Namespace(),
nodeName: nodeName,
metricsAddress: config.metricsAddress,
repoConfigMgr: repository.NewConfigManager(logger),
}
// the cache isn't initialized yet when "validatePodVolumesHostPath" is called, the client returned by the manager cannot
@@ -260,11 +254,6 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
if err := s.getDataPathConfigs(); err != nil {
return nil, err
}
if err := s.getBackupRepoConfigs(); err != nil {
return nil, err
}
s.dataPathMgr = datapath.NewManager(s.getDataPathConcurrentNum(defaultDataPathConcurrentNum))
return s, nil
@@ -340,30 +329,12 @@ func (s *nodeAgentServer) run() {
}
}
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
if err := s.validateCachePVCConfig(*s.dataPathConfigs.CachePVCConfig); err != nil {
s.logger.WithError(err).Warnf("Ignore cache config %v", s.dataPathConfigs.CachePVCConfig)
} else {
s.logger.Infof("Using cache volume configs %v", s.dataPathConfigs.CachePVCConfig)
}
}
var cachePVCConfig *velerotypes.CachePVC
if s.dataPathConfigs != nil && s.dataPathConfigs.CachePVCConfig != nil {
cachePVCConfig = s.dataPathConfigs.CachePVCConfig
s.logger.Infof("Using customized cachePVC config %v", cachePVCConfig)
}
if s.backupRepoConfigs != nil {
s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
}
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, s.backupRepoConfigs, cachePVCConfig, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup, s.repoConfigMgr)
pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup)
if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -407,15 +378,12 @@ func (s *nodeAgentServer) run() {
s.vgdpCounter,
loadAffinity,
restorePVCConfig,
s.backupRepoConfigs,
cachePVCConfig,
podResources,
s.nodeName,
s.config.dataMoverPrepareTimeout,
s.logger,
s.metrics,
dataMovePriorityClass,
s.repoConfigMgr,
)
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
@@ -589,32 +557,14 @@ func (s *nodeAgentServer) getDataPathConfigs() error {
configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient, s.config.nodeAgentConfig)
if err != nil {
return errors.Wrapf(err, "error getting node agent configs from configMap %s", s.config.nodeAgentConfig)
s.logger.WithError(err).Errorf("Failed to get node agent configs from configMap %s, ignore it", s.config.nodeAgentConfig)
return err
}
s.dataPathConfigs = configs
return nil
}
// getBackupRepoConfigs loads the backup repository configurations from the
// ConfigMap named by the server's backupRepoConfig setting and stores the
// ConfigMap's data on the server. It is a no-op when no ConfigMap name was
// specified; it returns an error when the ConfigMap cannot be fetched or
// carries no data.
func (s *nodeAgentServer) getBackupRepoConfigs() error {
	name := s.config.backupRepoConfig
	if name == "" {
		// Nothing to load; the feature is optional.
		s.logger.Info("No backup repo configMap is specified")
		return nil
	}

	configMap, err := s.kubeClient.CoreV1().ConfigMaps(s.namespace).Get(s.ctx, name, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "error getting backup repo configs from configMap %s", name)
	}

	// A present-but-empty ConfigMap is treated as a configuration error.
	if configMap.Data == nil {
		return errors.Errorf("no data is in the backup repo configMap %s", name)
	}

	s.backupRepoConfigs = configMap.Data

	return nil
}
func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
configs := s.dataPathConfigs
@@ -670,20 +620,3 @@ func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
return concurrentNum
}
// validateCachePVCConfig checks that the cache PVC configuration names a
// storage class that exists in the cluster and whose reclaim policy, when
// set, is Delete (so cache volumes are cleaned up automatically). A storage
// class with no reclaim policy set is accepted.
func (s *nodeAgentServer) validateCachePVCConfig(config velerotypes.CachePVC) error {
	scName := config.StorageClass
	if scName == "" {
		return errors.New("storage class is absent")
	}

	storageClass, err := s.kubeClient.StorageV1().StorageClasses().Get(s.ctx, scName, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "error getting storage class %s", scName)
	}

	// Only a non-nil policy other than Delete is rejected; nil means the
	// cluster default applies and is allowed.
	if policy := storageClass.ReclaimPolicy; policy != nil && *policy != corev1api.PersistentVolumeReclaimDelete {
		return errors.Errorf("unexpected storage class reclaim policy %v", *policy)
	}

	return nil
}

View File

@@ -24,7 +24,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -35,8 +34,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/nodeagent"
testutil "github.com/vmware-tanzu/velero/pkg/test"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
func Test_validatePodVolumesHostPath(t *testing.T) {
@@ -145,10 +142,11 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error)
configMapName string
expectConfigs *velerotypes.NodeAgentConfigs
expectedErr string
expectLog string
}{
{
name: "no config specified",
name: "no config specified",
expectLog: "No node-agent configMap is specified",
},
{
name: "failed to get configs",
@@ -156,7 +154,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-get-error")
},
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-get-error",
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it",
},
{
name: "configs cm not found",
@@ -164,7 +162,7 @@ func Test_getDataPathConfigs(t *testing.T) {
getFunc: func(context.Context, string, kubernetes.Interface, string) (*velerotypes.NodeAgentConfigs, error) {
return nil, errors.New("fake-not-found-error")
},
expectedErr: "error getting node agent configs from configMap node-agent-config: fake-not-found-error",
expectLog: "Failed to get node agent configs from configMap node-agent-config, ignore it",
},
{
@@ -179,21 +177,23 @@ func Test_getDataPathConfigs(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logBuffer := ""
s := &nodeAgentServer{
config: nodeAgentServerConfig{
nodeAgentConfig: test.configMapName,
},
logger: testutil.NewLogger(),
logger: testutil.NewSingleLogger(&logBuffer),
}
getConfigsFunc = test.getFunc
err := s.getDataPathConfigs()
if test.expectedErr == "" {
require.NoError(t, err)
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
s.getDataPathConfigs()
assert.Equal(t, test.expectConfigs, s.dataPathConfigs)
if test.expectLog == "" {
assert.Empty(t, logBuffer)
} else {
require.EqualError(t, err, test.expectedErr)
assert.Contains(t, logBuffer, test.expectLog)
}
})
}
@@ -416,117 +416,3 @@ func Test_getDataPathConcurrentNum(t *testing.T) {
})
}
}
// TestGetBackupRepoConfigs covers the nodeAgentServer.getBackupRepoConfigs
// paths: no ConfigMap specified, ConfigMap missing, ConfigMap present but
// empty, and a successful load of the ConfigMap data.
func TestGetBackupRepoConfigs(t *testing.T) {
	emptyCM := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Result()
	populatedCM := builder.ForConfigMap(velerov1api.DefaultNamespace, "backup-repo-config").Data("cacheLimit", "100").Result()

	cases := []struct {
		name          string
		configMapName string
		kubeClientObj []runtime.Object
		expectConfigs map[string]string
		expectedErr   string
	}{
		{
			name: "no config specified",
		},
		{
			name:          "failed to get configs",
			configMapName: "backup-repo-config",
			expectedErr:   "error getting backup repo configs from configMap backup-repo-config: configmaps \"backup-repo-config\" not found",
		},
		{
			name:          "configs data not found",
			kubeClientObj: []runtime.Object{emptyCM},
			configMapName: "backup-repo-config",
			expectedErr:   "no data is in the backup repo configMap backup-repo-config",
		},
		{
			name:          "succeed",
			configMapName: "backup-repo-config",
			kubeClientObj: []runtime.Object{populatedCM},
			expectConfigs: map[string]string{"cacheLimit": "100"},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			server := &nodeAgentServer{
				namespace:  velerov1api.DefaultNamespace,
				kubeClient: fake.NewSimpleClientset(tc.kubeClientObj...),
				config: nodeAgentServerConfig{
					backupRepoConfig: tc.configMapName,
				},
				logger: testutil.NewLogger(),
			}

			err := server.getBackupRepoConfigs()

			if tc.expectedErr != "" {
				require.EqualError(t, err, tc.expectedErr)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.expectConfigs, server.backupRepoConfigs)
		})
	}
}
// TestValidateCachePVCConfig covers nodeAgentServer.validateCachePVCConfig:
// missing storage class name, storage class not found, reclaim policy other
// than Delete, reclaim policy Delete, and reclaim policy unset.
func TestValidateCachePVCConfig(t *testing.T) {
	retainSC := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()
	deleteSC := builder.ForStorageClass("fake-storage-class").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result()
	noPolicySC := builder.ForStorageClass("fake-storage-class").Result()

	cases := []struct {
		name          string
		config        velerotypes.CachePVC
		kubeClientObj []runtime.Object
		expectedErr   string
	}{
		{
			name:        "no storage class",
			expectedErr: "storage class is absent",
		},
		{
			name:        "failed to get storage class",
			config:      velerotypes.CachePVC{StorageClass: "fake-storage-class"},
			expectedErr: "error getting storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
		},
		{
			name:          "storage class reclaim policy is not expected",
			config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
			kubeClientObj: []runtime.Object{retainSC},
			expectedErr:   "unexpected storage class reclaim policy Retain",
		},
		{
			name:          "storage class reclaim policy is delete",
			config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
			kubeClientObj: []runtime.Object{deleteSC},
		},
		{
			name:          "storage class with no reclaim policy",
			config:        velerotypes.CachePVC{StorageClass: "fake-storage-class"},
			kubeClientObj: []runtime.Object{noPolicySC},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			server := &nodeAgentServer{
				kubeClient: fake.NewSimpleClientset(tc.kubeClientObj...),
			}

			err := server.validateCachePVCConfig(tc.config)

			if tc.expectedErr != "" {
				require.EqualError(t, err, tc.expectedErr)
				return
			}

			require.NoError(t, err)
		})
	}
}

View File

@@ -51,7 +51,6 @@ import (
type podVolumeRestoreConfig struct {
volumePath string
pvrName string
cacheDir string
resourceTimeout time.Duration
}
@@ -87,7 +86,6 @@ func NewRestoreCommand(f client.Factory) *cobra.Command {
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.pvrName, "pod-volume-restore", config.pvrName, "The PVR name")
command.Flags().StringVar(&config.cacheDir, "cache-volume-path", config.cacheDir, "The full path of the cache volume")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
@@ -296,5 +294,5 @@ func (s *podVolumeRestore) createDataPathService() (dataPathService, error) {
return podvolume.NewRestoreMicroService(s.ctx, s.client, s.kubeClient, s.config.pvrName, s.namespace, s.nodeName, datapath.AccessPoint{
ByPath: s.config.volumePath,
VolMode: uploader.PersistentVolumeFilesystem,
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.config.cacheDir, s.logger), nil
}, s.dataPathMgr, repoEnsurer, credGetter, pvrInformer, s.logger), nil
}

View File

@@ -75,7 +75,7 @@ func TestDeleteCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestDeleteCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
if err != nil {

View File

@@ -63,7 +63,7 @@ func TestNewDescribeCommand(t *testing.T) {
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDescribeCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)

View File

@@ -62,7 +62,7 @@ func TestNewGetCommand(t *testing.T) {
return
}
cmd := exec.CommandContext(t.Context(), os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag))
stdout, _, err := veleroexec.RunCommand(cmd)
require.NoError(t, err)

View File

@@ -713,9 +713,6 @@ func describeDataMovement(d *Describer, details bool, info *volume.BackupVolumeI
d.Printf("\t\t\t\tData Mover: %s\n", dataMover)
d.Printf("\t\t\t\tUploader Type: %s\n", info.SnapshotDataMovementInfo.UploaderType)
d.Printf("\t\t\t\tMoved data Size (bytes): %d\n", info.SnapshotDataMovementInfo.Size)
if info.SnapshotDataMovementInfo.IncrementalSize > 0 {
d.Printf("\t\t\t\tIncremental data Size (bytes): %d\n", info.SnapshotDataMovementInfo.IncrementalSize)
}
d.Printf("\t\t\t\tResult: %s\n", info.Result)
} else {
d.Printf("\t\t\tData Movement: %s\n", "included, specify --details for more information")
@@ -838,7 +835,7 @@ func describePodVolumeBackups(d *Describer, details bool, podVolumeBackups []vel
backupsByPod := new(volumesByPod)
for _, backup := range backupsByPhase[phase] {
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress, backup.Status.IncrementalBytes)
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress)
}
d.Printf("\t\t%s:\n", phase)
@@ -888,8 +885,7 @@ type volumesByPod struct {
// Add adds a pod volume with the specified pod namespace, name
// and volume to the appropriate group.
// Used for both backup and restore
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress, incrementalBytes int64) {
func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veleroapishared.DataMoveOperationProgress) {
if v.volumesByPodMap == nil {
v.volumesByPodMap = make(map[string]*podVolumeGroup)
}
@@ -899,12 +895,6 @@ func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veler
// append backup progress percentage if backup is in progress
if phase == "In Progress" && progress.TotalBytes != 0 {
volume = fmt.Sprintf("%s (%.2f%%)", volume, float64(progress.BytesDone)/float64(progress.TotalBytes)*100)
} else if phase == string(velerov1api.PodVolumeBackupPhaseCompleted) && incrementalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v, incremental size: %v)", volume, progress.TotalBytes, incrementalBytes)
} else if (phase == string(velerov1api.PodVolumeBackupPhaseCompleted) ||
phase == string(velerov1api.PodVolumeRestorePhaseCompleted)) &&
progress.TotalBytes > 0 {
volume = fmt.Sprintf("%s (size: %v)", volume, progress.TotalBytes)
}
if group, ok := v.volumesByPodMap[key]; !ok {

View File

@@ -597,12 +597,11 @@ func TestCSISnapshots(t *testing.T) {
Result: volume.VolumeResultFailed,
SnapshotDataMoved: true,
SnapshotDataMovementInfo: &volume.SnapshotDataMovementInfo{
UploaderType: "fake-uploader",
SnapshotHandle: "fake-repo-id-5",
OperationID: "fake-operation-5",
Size: 100,
IncrementalSize: 50,
Phase: velerov2alpha1.DataUploadPhaseFailed,
UploaderType: "fake-uploader",
SnapshotHandle: "fake-repo-id-5",
OperationID: "fake-operation-5",
Size: 100,
Phase: velerov2alpha1.DataUploadPhaseFailed,
},
},
},
@@ -614,7 +613,6 @@ func TestCSISnapshots(t *testing.T) {
Data Mover: velero
Uploader Type: fake-uploader
Moved data Size (bytes): 100
Incremental data Size (bytes): 50
Result: failed
`,
},

View File

@@ -464,10 +464,6 @@ func describeDataMovementInSF(details bool, info *volume.BackupVolumeInfo, snaps
dataMovement["uploaderType"] = info.SnapshotDataMovementInfo.UploaderType
dataMovement["result"] = string(info.Result)
if info.SnapshotDataMovementInfo.Size > 0 || info.SnapshotDataMovementInfo.IncrementalSize > 0 {
dataMovement["size"] = info.SnapshotDataMovementInfo.Size
dataMovement["incrementalSize"] = info.SnapshotDataMovementInfo.IncrementalSize
}
snapshotDetail["dataMovement"] = dataMovement
} else {
@@ -538,7 +534,7 @@ func describePodVolumeBackupsInSF(backups []velerov1api.PodVolumeBackup, details
// group the backups in the current phase by pod (i.e. "ns/name")
backupsByPod := new(volumesByPod)
for _, backup := range backupsByPhase[phase] {
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress, backup.Status.IncrementalBytes)
backupsByPod.Add(backup.Spec.Pod.Namespace, backup.Spec.Pod.Name, backup.Spec.Volume, phase, backup.Status.Progress)
}
backupsByPods := make([]map[string]string, 0)

View File

@@ -408,7 +408,7 @@ func describePodVolumeRestores(d *Describer, restores []velerov1api.PodVolumeRes
restoresByPod := new(volumesByPod)
for _, restore := range restoresByPhase[phase] {
restoresByPod.Add(restore.Spec.Pod.Namespace, restore.Spec.Pod.Name, restore.Spec.Volume, phase, restore.Status.Progress, 0)
restoresByPod.Add(restore.Spec.Pod.Namespace, restore.Spec.Pod.Name, restore.Spec.Volume, phase, restore.Status.Progress)
}
d.Printf("\t%s:\n", phase)

View File

@@ -608,7 +608,7 @@ func getBackupRepositoryConfig(ctx context.Context, ctrlClient client.Client, co
jsonData, found := loc.Data[repoType]
if !found {
log.Infof("No data for repo type %s in config map %s", repoType, configName)
log.Info("No data for repo type %s in config map %s", repoType, configName)
return nil, nil
}

View File

@@ -49,7 +49,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
@@ -69,14 +68,11 @@ type DataDownloadReconciler struct {
vgdpCounter *exposer.VgdpCounter
loadAffinity []*kube.LoadAffinity
restorePVCConfig velerotypes.RestorePVC
backupRepoConfigs map[string]string
cacheVolumeConfigs *velerotypes.CachePVC
podResources corev1api.ResourceRequirements
preparingTimeout time.Duration
metrics *metrics.ServerMetrics
cancelledDataDownload map[string]time.Time
dataMovePriorityClass string
repoConfigMgr repository.ConfigManager
}
func NewDataDownloadReconciler(
@@ -87,15 +83,12 @@ func NewDataDownloadReconciler(
counter *exposer.VgdpCounter,
loadAffinity []*kube.LoadAffinity,
restorePVCConfig velerotypes.RestorePVC,
backupRepoConfigs map[string]string,
cacheVolumeConfigs *velerotypes.CachePVC,
podResources corev1api.ResourceRequirements,
nodeName string,
preparingTimeout time.Duration,
logger logrus.FieldLogger,
metrics *metrics.ServerMetrics,
dataMovePriorityClass string,
repoConfigMgr repository.ConfigManager,
) *DataDownloadReconciler {
return &DataDownloadReconciler{
client: client,
@@ -106,8 +99,6 @@ func NewDataDownloadReconciler(
nodeName: nodeName,
restoreExposer: exposer.NewGenericRestoreExposer(kubeClient, logger),
restorePVCConfig: restorePVCConfig,
backupRepoConfigs: backupRepoConfigs,
cacheVolumeConfigs: cacheVolumeConfigs,
dataPathMgr: dataPathMgr,
vgdpCounter: counter,
loadAffinity: loadAffinity,
@@ -116,7 +107,6 @@ func NewDataDownloadReconciler(
metrics: metrics,
cancelledDataDownload: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
repoConfigMgr: repoConfigMgr,
}
}
@@ -892,19 +882,6 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
}
}
var cacheVolume *exposer.CacheConfigs
if r.cacheVolumeConfigs != nil {
if limit, err := r.repoConfigMgr.ClientSideCacheLimit(velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs); err != nil {
log.WithError(err).Warnf("Failed to get client side cache limit for repo type %s from configs %v", velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs)
} else {
cacheVolume = &exposer.CacheConfigs{
Limit: limit,
StorageClass: r.cacheVolumeConfigs.StorageClass,
ResidentThreshold: r.cacheVolumeConfigs.ResidentThreshold,
}
}
}
return exposer.GenericRestoreExposeParam{
TargetPVCName: dd.Spec.TargetVolume.PVC,
TargetNamespace: dd.Spec.TargetVolume.Namespace,
@@ -918,8 +895,6 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
RestorePVCConfig: r.restorePVCConfig,
LoadAffinity: r.loadAffinity,
PriorityClassName: r.dataMovePriorityClass,
RestoreSize: dd.Spec.SnapshotSize,
CacheVolume: cacheVolume,
}, nil
}

View File

@@ -129,7 +129,7 @@ func initDataDownloadReconcilerWithError(t *testing.T, objects []any, needError
dataPathMgr := datapath.NewManager(1)
return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, nil, nil, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), "", nil), nil
return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), ""), nil
}
func TestDataDownloadReconcile(t *testing.T) {

View File

@@ -493,7 +493,6 @@ func (r *DataUploadReconciler) OnDataUploadCompleted(ctx context.Context, namesp
du.Status.Path = result.Backup.Source.ByPath
du.Status.Phase = velerov2alpha1api.DataUploadPhaseCompleted
du.Status.SnapshotID = result.Backup.SnapshotID
du.Status.IncrementalBytes = result.Backup.IncrementalBytes
du.Status.CompletionTimestamp = &metav1.Time{Time: r.Clock.Now()}
if result.Backup.EmptySnapshot {
du.Status.Message = "volume was empty so no data was upload"

View File

@@ -850,20 +850,11 @@ func TestOnDataUploadCompleted(t *testing.T) {
// Add the DataUpload object to the fake client
require.NoError(t, r.client.Create(ctx, du))
r.snapshotExposerList = map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(r.kubeClient, r.csiSnapshotClient, velerotest.NewLogger())}
r.OnDataUploadCompleted(ctx, namespace, duName, datapath.Result{
Backup: datapath.BackupResult{
SnapshotID: "fake-id",
Source: datapath.AccessPoint{
ByPath: "fake-path",
},
},
})
r.OnDataUploadCompleted(ctx, namespace, duName, datapath.Result{})
updatedDu := &velerov2alpha1api.DataUpload{}
require.NoError(t, r.client.Get(ctx, types.NamespacedName{Name: duName, Namespace: namespace}, updatedDu))
assert.Equal(t, velerov2alpha1api.DataUploadPhaseCompleted, updatedDu.Status.Phase)
assert.False(t, updatedDu.Status.CompletionTimestamp.IsZero())
assert.Equal(t, "fake-id", updatedDu.Status.SnapshotID)
assert.Equal(t, "fake-path", updatedDu.Status.Path)
}
func TestFindDataUploadForPod(t *testing.T) {

View File

@@ -526,7 +526,6 @@ func (r *PodVolumeBackupReconciler) OnDataPathCompleted(ctx context.Context, nam
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted
pvb.Status.SnapshotID = result.Backup.SnapshotID
pvb.Status.CompletionTimestamp = &completionTime
pvb.Status.IncrementalBytes = result.Backup.IncrementalBytes
if result.Backup.EmptySnapshot {
pvb.Status.Message = "volume was empty so no snapshot was taken"
}

View File

@@ -48,18 +48,15 @@ import (
"github.com/vmware-tanzu/velero/pkg/datapath"
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
repository "github.com/vmware-tanzu/velero/pkg/repository/manager"
"github.com/vmware-tanzu/velero/pkg/restorehelper"
velerotypes "github.com/vmware-tanzu/velero/pkg/types"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, backupRepoConfigs map[string]string,
cacheVolumeConfigs *velerotypes.CachePVC, podResources corev1api.ResourceRequirements, logger logrus.FieldLogger, dataMovePriorityClass string,
privileged bool, repoConfigMgr repository.ConfigManager) *PodVolumeRestoreReconciler {
counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeRestoreReconciler {
return &PodVolumeRestoreReconciler{
client: client,
mgr: mgr,
@@ -68,8 +65,6 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
nodeName: nodeName,
clock: &clocks.RealClock{},
podResources: podResources,
backupRepoConfigs: backupRepoConfigs,
cacheVolumeConfigs: cacheVolumeConfigs,
dataPathMgr: dataPathMgr,
vgdpCounter: counter,
preparingTimeout: preparingTimeout,
@@ -78,7 +73,6 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
cancelledPVR: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
repoConfigMgr: repoConfigMgr,
}
}
@@ -90,8 +84,6 @@ type PodVolumeRestoreReconciler struct {
nodeName string
clock clocks.WithTickerAndDelayedExecution
podResources corev1api.ResourceRequirements
backupRepoConfigs map[string]string
cacheVolumeConfigs *velerotypes.CachePVC
exposer exposer.PodVolumeExposer
dataPathMgr *datapath.Manager
vgdpCounter *exposer.VgdpCounter
@@ -100,7 +92,6 @@ type PodVolumeRestoreReconciler struct {
cancelledPVR map[string]time.Time
dataMovePriorityClass string
privileged bool
repoConfigMgr repository.ConfigManager
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -895,19 +886,6 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
}
}
var cacheVolume *exposer.CacheConfigs
if r.cacheVolumeConfigs != nil {
if limit, err := r.repoConfigMgr.ClientSideCacheLimit(velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs); err != nil {
log.WithError(err).Warnf("Failed to get client side cache limit for repo type %s from configs %v", velerov1api.BackupRepositoryTypeKopia, r.backupRepoConfigs)
} else {
cacheVolume = &exposer.CacheConfigs{
Limit: limit,
StorageClass: r.cacheVolumeConfigs.StorageClass,
ResidentThreshold: r.cacheVolumeConfigs.ResidentThreshold,
}
}
}
return exposer.PodVolumeExposeParam{
Type: exposer.PodVolumeExposeTypeRestore,
ClientNamespace: pvr.Spec.Pod.Namespace,
@@ -918,8 +896,6 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
HostingPodTolerations: hostingPodTolerations,
OperationTimeout: r.resourceTimeout,
Resources: r.podResources,
RestoreSize: pvr.Spec.SnapshotSize,
CacheVolume: cacheVolume,
// Priority class name for the data mover pod, retrieved from node-agent-configmap
PriorityClassName: r.dataMovePriorityClass,
Privileged: r.privileged,

View File

@@ -617,7 +617,7 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []
dataPathMgr := datapath.NewManager(1)
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, nil, nil, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false, nil), nil
return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false), nil
}
func TestPodVolumeRestoreReconcile(t *testing.T) {

View File

@@ -156,7 +156,7 @@ func TestOnDataUploadCompleted(t *testing.T) {
{
name: "marshal fail",
marshalErr: errors.New("fake-marshal-error"),
expectedErr: "Failed to marshal backup result { false { } 0 0}: fake-marshal-error",
expectedErr: "Failed to marshal backup result { false { } 0}: fake-marshal-error",
},
{
name: "succeed",

View File

@@ -61,12 +61,11 @@ type RestoreMicroService struct {
ddInformer cache.Informer
ddHandler cachetool.ResourceEventHandlerRegistration
nodeName string
cacheDir string
}
func NewRestoreMicroService(ctx context.Context, client client.Client, kubeClient kubernetes.Interface, dataDownloadName string, namespace string, nodeName string,
sourceTargetPath datapath.AccessPoint, dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, cred *credentials.CredentialGetter,
ddInformer cache.Informer, cacheDir string, log logrus.FieldLogger) *RestoreMicroService {
ddInformer cache.Informer, log logrus.FieldLogger) *RestoreMicroService {
return &RestoreMicroService{
ctx: ctx,
client: client,
@@ -81,7 +80,6 @@ func NewRestoreMicroService(ctx context.Context, client client.Client, kubeClien
nodeName: nodeName,
resultSignal: make(chan dataPathResult),
ddInformer: ddInformer,
cacheDir: cacheDir,
}
}
@@ -174,7 +172,6 @@ func (r *RestoreMicroService) RunCancelableDataPath(ctx context.Context) (string
RepoIdentifier: "",
RepositoryEnsurer: r.repoEnsurer,
CredentialGetter: r.credentialGetter,
CacheDir: r.cacheDir,
}); err != nil {
return "", errors.Wrap(err, "error to initialize data path")
}

View File

@@ -44,7 +44,6 @@ type FSBRInitParam struct {
RepositoryEnsurer *repository.Ensurer
CredentialGetter *credentials.CredentialGetter
Filesystem filesystem.Interface
CacheDir string
}
// FSBRStartParam define the input param for FSBR start
@@ -113,7 +112,7 @@ func (fs *fileSystemBR) Init(ctx context.Context, param any) error {
return errors.Wrapf(err, "error to ensure backup repository %s-%s-%s", initParam.BSLName, initParam.SourceNamespace, initParam.RepositoryType)
}
err = fs.boostRepoConnect(ctx, initParam.RepositoryType, initParam.CredentialGetter, initParam.CacheDir)
err = fs.boostRepoConnect(ctx, initParam.RepositoryType, initParam.CredentialGetter)
if err != nil {
return errors.Wrapf(err, "error to boost backup repository connection %s-%s-%s", initParam.BSLName, initParam.SourceNamespace, initParam.RepositoryType)
}
@@ -182,7 +181,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, uploaderConfig map[strin
fs.wgDataPath.Done()
}()
snapshotID, emptySnapshot, totalBytes, incrementalBytes, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, backupParam.RealSource, backupParam.Tags, backupParam.ForceFull,
snapshotID, emptySnapshot, totalBytes, err := fs.uploaderProv.RunBackup(fs.ctx, source.ByPath, backupParam.RealSource, backupParam.Tags, backupParam.ForceFull,
backupParam.ParentSnapshot, source.VolMode, uploaderConfig, fs)
if err == provider.ErrorCanceled {
@@ -194,7 +193,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, uploaderConfig map[strin
}
fs.callbacks.OnFailed(context.Background(), fs.namespace, fs.jobName, dataPathErr)
} else {
fs.callbacks.OnCompleted(context.Background(), fs.namespace, fs.jobName, Result{Backup: BackupResult{snapshotID, emptySnapshot, source, totalBytes, incrementalBytes}})
fs.callbacks.OnCompleted(context.Background(), fs.namespace, fs.jobName, Result{Backup: BackupResult{snapshotID, emptySnapshot, source, totalBytes}})
}
}()
@@ -246,9 +245,9 @@ func (fs *fileSystemBR) Cancel() {
fs.log.WithField("user", fs.jobName).Info("FileSystemBR is canceled")
}
func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType string, credentialGetter *credentials.CredentialGetter, cacheDir string) error {
func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType string, credentialGetter *credentials.CredentialGetter) error {
if repositoryType == velerov1api.BackupRepositoryTypeKopia {
if err := repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo, CacheDir: cacheDir}); err != nil {
if err := repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil {
return err
}
} else {

View File

@@ -96,7 +96,7 @@ func TestAsyncBackup(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fs := newFileSystemBR("job-1", "test", nil, "velero", Callbacks{}, velerotest.NewLogger()).(*fileSystemBR)
mockProvider := providerMock.NewProvider(t)
mockProvider.On("RunBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(test.result.Backup.SnapshotID, test.result.Backup.EmptySnapshot, test.result.Backup.TotalBytes, test.result.Backup.IncrementalBytes, test.err)
mockProvider.On("RunBackup", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(test.result.Backup.SnapshotID, test.result.Backup.EmptySnapshot, test.result.Backup.TotalBytes, test.err)
mockProvider.On("Close", mock.Anything).Return(nil)
fs.uploaderProv = mockProvider
fs.initialized = true

View File

@@ -30,11 +30,10 @@ type Result struct {
// BackupResult represents the result of a backup
type BackupResult struct {
SnapshotID string `json:"snapshotID"`
EmptySnapshot bool `json:"emptySnapshot"`
Source AccessPoint `json:"source,omitempty"`
TotalBytes int64 `json:"totalBytes,omitempty"`
IncrementalBytes int64 `json:"incrementalBytes,omitempty"`
SnapshotID string `json:"snapshotID"`
EmptySnapshot bool `json:"emptySnapshot"`
Source AccessPoint `json:"source,omitempty"`
TotalBytes int64 `json:"totalBytes,omitempty"`
}
// RestoreResult represents the result of a restore

View File

@@ -1,99 +0,0 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exposer
import (
"context"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
type CacheConfigs struct {
Limit int64
StorageClass string
ResidentThreshold int64
}
const (
cacheVolumeName = "cachedir"
cacheVolumeDirSuffix = "-cache"
)
func createCachePVC(ctx context.Context, pvcClient corev1client.CoreV1Interface, ownerObject corev1api.ObjectReference, sc string, size int64, selectedNode string) (*corev1api.PersistentVolumeClaim, error) {
cachePVCName := getCachePVCName(ownerObject)
volumeMode := corev1api.PersistentVolumeFilesystem
pvcObj := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: ownerObject.Namespace,
Name: cachePVCName,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ownerObject.APIVersion,
Kind: ownerObject.Kind,
Name: ownerObject.Name,
UID: ownerObject.UID,
Controller: boolptr.True(),
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
AccessModes: []corev1api.PersistentVolumeAccessMode{corev1api.ReadWriteOnce},
StorageClassName: &sc,
VolumeMode: &volumeMode,
Resources: corev1api.VolumeResourceRequirements{
Requests: corev1api.ResourceList{
corev1api.ResourceStorage: *resource.NewQuantity(size, resource.BinarySI),
},
},
},
}
if selectedNode != "" {
pvcObj.Annotations = map[string]string{
kube.KubeAnnSelectedNode: selectedNode,
}
}
return pvcClient.PersistentVolumeClaims(pvcObj.Namespace).Create(ctx, pvcObj, metav1.CreateOptions{})
}
func getCachePVCName(ownerObject corev1api.ObjectReference) string {
return ownerObject.Name + cacheVolumeDirSuffix
}
func getCacheVolumeSize(dataSize int64, info *CacheConfigs) int64 {
if info == nil {
return 0
}
if dataSize != 0 && dataSize < info.ResidentThreshold {
return 0
}
// 20% inflate and round up to GB
volumeSize := (info.Limit*12/10 + (1 << 30) - 1) / (1 << 30) * (1 << 30)
return volumeSize
}

View File

@@ -1,80 +0,0 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exposer
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetCacheVolumeSize(t *testing.T) {
tests := []struct {
name string
dataSize int64
info *CacheConfigs
expected int64
}{
{
name: "nil info",
dataSize: 1024,
expected: 0,
},
{
name: "0 data size",
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
expected: 2 << 30,
},
{
name: "0 threshold",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30},
expected: 2 << 30,
},
{
name: "data size is smaller",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
expected: 0,
},
{
name: "data size is lager",
dataSize: 2048,
info: &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 1024},
expected: 2 << 30,
},
{
name: "limit smaller than 1G",
dataSize: 2048,
info: &CacheConfigs{Limit: 5120, ResidentThreshold: 1024},
expected: 1 << 30,
},
{
name: "larger than 1G after inflate",
dataSize: 2048,
info: &CacheConfigs{Limit: (1 << 30) - 1024, ResidentThreshold: 1024},
expected: 2 << 30,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
size := getCacheVolumeSize(test.dataSize, test.info)
require.Equal(t, test.expected, size)
})
}
}

View File

@@ -73,12 +73,6 @@ type GenericRestoreExposeParam struct {
// PriorityClassName is the priority class name for the data mover pod
PriorityClassName string
// RestoreSize specifies the data size for the volume to be restored
RestoreSize int64
// CacheVolume specifies the info for cache volumes
CacheVolume *CacheConfigs
}
// GenericRestoreExposer is the interfaces for a generic restore exposer
@@ -154,28 +148,6 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1ap
affinity := kube.GetLoadAffinityByStorageClass(param.LoadAffinity, storageClassName, curLog)
var cachePVC *corev1api.PersistentVolumeClaim
if param.CacheVolume != nil {
cacheVolumeSize := getCacheVolumeSize(param.RestoreSize, param.CacheVolume)
if cacheVolumeSize > 0 {
curLog.Infof("Creating cache PVC with size %v", cacheVolumeSize)
if pvc, err := createCachePVC(ctx, e.kubeClient.CoreV1(), ownerObject, param.CacheVolume.StorageClass, cacheVolumeSize, selectedNode); err != nil {
return errors.Wrap(err, "error to create cache pvc")
} else {
cachePVC = pvc
}
defer func() {
if err != nil {
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVC.Name, cachePVC.Namespace, 0, curLog)
}
}()
} else {
curLog.Infof("Don't need to create cache volume, restore size %v, cache info %v", param.RestoreSize, param.CacheVolume)
}
}
restorePod, err := e.createRestorePod(
ctx,
ownerObject,
@@ -189,7 +161,6 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1ap
param.NodeOS,
affinity,
param.PriorityClassName,
cachePVC,
)
if err != nil {
return errors.Wrapf(err, "error to create restore pod")
@@ -316,15 +287,6 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
}
cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
if err != nil {
cachePVC = nil
if !apierrors.IsNotFound(err) {
diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
}
}
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
diag += fmt.Sprintf("error listing events, err: %v\n", err)
@@ -352,18 +314,6 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
}
}
if cachePVC != nil {
diag += kube.DiagnosePVC(cachePVC, events)
if cachePVC.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, cachePVC.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting cache pv %s, err: %v\n", cachePVC.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
diag += "end diagnose restore exposer"
return diag
@@ -372,11 +322,9 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
cachePVCName := getCachePVCName(ownerObject)
kube.DeletePodIfAny(ctx, e.kubeClient.CoreV1(), restorePodName, ownerObject.Namespace, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), restorePVCName, ownerObject.Namespace, 0, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}
func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1api.ObjectReference, targetPVCName string, targetNamespace string, timeout time.Duration) error {
@@ -485,7 +433,6 @@ func (e *genericRestoreExposer) createRestorePod(
nodeOS string,
affinity *kube.LoadAffinity,
priorityClassName string,
cachePVC *corev1api.PersistentVolumeClaim,
) (*corev1api.Pod, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
@@ -514,6 +461,7 @@ func (e *genericRestoreExposer) createRestorePod(
var gracePeriod int64
volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode, false)
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1api.Volume{{
Name: volumeName,
@@ -523,25 +471,6 @@ func (e *genericRestoreExposer) createRestorePod(
},
},
}}
cacheVolumePath := ""
if cachePVC != nil {
mnt, _, path := kube.MakePodPVCAttachment(cacheVolumeName, nil, false)
volumeMounts = append(volumeMounts, mnt...)
volumes = append(volumes, corev1api.Volume{
Name: cacheVolumeName,
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: cachePVC.Name,
},
},
})
cacheVolumePath = path
}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes = append(volumes, podInfo.volumes...)
if label == nil {
@@ -559,7 +488,6 @@ func (e *genericRestoreExposer) createRestorePod(
fmt.Sprintf("--volume-mode=%s", volumeMode),
fmt.Sprintf("--data-download=%s", ownerObject.Name),
fmt.Sprintf("--resource-timeout=%s", operationTimeout.String()),
fmt.Sprintf("--cache-volume-path=%s", cacheVolumePath),
}
args = append(args, podInfo.logFormatArgs...)

View File

@@ -148,7 +148,6 @@ func TestCreateRestorePodWithPriorityClass(t *testing.T) {
kube.NodeOSLinux,
nil, // affinity
tc.expectedPriorityClass,
nil,
)
require.NoError(t, err, tc.description)
@@ -228,7 +227,6 @@ func TestCreateRestorePodWithMissingConfigMap(t *testing.T) {
kube.NodeOSLinux,
nil, // affinity
"", // empty priority class since config map is missing
nil,
)
// Should succeed even when config map is missing

View File

@@ -26,7 +26,6 @@ import (
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -106,10 +105,6 @@ func TestRestoreExpose(t *testing.T) {
targetPVCName string
targetNamespace string
kubeReactors []reactor
cacheVolume *CacheConfigs
expectBackupPod bool
expectBackupPVC bool
expectCachePVC bool
err string
}{
{
@@ -172,70 +167,6 @@ func TestRestoreExpose(t *testing.T) {
},
err: "error to create restore pvc: fake-create-error",
},
{
name: "succeed",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
expectBackupPod: true,
expectBackupPVC: true,
},
{
name: "succeed, cache config, no cache volume",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{},
expectBackupPod: true,
expectBackupPVC: true,
},
{
name: "create cache volume fail",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{Limit: 1024},
kubeReactors: []reactor{
{
verb: "create",
resource: "persistentvolumeclaims",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-create-error")
},
},
},
err: "error to create cache pvc: fake-create-error",
},
{
name: "succeed with cache volume",
targetPVCName: "fake-target-pvc",
targetNamespace: "fake-ns",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
targetPVCObj,
daemonSet,
storageClass,
},
cacheVolume: &CacheConfigs{Limit: 1024},
expectBackupPod: true,
expectBackupPVC: true,
expectCachePVC: true,
},
}
for _, test := range tests {
@@ -272,36 +203,9 @@ func TestRestoreExpose(t *testing.T) {
Resources: corev1api.ResourceRequirements{},
ExposeTimeout: time.Millisecond,
LoadAffinity: nil,
CacheVolume: test.cacheVolume,
},
)
if test.err != "" {
require.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
}
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPod {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
if test.expectCachePVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
require.EqualError(t, err, test.err)
})
}
}
@@ -747,38 +651,6 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
},
}
cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-restore-cache",
UID: "fake-cache-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: restore.APIVersion,
Kind: restore.Kind,
Name: restore.Name,
UID: restore.UID,
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimPending,
},
}
cachePV := corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeStatus{
Phase: corev1api.VolumePending,
Message: "fake-pv-message",
},
}
nodeAgentPod := corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
@@ -890,44 +762,6 @@ Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
{
name: "cache pvc with volume name, no pv",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&cachePVCWithVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
error getting restore pv fake-pv, err: persistentvolumes "fake-pv" not found
PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
end diagnose restore exposer`,
},
{
name: "cache pvc with volume name, pv exists",
ownerRestore: restore,
kubeClientObj: []runtime.Object{
&restorePodWithNodeName,
&restorePVCWithVolumeName,
&cachePVCWithVolumeName,
&restorePV,
&cachePV,
&nodeAgentPod,
},
expected: `begin diagnose restore exposer
Pod velero/fake-restore, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-restore, phase Pending, binding to fake-pv
PV fake-pv, phase Pending, reason , message fake-pv-message
PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
PV fake-pv-cache, phase Pending, reason , message fake-pv-message
end diagnose restore exposer`,
},
{
@@ -1139,7 +973,6 @@ func TestCreateRestorePod(t *testing.T) {
test.nodeOS,
test.affinity,
"", // priority class name
nil,
)
require.NoError(t, err)

View File

@@ -76,12 +76,6 @@ type PodVolumeExposeParam struct {
// Privileged indicates whether to create the pod with a privileged container
Privileged bool
// RestoreSize specifies the data size for the volume to be restored, for restore only
RestoreSize int64
// CacheVolume specifies the info for cache volumes, for restore only
CacheVolume *CacheConfigs
}
// PodVolumeExposer is the interfaces for a pod volume exposer
@@ -162,29 +156,7 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
curLog.WithField("path", path).Infof("Host path is retrieved for pod %s, volume %s", param.ClientPodName, param.ClientPodVolume)
var cachePVC *corev1api.PersistentVolumeClaim
if param.CacheVolume != nil {
cacheVolumeSize := getCacheVolumeSize(param.RestoreSize, param.CacheVolume)
if cacheVolumeSize > 0 {
curLog.Infof("Creating cache PVC with size %v", cacheVolumeSize)
if pvc, err := createCachePVC(ctx, e.kubeClient.CoreV1(), ownerObject, param.CacheVolume.StorageClass, cacheVolumeSize, pod.Spec.NodeName); err != nil {
return errors.Wrap(err, "error to create cache pvc")
} else {
cachePVC = pvc
}
defer func() {
if err != nil {
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVC.Name, cachePVC.Namespace, 0, curLog)
}
}()
} else {
curLog.Infof("Don't need to create cache volume, restore size %v, cache info %v", param.RestoreSize, param.CacheVolume)
}
}
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged, cachePVC)
hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged)
if err != nil {
return errors.Wrapf(err, "error to create hosting pod")
}
@@ -279,15 +251,6 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
}
cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
if err != nil {
cachePVC = nil
if !apierrors.IsNotFound(err) {
diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
}
}
events, err := e.kubeClient.CoreV1().Events(ownerObject.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
diag += fmt.Sprintf("error listing events, err: %v\n", err)
@@ -303,18 +266,6 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
}
}
if cachePVC != nil {
diag += kube.DiagnosePVC(cachePVC, events)
if cachePVC.Spec.VolumeName != "" {
if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, cachePVC.Spec.VolumeName, metav1.GetOptions{}); err != nil {
diag += fmt.Sprintf("error getting cache pv %s, err: %v\n", cachePVC.Spec.VolumeName, err)
} else {
diag += kube.DiagnosePV(pv)
}
}
}
diag += "end diagnose pod volume exposer"
return diag
@@ -322,14 +273,11 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference) {
restorePodName := ownerObject.Name
cachePVCName := getCachePVCName(ownerObject)
kube.DeletePodIfAny(ctx, e.kubeClient.CoreV1(), restorePodName, ownerObject.Namespace, e.log)
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}
func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool, cachePVC *corev1api.PersistentVolumeClaim) (*corev1api.Pod, error) {
operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool) (*corev1api.Pod, error) {
hostingPodName := ownerObject.Name
containerName := string(ownerObject.UID)
@@ -353,6 +301,7 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
MountPath: clientVolumePath,
MountPropagation: &mountPropagation,
}}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1api.Volume{{
Name: clientVolumeName,
@@ -362,25 +311,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
},
},
}}
cacheVolumePath := ""
if cachePVC != nil {
mnt, _, path := kube.MakePodPVCAttachment(cacheVolumeName, nil, false)
volumeMounts = append(volumeMounts, mnt...)
volumes = append(volumes, corev1api.Volume{
Name: cacheVolumeName,
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: cachePVC.Name,
},
},
})
cacheVolumePath = path
}
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes = append(volumes, podInfo.volumes...)
args := []string{
@@ -398,7 +328,6 @@ func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject cor
command = append(command, "backup")
} else {
args = append(args, fmt.Sprintf("--pod-volume-restore=%s", ownerObject.Name))
args = append(args, fmt.Sprintf("--cache-volume-path=%s", cacheVolumePath))
command = append(command, "restore")
}

View File

@@ -11,12 +11,10 @@ import (
"github.com/stretchr/testify/require"
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clientTesting "k8s.io/client-go/testing"
clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -74,9 +72,6 @@ func TestPodVolumeExpose(t *testing.T) {
exposeParam PodVolumeExposeParam
funcGetPodVolumeHostPath func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error)
funcExtractPodVolumeHostPath func(context.Context, string, kubernetes.Interface, string, string) (string, error)
kubeReactors []reactor
expectBackupPod bool
expectCachePVC bool
err string
}{
{
@@ -194,7 +189,6 @@ func TestPodVolumeExpose(t *testing.T) {
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "succeed with privileged pod",
@@ -218,89 +212,6 @@ func TestPodVolumeExpose(t *testing.T) {
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "succeed, cache config, no cache volume",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
},
{
name: "create cache volume fail",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{Limit: 1024},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
kubeReactors: []reactor{
{
verb: "create",
resource: "persistentvolumeclaims",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-create-error")
},
},
},
err: "error to create cache pvc: fake-create-error",
},
{
name: "succeed with cache volume",
ownerBackup: backup,
exposeParam: PodVolumeExposeParam{
ClientNamespace: "fake-ns",
ClientPodName: "fake-client-pod",
ClientPodVolume: "fake-client-volume",
CacheVolume: &CacheConfigs{Limit: 1024},
},
kubeClientObj: []runtime.Object{
podWithNode,
node,
daemonSet,
},
funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
return datapath.AccessPoint{
ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
}, nil
},
funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
},
expectBackupPod: true,
expectCachePVC: true,
},
}
@@ -308,10 +219,6 @@ func TestPodVolumeExpose(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
for _, reactor := range test.kubeReactors {
fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
}
exposer := podVolumeExposer{
kubeClient: fakeKubeClient,
log: velerotest.NewLogger(),
@@ -341,23 +248,9 @@ func TestPodVolumeExpose(t *testing.T) {
require.NoError(t, err)
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
require.NoError(t, err)
assert.NoError(t, err)
} else {
require.EqualError(t, err, test.err)
}
_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
if test.expectBackupPod {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
}
_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
if test.expectCachePVC {
require.NoError(t, err)
} else {
require.True(t, apierrors.IsNotFound(err))
assert.EqualError(t, err, test.err)
}
})
}
@@ -624,38 +517,6 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
},
}
cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup-cache",
UID: "fake-cache-pvc-uid",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
},
},
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimPending,
},
}
cachePV := corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pv-cache",
},
Status: corev1api.PersistentVolumeStatus{
Phase: corev1api.VolumePending,
Message: "fake-pv-message",
},
}
nodeAgentPod := corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
@@ -728,37 +589,6 @@ end diagnose pod volume exposer`,
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
end diagnose pod volume exposer`,
},
{
name: "cache pvc with volume name, no pv",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&cachePVCWithVolumeName,
&nodeAgentPod,
},
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
end diagnose pod volume exposer`,
},
{
name: "cache pvc with volume name, pv exists",
ownerBackup: backup,
kubeClientObj: []runtime.Object{
&backupPodWithNodeName,
&cachePVCWithVolumeName,
&cachePV,
&nodeAgentPod,
},
expected: `begin diagnose pod volume exposer
Pod velero/fake-backup, phase Pending, node name fake-node
Pod condition Initialized, status True, reason , message fake-pod-message
PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
PV fake-pv-cache, phase Pending, reason , message fake-pv-message
end diagnose pod volume exposer`,
},
{

View File

@@ -57,10 +57,6 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1api.DaemonSet
daemonSetArgs = append(daemonSetArgs, fmt.Sprintf("--node-agent-configmap=%s", c.nodeAgentConfigMap))
}
if len(c.backupRepoConfigMap) > 0 {
daemonSetArgs = append(daemonSetArgs, fmt.Sprintf("--backup-repository-configmap=%s", c.backupRepoConfigMap))
}
userID := int64(0)
mountPropagationMode := corev1api.MountPropagationHostToContainer

View File

@@ -60,10 +60,6 @@ func TestDaemonSet(t *testing.T) {
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--node-agent-configmap=node-agent-config-map", ds.Spec.Template.Spec.Containers[0].Args[2])
ds = DaemonSet("velero", WithBackupRepoConfigMap("backup-repo-config-map"))
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--backup-repository-configmap=backup-repo-config-map", ds.Spec.Template.Spec.Containers[0].Args[2])
ds = DaemonSet("velero", WithServiceAccountName("test-sa"))
assert.Equal(t, "test-sa", ds.Spec.Template.Spec.ServiceAccountName)

View File

@@ -22,8 +22,7 @@ func TestPkgImportNoCloudProvider(t *testing.T) {
t.Logf("Current test file path: %s", filename)
t.Logf("Current test directory: %s", filepath.Dir(filename)) // should be this package name
// go list -f {{.Deps}} ./<path-to-this-package-dir>
cmd := exec.CommandContext(
t.Context(),
cmd := exec.Command(
"go",
"list",
"-f",

View File

@@ -426,10 +426,6 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
dsOpts = append(dsOpts, WithNodeAgentConfigMap(o.NodeAgentConfigMap))
}
if len(o.BackupRepoConfigMap) > 0 {
dsOpts = append(dsOpts, WithBackupRepoConfigMap(o.BackupRepoConfigMap))
}
if len(o.KubeletRootDir) > 0 {
dsOpts = append(dsOpts, WithKubeletRootDir(o.KubeletRootDir))
}

Some files were not shown because too many files have changed in this diff Show More