Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-24 13:42:07 +00:00)

Compare commits (41 commits)
Commit SHA1s (author and date columns were not captured in this view):

b5ccc4373d, 327ea3ea13, 4b6708de2c, 65eaceee0b, 2d8a87fec4, eae5bea469,
87dbc16b0a, db2193c53a, 643dd784ea, e7166fc9e9, bfb431fcdf, 2d93ab261e,
fcb7fc9356, 727a4fd0ed, aa3bd251dd, dad85b6fc3, 78e9470028, 4ba2effaac,
f592a264a6, e39374f335, 10ef43e147, b7052c2cb1, 57370296ab, f4c4653c08,
987edf5037, 99e821a870, 041e5e2a7e, 8e58099674, a43f14b071, 26053ae6d6,
60203ad01b, bcdc30b59a, a1026cb531, f30b9f9504, 8688568ffc, 61bf2ef777,
14b34f08cc, add66eac42, 20af2c20c5, 60dd3dc832, a5d32f29da
changelogs/unreleased/9141-kaovilai (new file, 1 line)
@@ -0,0 +1 @@
+feat: Enhance BackupStorageLocation with Secret-based CA certificate support

changelogs/unreleased/9206-Joeavaikath (new file, 1 line)
@@ -0,0 +1 @@
+Remove labels associated with previous backups

changelogs/unreleased/9321-shubham-pampattiwar (new file, 1 line)
@@ -0,0 +1 @@
+Sanitize Azure HTTP responses in BSL status messages

changelogs/unreleased/9366-blackpiglet (new file, 1 line)
@@ -0,0 +1 @@
+Use hookIndex for recording multiple restore exec hooks.

changelogs/unreleased/9441-shubham-pampattiwar (new file, 1 line)
@@ -0,0 +1 @@
+Add PVC-to-Pod cache to improve volume policy performance

changelogs/unreleased/9445-mpryc (new file, 1 line)
@@ -0,0 +1 @@
+Fix plugin init container names exceeding DNS-1123 limit

changelogs/unreleased/9452-blackpiglet (new file, 1 line)
@@ -0,0 +1 @@
+Add maintenance job and data mover pod's labels and annotations setting.
@@ -113,10 +113,38 @@ spec:
               description: Bucket is the bucket to use for object storage.
               type: string
             caCert:
-              description: CACert defines a CA bundle to use when verifying
-                TLS connections to the provider.
+              description: |-
+                CACert defines a CA bundle to use when verifying TLS connections to the provider.
+                Deprecated: Use CACertRef instead.
               format: byte
               type: string
+            caCertRef:
+              description: |-
+                CACertRef is a reference to a Secret containing the CA certificate bundle to use
+                when verifying TLS connections to the provider. The Secret must be in the same
+                namespace as the BackupStorageLocation.
+              properties:
+                key:
+                  description: The key of the secret to select from. Must be
+                    a valid secret key.
+                  type: string
+                name:
+                  default: ""
+                  description: |-
+                    Name of the referent.
+                    This field is effectively required, but due to backwards compatibility is
+                    allowed to be empty. Instances of this type with an empty value here are
+                    almost certainly wrong.
+                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                  type: string
+                optional:
+                  description: Specify whether the Secret or its key must be
+                    defined
+                  type: boolean
+              required:
+              - key
+              type: object
+              x-kubernetes-map-type: atomic
             prefix:
               description: Prefix is the path inside a bucket to use for Velero
                 storage. Optional.

File diff suppressed because one or more lines are too long
design/bsl-certificate-support_design.md (new file, 417 lines)
@@ -0,0 +1,417 @@
# Design for BSL Certificate Support Enhancement

## Abstract

This design document describes the enhancement of BackupStorageLocation (BSL) certificate management in Velero, introducing a Secret-based certificate reference mechanism (`caCertRef`) alongside the existing inline certificate field (`caCert`). This enhancement provides a more secure, Kubernetes-native approach to certificate management while enabling future CLI improvements for automatic certificate discovery.

## Background

Currently, Velero supports TLS certificate verification for object storage providers through an inline `caCert` field in the BSL specification. While functional, this approach has several limitations:

- **Security**: Certificates are stored directly in the BSL YAML, potentially exposing sensitive data
- **Management**: Certificate rotation requires updating the BSL resource itself
- **CLI Usability**: Users must manually specify certificates when using CLI commands
- **Size Limitations**: Large certificate bundles can make BSL resources unwieldy

Issue #9097 and PR #8557 highlight the need for improved certificate management that addresses these concerns while maintaining backward compatibility.

## Goals

- Provide a secure, Secret-based certificate storage mechanism
- Maintain full backward compatibility with existing BSL configurations
- Enable future CLI enhancements for automatic certificate discovery
- Simplify certificate rotation and management
- Provide a clear migration path for existing users

## Non-Goals

- Removing support for inline certificates immediately
- Changing the behavior of existing BSL configurations
- Implementing client-side certificate validation
- Supporting certificates from ConfigMaps or other resource types

## High-Level Design

### API Changes

#### New Field: CACertRef

```go
type ObjectStorageLocation struct {
	// Existing field (now deprecated)
	// +optional
	// +kubebuilder:deprecatedversion:warning="caCert is deprecated, use caCertRef instead"
	CACert []byte `json:"caCert,omitempty"`

	// New field for Secret reference
	// +optional
	CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
}
```

The `SecretKeySelector` follows standard Kubernetes patterns:

```go
type SecretKeySelector struct {
	// Name of the Secret
	Name string `json:"name"`
	// Key within the Secret
	Key string `json:"key"`
}
```

### Certificate Resolution Logic

The system follows a priority-based resolution:

1. If `caCertRef` is specified, retrieve the certificate from the referenced Secret
2. If `caCert` is specified (and `caCertRef` is not), use the inline certificate
3. If neither is specified, no custom CA certificate is used

### Validation

BSL validation ensures mutual exclusivity:

```go
func (bsl *BackupStorageLocation) Validate() error {
	if bsl.Spec.ObjectStorage != nil &&
		bsl.Spec.ObjectStorage.CACert != nil &&
		bsl.Spec.ObjectStorage.CACertRef != nil {
		return errors.New("cannot specify both caCert and caCertRef in objectStorage")
	}
	return nil
}
```

## Detailed Design

### BSL Controller Changes

The BSL controller incorporates validation during reconciliation:

```go
func (r *backupStorageLocationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	// ... existing code ...

	// Validate BSL configuration
	if err := location.Validate(); err != nil {
		r.logger.WithError(err).Error("BSL validation failed")
		return ctrl.Result{}, err
	}

	// ... continue reconciliation ...
}
```

### Repository Provider Integration

All repository providers implement consistent certificate handling:

```go
func configureCACert(bsl *velerov1api.BackupStorageLocation, credGetter *credentials.CredentialGetter) ([]byte, error) {
	if bsl.Spec.ObjectStorage == nil {
		return nil, nil
	}

	// Prefer caCertRef (new method)
	if bsl.Spec.ObjectStorage.CACertRef != nil {
		certString, err := credGetter.FromSecret.Get(bsl.Spec.ObjectStorage.CACertRef)
		if err != nil {
			return nil, errors.Wrap(err, "error getting CA certificate from secret")
		}
		return []byte(certString), nil
	}

	// Fall back to caCert (deprecated)
	if bsl.Spec.ObjectStorage.CACert != nil {
		return bsl.Spec.ObjectStorage.CACert, nil
	}

	return nil, nil
}
```

### CLI Certificate Discovery Integration

#### Background: PR #8557 Implementation

PR #8557 ("CLI automatically discovers and uses cacert from BSL") was merged in August 2025, introducing automatic CA certificate discovery from the BackupStorageLocation for Velero CLI download operations. This eliminated the need for users to manually specify the `--cacert` flag when performing operations like `backup describe`, `backup download`, `backup logs`, and `restore logs`.

#### Current Implementation (Post PR #8557)

The CLI now automatically discovers certificates from the BSL through the `pkg/cmd/util/cacert/bsl_cacert.go` module:

```go
// Current implementation only supports inline caCert
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
	// ... fetch BSL ...
	if bsl.Spec.ObjectStorage != nil && len(bsl.Spec.ObjectStorage.CACert) > 0 {
		return string(bsl.Spec.ObjectStorage.CACert), nil
	}
	return "", nil
}
```

#### Enhancement with caCertRef Support

This design extends the existing CLI certificate discovery to support the new `caCertRef` field:

```go
// Enhanced implementation supporting both caCert and caCertRef
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
	// ... fetch BSL ...

	if bsl.Spec.ObjectStorage == nil {
		return "", nil
	}

	// Prefer caCertRef over inline caCert
	if bsl.Spec.ObjectStorage.CACertRef != nil {
		secret := &corev1api.Secret{}
		key := types.NamespacedName{
			Name:      bsl.Spec.ObjectStorage.CACertRef.Name,
			Namespace: namespace,
		}
		if err := client.Get(ctx, key, secret); err != nil {
			return "", errors.Wrap(err, "error getting certificate secret")
		}

		certData, ok := secret.Data[bsl.Spec.ObjectStorage.CACertRef.Key]
		if !ok {
			return "", errors.Errorf("key %s not found in secret",
				bsl.Spec.ObjectStorage.CACertRef.Key)
		}
		return string(certData), nil
	}

	// Fall back to inline caCert (deprecated)
	if bsl.Spec.ObjectStorage.CACert != nil {
		return string(bsl.Spec.ObjectStorage.CACert), nil
	}

	return "", nil
}
```

#### Certificate Resolution Priority

The CLI follows this priority order for certificate resolution:

1. **`--cacert` flag** - Manual override, highest priority
2. **`caCertRef`** - Secret-based certificate (recommended)
3. **`caCert`** - Inline certificate (deprecated)
4. **System certificate pool** - Default fallback (see the sketch below)
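
One way a client can combine a resolved bundle (steps 2 and 3) with the system roots (step 4) is to append the bundle to the system pool. The following is a minimal standard-library sketch of that idea; the helper name `tlsConfigWithCACert` is illustrative and this is not Velero's actual CLI code:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// tlsConfigWithCACert builds a TLS config that trusts the system roots plus
// an optional PEM-encoded CA bundle resolved from the BSL or --cacert flag.
func tlsConfigWithCACert(caCertPEM []byte) (*tls.Config, error) {
	pool, err := x509.SystemCertPool() // step 4: the default fallback
	if err != nil {
		pool = x509.NewCertPool() // degrade gracefully if system roots are unavailable
	}
	if len(caCertPEM) > 0 {
		if !pool.AppendCertsFromPEM(caCertPEM) {
			return nil, fmt.Errorf("unable to parse CA certificate PEM")
		}
	}
	return &tls.Config{RootCAs: pool}, nil
}
```
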
#### User Experience Improvements

With both PR #8557 and this enhancement:

```bash
# Automatic discovery - works with both caCert and caCertRef
velero backup describe my-backup
velero backup download my-backup
velero backup logs my-backup
velero restore logs my-restore

# Manual override still available
velero backup describe my-backup --cacert /custom/ca.crt

# Debug output shows certificate source
velero backup download my-backup --log-level=debug
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert' key 'ca-bundle.crt'
```

#### RBAC Considerations for CLI

CLI users need read access to Secrets when using `caCertRef`:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: velero-cli-user
  namespace: velero
rules:
- apiGroups: ["velero.io"]
  resources: ["backups", "restores", "backupstoragelocations"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
  # Limited to secrets referenced by BSLs
```

### Migration Strategy

#### Phase 1: Introduction (Current)

- Add the `caCertRef` field
- Mark `caCert` as deprecated
- Both fields supported, with mutual exclusivity enforced

#### Phase 2: Migration Period

- Documentation and tools to help users migrate
- Warning messages for `caCert` usage
- CLI enhancements to leverage `caCertRef`

#### Phase 3: Future Removal

- Remove the `caCert` field in a major version update
- Provide a migration tool for automatic conversion (see the sketch below)
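
To make the Phase 3 idea concrete, the automatic conversion can be pictured as a small helper that copies the inline bundle into a Secret and rewrites the BSL. This is an illustrative sketch, assuming the imports used elsewhere in this document (`kbclient`, `corev1api`, `velerov1api`, `metav1`); the function name and the `<bsl-name>-ca-cert` Secret naming convention are assumptions, not part of this design:

```go
// migrateBSLCACert is a hypothetical migration helper: it moves an inline
// caCert into a Secret, points caCertRef at it, and clears the deprecated
// field so the mutual-exclusivity validation keeps passing.
func migrateBSLCACert(ctx context.Context, c kbclient.Client, bsl *velerov1api.BackupStorageLocation) error {
	objStorage := bsl.Spec.ObjectStorage
	if objStorage == nil || len(objStorage.CACert) == 0 || objStorage.CACertRef != nil {
		return nil // nothing to migrate
	}

	secret := &corev1api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      bsl.Name + "-ca-cert", // assumed naming convention
			Namespace: bsl.Namespace,
		},
		Data: map[string][]byte{"ca-bundle.crt": objStorage.CACert},
	}
	if err := c.Create(ctx, secret); err != nil {
		return err
	}

	objStorage.CACertRef = &corev1api.SecretKeySelector{
		LocalObjectReference: corev1api.LocalObjectReference{Name: secret.Name},
		Key:                  "ca-bundle.crt",
	}
	objStorage.CACert = nil // drop the deprecated inline bundle
	return c.Update(ctx, bsl)
}
```
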
## User Experience

### Creating a BSL with Certificate Reference

1. Create a Secret containing the CA certificate:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: storage-ca-cert
  namespace: velero
type: Opaque
data:
  ca-bundle.crt: <base64-encoded-certificate>
```

2. Reference the Secret in the BSL:

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: my-bucket
    caCertRef:
      name: storage-ca-cert
      key: ca-bundle.crt
```

### Certificate Rotation

With Secret-based certificates:

```bash
# Update the Secret with the new certificate
kubectl create secret generic storage-ca-cert \
  --from-file=ca-bundle.crt=new-ca.crt \
  --dry-run=client -o yaml | kubectl apply -f -

# No BSL update required - changes take effect on next use
```

### CLI Usage Examples

#### Immediate Benefits

- No change required for existing workflows
- Certificate validation errors include helpful context

#### Future CLI Enhancements

```bash
# Automatic certificate discovery
velero backup download my-backup

# Manual override still available
velero backup download my-backup --cacert /custom/ca.crt

# Debug certificate resolution
velero backup download my-backup --log-level=debug
# [DEBUG] Resolved CA certificate from BSL 'default' Secret 'storage-ca-cert'
```

## Security Considerations

### Advantages of Secret-based Storage

1. **Encryption at Rest**: Secrets can be encrypted at rest in etcd (when encryption at rest is configured for the cluster)
2. **RBAC Control**: Fine-grained access control via Kubernetes RBAC
3. **Audit Trail**: Secret access is auditable
4. **Separation of Concerns**: Certificates are kept separate from configuration

### Required Permissions

The Velero server requires additional RBAC permissions:

```yaml
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
  # Scoped to secrets referenced by BSLs
```

## Compatibility

### Backward Compatibility

- Existing BSLs with `caCert` continue to function unchanged
- No breaking changes to the API
- Gradual migration path

### Forward Compatibility

The design allows for future enhancements:

- Multiple certificate support
- Certificate chain validation
- Automatic certificate discovery from cloud providers

## Implementation Phases

### Phase 1: Core Implementation ✓ (Current PR)

- API changes with the new `caCertRef` field
- Controller validation
- Repository provider updates
- Basic testing

### Phase 2: CLI Enhancement (Future)

- Automatic certificate discovery in the CLI
- Enhanced error messages
- Debug logging for certificate resolution

### Phase 3: Migration Tools (Future)

- Automated migration scripts
- Validation tools
- Documentation updates

## Testing

### Unit Tests

- BSL validation logic
- Certificate resolution in providers
- Controller behavior

### Integration Tests

- End-to-end backup/restore with `caCertRef`
- Certificate rotation scenarios
- Migration from `caCert` to `caCertRef`

### Manual Testing Scenarios

1. Create a BSL with `caCertRef`
2. Perform backup/restore operations
3. Rotate the certificate in the Secret
4. Verify continued operation

## Documentation

### User Documentation

- Migration guide from `caCert` to `caCertRef`
- Examples for common cloud providers
- Troubleshooting guide

### API Documentation

- Updated API reference
- Deprecation notices
- Field descriptions

## Alternatives Considered

### ConfigMap-based Storage

- Pros: Similar to Secrets, simpler API
- Cons: Not designed for sensitive data, no encryption at rest
- Decision: Secrets are the Kubernetes standard for sensitive data

### External Certificate Management

- Pros: Integration with cert-manager, etc.
- Cons: Additional complexity and dependencies
- Decision: Keep it simple, and allow users to manage certificates as needed

### Immediate Removal of Inline Certificates

- Pros: Cleaner API, forces best practices
- Cons: Breaking change, migration burden
- Decision: Gradual deprecation respects existing users

## Conclusion

This design provides a secure, Kubernetes-native approach to certificate management in Velero while maintaining backward compatibility. It establishes the foundation for enhanced CLI functionality and improved user experience, addressing the concerns raised in issue #9097 and enabling the features proposed in PR #8557.

The phased approach ensures a smooth migration for existing users while delivering immediate security benefits for new deployments.
@@ -94,7 +94,7 @@ RUN ARCH=$(go env GOARCH) && \
     chmod +x /usr/bin/goreleaser

 # get golangci-lint
-RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0
+RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.5.0

 # install kubectl
 RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
@@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 			hookLog.Error(err)
 			errors = append(errors, err)

-			errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
+			errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
 			if errTracker != nil {
 				hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 			}
@@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 				hookFailed = true
 			}

-			errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, hookFailed, hookErr)
+			errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, hookFailed, hookErr)
 			if errTracker != nil {
 				hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 			}
@@ -239,7 +239,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 	// containers to become ready.
 	// Each unexecuted hook is logged as an error and this error will be returned from this function.
 	for _, hooks := range byContainer {
-		for i, hook := range hooks {
+		for _, hook := range hooks {
 			if hook.executed {
 				continue
 			}
@@ -252,7 +252,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
 			},
 		)

-		errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), i, true, err)
+		errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hook.hookIndex, true, err)
 		if errTracker != nil {
 			hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
 		}
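
The hunks above replace the container-local loop index `i` with the hook's own `hookIndex` when reporting to the hook tracker. A standalone illustration of why the two diverge once hooks carry non-sequential indices (the types and values here are hypothetical, modeled on the bug #9359 test case that follows):

```go
package main

import "fmt"

// hook mimics a restore exec hook that remembers the index it was
// originally registered with in the tracker.
type hook struct {
	name      string
	hookIndex int
}

func main() {
	// After grouping and filtering, the surviving hooks keep their original
	// registration indices (0, 2, 4), but their slice positions are 0, 1, 2.
	hooks := []hook{{"first", 0}, {"second", 2}, {"third", 4}}
	for i, h := range hooks {
		// Recording with i would update the wrong tracker entries;
		// h.hookIndex preserves the identity assigned at registration time.
		fmt.Printf("slice pos %d vs recorded index %d (%s)\n", i, h.hookIndex, h.name)
	}
}
```
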
@@ -706,6 +706,130 @@ func TestWaitExecHandleHooks(t *testing.T) {
				},
			},
		},
		{
			name: "Multiple hooks with non-sequential indices (bug #9359)",
			initialPod: builder.ForPod("default", "my-pod").
				Containers(&corev1api.Container{
					Name: "container1",
				}).
				ContainerStatuses(&corev1api.ContainerStatus{
					Name: "container1",
					State: corev1api.ContainerState{
						Running: &corev1api.ContainerStateRunning{},
					},
				}).
				Result(),
			groupResource: "pods",
			byContainer: map[string][]PodExecRestoreHook{
				"container1": {
					{
						HookName:   "first-hook",
						HookSource: HookSourceAnnotation,
						Hook: velerov1api.ExecRestoreHook{
							Container:   "container1",
							Command:     []string{"/usr/bin/foo"},
							OnError:     velerov1api.HookErrorModeContinue,
							ExecTimeout: metav1.Duration{Duration: time.Second},
							WaitTimeout: metav1.Duration{Duration: time.Minute},
						},
						hookIndex: 0,
					},
					{
						HookName:   "second-hook",
						HookSource: HookSourceAnnotation,
						Hook: velerov1api.ExecRestoreHook{
							Container:   "container1",
							Command:     []string{"/usr/bin/bar"},
							OnError:     velerov1api.HookErrorModeContinue,
							ExecTimeout: metav1.Duration{Duration: time.Second},
							WaitTimeout: metav1.Duration{Duration: time.Minute},
						},
						hookIndex: 2,
					},
					{
						HookName:   "third-hook",
						HookSource: HookSourceAnnotation,
						Hook: velerov1api.ExecRestoreHook{
							Container:   "container1",
							Command:     []string{"/usr/bin/third"},
							OnError:     velerov1api.HookErrorModeContinue,
							ExecTimeout: metav1.Duration{Duration: time.Second},
							WaitTimeout: metav1.Duration{Duration: time.Minute},
						},
						hookIndex: 4,
					},
				},
			},
			expectedExecutions: []expectedExecution{
				{
					name: "first-hook",
					hook: &velerov1api.ExecHook{
						Container: "container1",
						Command:   []string{"/usr/bin/foo"},
						OnError:   velerov1api.HookErrorModeContinue,
						Timeout:   metav1.Duration{Duration: time.Second},
					},
					error: nil,
					pod: builder.ForPod("default", "my-pod").
						ObjectMeta(builder.WithResourceVersion("1")).
						Containers(&corev1api.Container{
							Name: "container1",
						}).
						ContainerStatuses(&corev1api.ContainerStatus{
							Name: "container1",
							State: corev1api.ContainerState{
								Running: &corev1api.ContainerStateRunning{},
							},
						}).
						Result(),
				},
				{
					name: "second-hook",
					hook: &velerov1api.ExecHook{
						Container: "container1",
						Command:   []string{"/usr/bin/bar"},
						OnError:   velerov1api.HookErrorModeContinue,
						Timeout:   metav1.Duration{Duration: time.Second},
					},
					error: nil,
					pod: builder.ForPod("default", "my-pod").
						ObjectMeta(builder.WithResourceVersion("1")).
						Containers(&corev1api.Container{
							Name: "container1",
						}).
						ContainerStatuses(&corev1api.ContainerStatus{
							Name: "container1",
							State: corev1api.ContainerState{
								Running: &corev1api.ContainerStateRunning{},
							},
						}).
						Result(),
				},
				{
					name: "third-hook",
					hook: &velerov1api.ExecHook{
						Container: "container1",
						Command:   []string{"/usr/bin/third"},
						OnError:   velerov1api.HookErrorModeContinue,
						Timeout:   metav1.Duration{Duration: time.Second},
					},
					error: nil,
					pod: builder.ForPod("default", "my-pod").
						ObjectMeta(builder.WithResourceVersion("1")).
						Containers(&corev1api.Container{
							Name: "container1",
						}).
						ContainerStatuses(&corev1api.ContainerStatus{
							Name: "container1",
							State: corev1api.ContainerState{
								Running: &corev1api.ContainerStateRunning{},
							},
						}).
						Result(),
				},
			},
			expectedErrors: nil,
		},
	}

	for _, test := range tests {
@@ -1,9 +1,11 @@
 package volumehelper

 import (
+	"context"
 	"fmt"
 	"strings"

+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -11,6 +13,7 @@ import (
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"

 	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/kuberesource"
 	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
 	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
@@ -33,8 +36,16 @@ type volumeHelperImpl struct {
 	// to the volume policy check, but fs-backup is based on the pod resource,
 	// the resource filter on PVC and PV doesn't work on this scenario.
 	backupExcludePVC bool
+	// pvcPodCache provides cached PVC to Pod mappings for improved performance.
+	// When there are many PVCs and pods, using this cache avoids O(N*M) lookups.
+	pvcPodCache *podvolumeutil.PVCPodCache
 }

+// NewVolumeHelperImpl creates a VolumeHelper without PVC-to-Pod caching.
+//
+// Deprecated: Use NewVolumeHelperImplWithNamespaces or NewVolumeHelperImplWithCache instead
+// for better performance. These functions provide PVC-to-Pod caching which avoids O(N*M)
+// complexity when there are many PVCs and pods. See issue #9179 for details.
 func NewVolumeHelperImpl(
 	volumePolicy *resourcepolicies.Policies,
 	snapshotVolumes *bool,
@@ -43,6 +54,43 @@ func NewVolumeHelperImpl(
 	defaultVolumesToFSBackup bool,
 	backupExcludePVC bool,
 ) VolumeHelper {
+	// Pass nil namespaces - no cache will be built, so this never fails.
+	// This is used by plugins that don't need the cache optimization.
+	vh, _ := NewVolumeHelperImplWithNamespaces(
+		volumePolicy,
+		snapshotVolumes,
+		logger,
+		client,
+		defaultVolumesToFSBackup,
+		backupExcludePVC,
+		nil,
+	)
+	return vh
+}
+
+// NewVolumeHelperImplWithNamespaces creates a VolumeHelper with a PVC-to-Pod cache for improved performance.
+// The cache is built internally from the provided namespaces list.
+// This avoids O(N*M) complexity when there are many PVCs and pods.
+// See issue #9179 for details.
+// Returns an error if cache building fails - callers should not proceed with backup in this case.
+func NewVolumeHelperImplWithNamespaces(
+	volumePolicy *resourcepolicies.Policies,
+	snapshotVolumes *bool,
+	logger logrus.FieldLogger,
+	client crclient.Client,
+	defaultVolumesToFSBackup bool,
+	backupExcludePVC bool,
+	namespaces []string,
+) (VolumeHelper, error) {
+	var pvcPodCache *podvolumeutil.PVCPodCache
+	if len(namespaces) > 0 {
+		pvcPodCache = podvolumeutil.NewPVCPodCache()
+		if err := pvcPodCache.BuildCacheForNamespaces(context.Background(), namespaces, client); err != nil {
+			return nil, err
+		}
+		logger.Infof("Built PVC-to-Pod cache for %d namespaces", len(namespaces))
+	}
+
 	return &volumeHelperImpl{
 		volumePolicy:    volumePolicy,
 		snapshotVolumes: snapshotVolumes,
@@ -50,7 +98,33 @@ func NewVolumeHelperImpl(
 		logger:                   logger,
 		client:                   client,
 		defaultVolumesToFSBackup: defaultVolumesToFSBackup,
 		backupExcludePVC:         backupExcludePVC,
-	}
+		pvcPodCache:              pvcPodCache,
+	}, nil
 }

+// NewVolumeHelperImplWithCache creates a VolumeHelper using an externally managed PVC-to-Pod cache.
+// This is used by plugins that build the cache lazily per-namespace (following the pattern from PR #9226).
+// The cache can be nil, in which case PVC-to-Pod lookups will fall back to direct API calls.
+func NewVolumeHelperImplWithCache(
+	backup velerov1api.Backup,
+	client crclient.Client,
+	logger logrus.FieldLogger,
+	pvcPodCache *podvolumeutil.PVCPodCache,
+) (VolumeHelper, error) {
+	resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(backup, client, logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get volume policies from backup")
+	}
+
+	return &volumeHelperImpl{
+		volumePolicy:             resourcePolicies,
+		snapshotVolumes:          backup.Spec.SnapshotVolumes,
+		logger:                   logger,
+		client:                   client,
+		defaultVolumesToFSBackup: boolptr.IsSetToTrue(backup.Spec.DefaultVolumesToFsBackup),
+		backupExcludePVC:         boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData),
+		pvcPodCache:              pvcPodCache,
+	}, nil
+}

 func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) {
@@ -105,10 +179,12 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
 	// If this PV is claimed, see if we've already taken a (pod volume backup)
 	// snapshot of the contents of this PV. If so, don't take a snapshot.
 	if pv.Spec.ClaimRef != nil {
-		pods, err := podvolumeutil.GetPodsUsingPVC(
+		// Use cached lookup if available for better performance with many PVCs/pods
+		pods, err := podvolumeutil.GetPodsUsingPVCWithCache(
 			pv.Spec.ClaimRef.Namespace,
 			pv.Spec.ClaimRef.Name,
 			v.client,
+			v.pvcPodCache,
 		)
 		if err != nil {
 			v.logger.WithError(err).Errorf("fail to get pod for PV %s", pv.Name)
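
The `pvcPodCache` introduced above exists to replace repeated "list every pod, scan its volumes" lookups with a one-time index. A rough standalone sketch of the underlying idea follows; this is an assumed shape for illustration, not the actual `PVCPodCache` implementation:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// pvcKey identifies a claim by namespace and name.
type pvcKey struct{ namespace, claimName string }

// buildIndex walks the pod list once and records, for each PVC, the pods
// that mount it. Subsequent lookups are O(1) instead of O(pods) per PVC,
// which is the O(N*M) pattern the cache comments above refer to.
func buildIndex(pods []corev1.Pod) map[pvcKey][]*corev1.Pod {
	idx := make(map[pvcKey][]*corev1.Pod)
	for i := range pods {
		pod := &pods[i]
		for _, vol := range pod.Spec.Volumes {
			if vol.PersistentVolumeClaim == nil {
				continue
			}
			k := pvcKey{pod.Namespace, vol.PersistentVolumeClaim.ClaimName}
			idx[k] = append(idx[k], pod)
		}
	}
	return idx
}

func main() {
	var pods []corev1.Pod // populated from a List call in real code
	idx := buildIndex(pods)
	fmt.Println(len(idx[pvcKey{"ns", "pvc-1"}]))
}
```
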
@@ -34,6 +34,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/builder"
 	"github.com/vmware-tanzu/velero/pkg/kuberesource"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
+	podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
 )

 func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
@@ -738,3 +739,498 @@ func TestGetVolumeFromResource(t *testing.T) {
		assert.ErrorContains(t, err, "resource is not a PersistentVolume or Volume")
	})
}

func TestVolumeHelperImplWithCache_ShouldPerformSnapshot(t *testing.T) {
	testCases := []struct {
		name                     string
		inputObj                 runtime.Object
		groupResource            schema.GroupResource
		pod                      *corev1api.Pod
		resourcePolicies         *resourcepolicies.ResourcePolicies
		snapshotVolumesFlag      *bool
		defaultVolumesToFSBackup bool
		buildCache               bool
		shouldSnapshot           bool
		expectedErr              bool
	}{
		{
			name:          "VolumePolicy match with cache, returns true",
			inputObj:      builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
			groupResource: kuberesource.PersistentVolumes,
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: ptr.To(true),
			buildCache:          true,
			shouldSnapshot:      true,
			expectedErr:         false,
		},
		{
			name:          "VolumePolicy not match, fs-backup via opt-out with cache, skips snapshot",
			inputObj:      builder.ForPersistentVolume("example-pv").StorageClass("gp3-csi").ClaimRef("ns", "pvc-1").Result(),
			groupResource: kuberesource.PersistentVolumes,
			pod: builder.ForPod("ns", "pod-1").Volumes(
				&corev1api.Volume{
					Name: "volume",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvc-1",
						},
					},
				},
			).Result(),
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag:      ptr.To(true),
			defaultVolumesToFSBackup: true,
			buildCache:               true,
			shouldSnapshot:           false,
			expectedErr:              false,
		},
		{
			name:          "Cache not built, falls back to direct lookup",
			inputObj:      builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
			groupResource: kuberesource.PersistentVolumes,
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			snapshotVolumesFlag: ptr.To(true),
			buildCache:          false,
			shouldSnapshot:      true,
			expectedErr:         false,
		},
		{
			name:          "No volume policy, defaultVolumesToFSBackup with cache, skips snapshot",
			inputObj:      builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result(),
			groupResource: kuberesource.PersistentVolumes,
			pod: builder.ForPod("ns", "pod-1").Volumes(
				&corev1api.Volume{
					Name: "volume",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvc-1",
						},
					},
				},
			).Result(),
			resourcePolicies:         nil,
			snapshotVolumesFlag:      ptr.To(true),
			defaultVolumesToFSBackup: true,
			buildCache:               true,
			shouldSnapshot:           false,
			expectedErr:              false,
		},
	}

	objs := []runtime.Object{
		&corev1api.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "ns",
				Name:      "pvc-1",
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
			if tc.pod != nil {
				require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
			}

			var p *resourcepolicies.Policies
			if tc.resourcePolicies != nil {
				p = &resourcepolicies.Policies{}
				err := p.BuildPolicy(tc.resourcePolicies)
				require.NoError(t, err)
			}

			var namespaces []string
			if tc.buildCache {
				namespaces = []string{"ns"}
			}

			vh, err := NewVolumeHelperImplWithNamespaces(
				p,
				tc.snapshotVolumesFlag,
				logrus.StandardLogger(),
				fakeClient,
				tc.defaultVolumesToFSBackup,
				false,
				namespaces,
			)
			require.NoError(t, err)

			obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.inputObj)
			require.NoError(t, err)

			actualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, tc.groupResource)
			if tc.expectedErr {
				require.Error(t, actualError)
				return
			}
			require.NoError(t, actualError)
			require.Equalf(t, tc.shouldSnapshot, actualShouldSnapshot, "Want shouldSnapshot as %t; Got shouldSnapshot as %t", tc.shouldSnapshot, actualShouldSnapshot)
		})
	}
}

func TestVolumeHelperImplWithCache_ShouldPerformFSBackup(t *testing.T) {
	testCases := []struct {
		name                     string
		pod                      *corev1api.Pod
		resources                []runtime.Object
		resourcePolicies         *resourcepolicies.ResourcePolicies
		snapshotVolumesFlag      *bool
		defaultVolumesToFSBackup bool
		buildCache               bool
		shouldFSBackup           bool
		expectedErr              bool
	}{
		{
			name: "VolumePolicy match with cache, return true",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-1",
							},
						},
					}).Result(),
			resources: []runtime.Object{
				builder.ForPersistentVolumeClaim("ns", "pvc-1").
					VolumeName("pv-1").
					StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
				builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
			},
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.FSBackup,
						},
					},
				},
			},
			buildCache:     true,
			shouldFSBackup: true,
			expectedErr:    false,
		},
		{
			name: "VolumePolicy match with cache, action is snapshot, return false",
			pod: builder.ForPod("ns", "pod-1").
				Volumes(
					&corev1api.Volume{
						Name: "vol-1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-1",
							},
						},
					}).Result(),
			resources: []runtime.Object{
				builder.ForPersistentVolumeClaim("ns", "pvc-1").
					VolumeName("pv-1").
					StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
				builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
			},
			resourcePolicies: &resourcepolicies.ResourcePolicies{
				Version: "v1",
				VolumePolicies: []resourcepolicies.VolumePolicy{
					{
						Conditions: map[string]any{
							"storageClass": []string{"gp2-csi"},
						},
						Action: resourcepolicies.Action{
							Type: resourcepolicies.Snapshot,
						},
					},
				},
			},
			buildCache:     true,
			shouldFSBackup: false,
			expectedErr:    false,
		},
		{
			name: "Cache not built, falls back to direct lookup, opt-in annotation",
			pod: builder.ForPod("ns", "pod-1").
				ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToBackupAnnotation, "vol-1")).
				Volumes(
					&corev1api.Volume{
						Name: "vol-1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc-1",
							},
						},
					}).Result(),
			resources: []runtime.Object{
				builder.ForPersistentVolumeClaim("ns", "pvc-1").
					VolumeName("pv-1").
					StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
				builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
			},
			buildCache:               false,
			defaultVolumesToFSBackup: false,
			shouldFSBackup:           true,
			expectedErr:              false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.resources...)
			if tc.pod != nil {
				require.NoError(t, fakeClient.Create(t.Context(), tc.pod))
			}

			var p *resourcepolicies.Policies
			if tc.resourcePolicies != nil {
				p = &resourcepolicies.Policies{}
				err := p.BuildPolicy(tc.resourcePolicies)
				require.NoError(t, err)
			}

			var namespaces []string
			if tc.buildCache {
				namespaces = []string{"ns"}
			}

			vh, err := NewVolumeHelperImplWithNamespaces(
				p,
				tc.snapshotVolumesFlag,
				logrus.StandardLogger(),
				fakeClient,
				tc.defaultVolumesToFSBackup,
				false,
				namespaces,
			)
			require.NoError(t, err)

			actualShouldFSBackup, actualError := vh.ShouldPerformFSBackup(tc.pod.Spec.Volumes[0], *tc.pod)
			if tc.expectedErr {
				require.Error(t, actualError)
				return
			}
			require.NoError(t, actualError)
			require.Equalf(t, tc.shouldFSBackup, actualShouldFSBackup, "Want shouldFSBackup as %t; Got shouldFSBackup as %t", tc.shouldFSBackup, actualShouldFSBackup)
		})
	}
}

// TestNewVolumeHelperImplWithCache tests the NewVolumeHelperImplWithCache constructor
// which is used by plugins that build the cache lazily per-namespace.
func TestNewVolumeHelperImplWithCache(t *testing.T) {
	testCases := []struct {
		name                    string
		backup                  velerov1api.Backup
		resourcePolicyConfigMap *corev1api.ConfigMap
		pvcPodCache             bool // whether to pass a cache
		expectError             bool
	}{
		{
			name: "creates VolumeHelper with nil cache",
			backup: velerov1api.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-backup",
					Namespace: "velero",
				},
				Spec: velerov1api.BackupSpec{
					SnapshotVolumes:          ptr.To(true),
					DefaultVolumesToFsBackup: ptr.To(false),
				},
			},
			pvcPodCache: false,
			expectError: false,
		},
		{
			name: "creates VolumeHelper with non-nil cache",
			backup: velerov1api.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-backup",
					Namespace: "velero",
				},
				Spec: velerov1api.BackupSpec{
					SnapshotVolumes:          ptr.To(true),
					DefaultVolumesToFsBackup: ptr.To(true),
					SnapshotMoveData:         ptr.To(true),
				},
			},
			pvcPodCache: true,
			expectError: false,
		},
		{
			name: "creates VolumeHelper with resource policies",
			backup: velerov1api.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-backup",
					Namespace: "velero",
				},
				Spec: velerov1api.BackupSpec{
					SnapshotVolumes: ptr.To(true),
					ResourcePolicy: &corev1api.TypedLocalObjectReference{
						Kind: "ConfigMap",
						Name: "resource-policy",
					},
				},
			},
			resourcePolicyConfigMap: &corev1api.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "resource-policy",
					Namespace: "velero",
				},
				Data: map[string]string{
					"policy": `version: v1
volumePolicies:
- conditions:
    storageClass:
    - gp2-csi
  action:
    type: snapshot`,
				},
			},
			pvcPodCache: true,
			expectError: false,
		},
		{
			name: "fails when resource policy ConfigMap not found",
			backup: velerov1api.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-backup",
					Namespace: "velero",
				},
				Spec: velerov1api.BackupSpec{
					ResourcePolicy: &corev1api.TypedLocalObjectReference{
						Kind: "ConfigMap",
						Name: "non-existent-policy",
					},
				},
			},
			pvcPodCache: false,
			expectError: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			var objs []runtime.Object
			if tc.resourcePolicyConfigMap != nil {
				objs = append(objs, tc.resourcePolicyConfigMap)
			}
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)

			var cache *podvolumeutil.PVCPodCache
			if tc.pvcPodCache {
				cache = podvolumeutil.NewPVCPodCache()
			}

			vh, err := NewVolumeHelperImplWithCache(
				tc.backup,
				fakeClient,
				logrus.StandardLogger(),
				cache,
			)

			if tc.expectError {
				require.Error(t, err)
				require.Nil(t, vh)
			} else {
				require.NoError(t, err)
				require.NotNil(t, vh)
			}
		})
	}
}

// TestNewVolumeHelperImplWithCache_UsesCache verifies that the VolumeHelper created
// via NewVolumeHelperImplWithCache actually uses the provided cache for lookups.
func TestNewVolumeHelperImplWithCache_UsesCache(t *testing.T) {
	// Create a pod that uses a PVC via opt-out (defaultVolumesToFsBackup=true)
	pod := builder.ForPod("ns", "pod-1").Volumes(
		&corev1api.Volume{
			Name: "volume",
			VolumeSource: corev1api.VolumeSource{
				PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
					ClaimName: "pvc-1",
				},
			},
		},
	).Result()

	pvc := &corev1api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ns",
			Name:      "pvc-1",
		},
	}

	pv := builder.ForPersistentVolume("example-pv").StorageClass("gp2-csi").ClaimRef("ns", "pvc-1").Result()

	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, pvc, pv, pod)

	// Build cache for the namespace
	cache := podvolumeutil.NewPVCPodCache()
	err := cache.BuildCacheForNamespace(t.Context(), "ns", fakeClient)
	require.NoError(t, err)

	backup := velerov1api.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-backup",
			Namespace: "velero",
		},
		Spec: velerov1api.BackupSpec{
			SnapshotVolumes:          ptr.To(true),
			DefaultVolumesToFsBackup: ptr.To(true), // opt-out mode
		},
	}

	vh, err := NewVolumeHelperImplWithCache(backup, fakeClient, logrus.StandardLogger(), cache)
	require.NoError(t, err)

	// Convert PV to unstructured
	obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pv)
	require.NoError(t, err)

	// ShouldPerformSnapshot should return false because the volume is selected for fs-backup.
	// This relies on the cache to find the pod using the PVC.
	shouldSnapshot, err := vh.ShouldPerformSnapshot(&unstructured.Unstructured{Object: obj}, kuberesource.PersistentVolumes)
	require.NoError(t, err)
	require.False(t, shouldSnapshot, "Expected snapshot to be skipped due to fs-backup selection via cache")
}
@@ -17,6 +17,8 @@ limitations under the License.
 package v1

 import (
+	"errors"
+
 	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -146,8 +148,15 @@ type ObjectStorageLocation struct {
 	Prefix string `json:"prefix,omitempty"`

 	// CACert defines a CA bundle to use when verifying TLS connections to the provider.
+	// Deprecated: Use CACertRef instead.
 	// +optional
 	CACert []byte `json:"caCert,omitempty"`
+
+	// CACertRef is a reference to a Secret containing the CA certificate bundle to use
+	// when verifying TLS connections to the provider. The Secret must be in the same
+	// namespace as the BackupStorageLocation.
+	// +optional
+	CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"`
 }

 // BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.
@@ -177,3 +186,13 @@ const (

 // TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus.
 // TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus.
+
+// Validate validates the BackupStorageLocation to ensure that only one of CACert or CACertRef is set.
+func (bsl *BackupStorageLocation) Validate() error {
+	if bsl.Spec.ObjectStorage != nil &&
+		bsl.Spec.ObjectStorage.CACert != nil &&
+		bsl.Spec.ObjectStorage.CACertRef != nil {
+		return errors.New("cannot specify both caCert and caCertRef in objectStorage")
+	}
+	return nil
+}
pkg/apis/velero/v1/backupstoragelocation_types_test.go (new file, 121 lines)
@@ -0,0 +1,121 @@
/*
Copyright The Velero Contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"testing"

	corev1api "k8s.io/api/core/v1"
)

func TestBackupStorageLocationValidate(t *testing.T) {
	tests := []struct {
		name        string
		bsl         *BackupStorageLocation
		expectError bool
	}{
		{
			name: "valid - neither CACert nor CACertRef set",
			bsl: &BackupStorageLocation{
				Spec: BackupStorageLocationSpec{
					StorageType: StorageType{
						ObjectStorage: &ObjectStorageLocation{
							Bucket: "test-bucket",
						},
					},
				},
			},
			expectError: false,
		},
		{
			name: "valid - only CACert set",
			bsl: &BackupStorageLocation{
				Spec: BackupStorageLocationSpec{
					StorageType: StorageType{
						ObjectStorage: &ObjectStorageLocation{
							Bucket: "test-bucket",
							CACert: []byte("test-cert"),
						},
					},
				},
			},
			expectError: false,
		},
		{
			name: "valid - only CACertRef set",
			bsl: &BackupStorageLocation{
				Spec: BackupStorageLocationSpec{
					StorageType: StorageType{
						ObjectStorage: &ObjectStorageLocation{
							Bucket: "test-bucket",
							CACertRef: &corev1api.SecretKeySelector{
								LocalObjectReference: corev1api.LocalObjectReference{
									Name: "ca-cert-secret",
								},
								Key: "ca.crt",
							},
						},
					},
				},
			},
			expectError: false,
		},
		{
			name: "invalid - both CACert and CACertRef set",
			bsl: &BackupStorageLocation{
				Spec: BackupStorageLocationSpec{
					StorageType: StorageType{
						ObjectStorage: &ObjectStorageLocation{
							Bucket: "test-bucket",
							CACert: []byte("test-cert"),
							CACertRef: &corev1api.SecretKeySelector{
								LocalObjectReference: corev1api.LocalObjectReference{
									Name: "ca-cert-secret",
								},
								Key: "ca.crt",
							},
						},
					},
				},
			},
			expectError: true,
		},
		{
			name: "valid - no ObjectStorage",
			bsl: &BackupStorageLocation{
				Spec: BackupStorageLocationSpec{
					StorageType: StorageType{
						ObjectStorage: nil,
					},
				},
			},
			expectError: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := test.bsl.Validate()
			if test.expectError && err == nil {
				t.Errorf("expected error but got none")
			}
			if !test.expectError && err != nil {
				t.Errorf("expected no error but got: %v", err)
			}
		})
	}
}
@@ -915,6 +915,11 @@ func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
 		*out = make([]byte, len(*in))
 		copy(*out, *in)
 	}
+	if in.CACertRef != nil {
+		in, out := &in.CACertRef, &out.CACertRef
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation.
@@ -76,14 +76,8 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
 		pvc.Spec.Selector = nil
 	}

-	// remove label selectors with "velero.io/" prefixing in the key which is left by Velero restore
-	if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
-		for k := range pvc.Spec.Selector.MatchLabels {
-			if strings.HasPrefix(k, "velero.io/") {
-				delete(pvc.Spec.Selector.MatchLabels, k)
-			}
-		}
-	}
+	// Clean stale Velero labels from PVC metadata and selector
+	a.cleanupStaleVeleroLabels(pvc, backup)

 	pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
 	if err != nil {
@@ -92,3 +86,50 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti

 	return &unstructured.Unstructured{Object: pvcMap}, actionhelpers.RelatedItemsForPVC(pvc, a.log), nil
 }
+
+// cleanupStaleVeleroLabels removes stale Velero labels from both the PVC metadata
+// and the selector's match labels to ensure clean backups
+func (a *PVCAction) cleanupStaleVeleroLabels(pvc *corev1api.PersistentVolumeClaim, backup *v1.Backup) {
+	// Clean stale Velero labels from selector match labels
+	if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil {
+		for k := range pvc.Spec.Selector.MatchLabels {
+			if strings.HasPrefix(k, "velero.io/") {
+				a.log.Infof("Deleting stale Velero label %s from PVC %s selector", k, pvc.Name)
+				delete(pvc.Spec.Selector.MatchLabels, k)
+			}
+		}
+	}
+
+	// Clean stale Velero labels from main metadata
+	if pvc.Labels != nil {
+		for k, v := range pvc.Labels {
+			// Only remove labels that are clearly stale from previous operations
+			shouldRemove := false
+
+			// Always remove restore-name labels as these are from previous restores
+			if k == v1.RestoreNameLabel {
+				shouldRemove = true
+			}
+
+			if k == v1.MustIncludeAdditionalItemAnnotation {
+				shouldRemove = true
+			}
+
+			// Remove backup-name labels that don't match current backup
+			if k == v1.BackupNameLabel && v != backup.Name {
+				shouldRemove = true
+			}
+
+			// Remove volume-snapshot-name labels from previous CSI backups
+			// Note: If this backup creates new CSI snapshots, the CSI action will add them back
+			if k == v1.VolumeSnapshotLabel {
+				shouldRemove = true
+			}
+
+			if shouldRemove {
+				a.log.Infof("Deleting stale Velero label %s=%s from PVC %s", k, v, pvc.Name)
+				delete(pvc.Labels, k)
+			}
+		}
+	}
+}
@@ -149,3 +149,176 @@ func TestBackupPVAction(t *testing.T) {
    require.NoError(t, err)
    assert.Empty(t, additional)
}

func TestCleanupStaleVeleroLabels(t *testing.T) {
    tests := []struct {
        name             string
        inputPVC         *corev1api.PersistentVolumeClaim
        backup           *v1.Backup
        expectedLabels   map[string]string
        expectedSelector *metav1.LabelSelector
    }{
        {
            name: "removes restore-name labels",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "velero.io/restore-name": "old-restore",
                        "app":                    "myapp",
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "app": "myapp",
            },
        },
        {
            name: "removes backup-name labels that don't match current backup",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "velero.io/backup-name": "old-backup",
                        "app":                   "myapp",
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "app": "myapp",
            },
        },
        {
            name: "keeps backup-name labels that match current backup",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "velero.io/backup-name": "current-backup",
                        "app":                   "myapp",
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "velero.io/backup-name": "current-backup",
                "app":                   "myapp",
            },
        },
        {
            name: "removes volume-snapshot-name labels",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "velero.io/volume-snapshot-name": "old-snapshot",
                        "app":                            "myapp",
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "app": "myapp",
            },
        },
        {
            name: "removes velero labels from selector match labels",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                },
                Spec: corev1api.PersistentVolumeClaimSpec{
                    Selector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{
                            "velero.io/restore-name": "old-restore",
                            "velero.io/backup-name":  "old-backup",
                            "app":                    "myapp",
                        },
                    },
                },
            },
            backup:         &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: nil,
            expectedSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "app": "myapp",
                },
            },
        },
        {
            name: "handles PVC with no labels",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                },
            },
            backup:         &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: nil,
        },
        {
            name: "handles PVC with no selector",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "app": "myapp",
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "app": "myapp",
            },
            expectedSelector: nil,
        },
        {
            name: "removes multiple stale velero labels",
            inputPVC: &corev1api.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pvc",
                    Labels: map[string]string{
                        "velero.io/restore-name":         "old-restore",
                        "velero.io/backup-name":          "old-backup",
                        "velero.io/volume-snapshot-name": "old-snapshot",
                        "app":                            "myapp",
                        "env":                            "prod",
                    },
                },
                Spec: corev1api.PersistentVolumeClaimSpec{
                    Selector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{
                            "velero.io/restore-name": "old-restore",
                            "app":                    "myapp",
                        },
                    },
                },
            },
            backup: &v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "current-backup"}},
            expectedLabels: map[string]string{
                "app": "myapp",
                "env": "prod",
            },
            expectedSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "app": "myapp",
                },
            },
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            action := NewPVCAction(velerotest.NewLogger())

            // Create a copy of the input PVC to avoid modifying the test case
            pvcCopy := tc.inputPVC.DeepCopy()

            action.cleanupStaleVeleroLabels(pvcCopy, tc.backup)

            assert.Equal(t, tc.expectedLabels, pvcCopy.Labels, "Labels should match expected values")
            assert.Equal(t, tc.expectedSelector, pvcCopy.Spec.Selector, "Selector should match expected values")
        })
    }
}

@@ -44,6 +44,7 @@ import (

    "k8s.io/apimachinery/pkg/api/resource"

    internalvolumehelper "github.com/vmware-tanzu/velero/internal/volumehelper"
    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
    veleroclient "github.com/vmware-tanzu/velero/pkg/client"
@@ -57,6 +58,7 @@ import (
    "github.com/vmware-tanzu/velero/pkg/util/boolptr"
    "github.com/vmware-tanzu/velero/pkg/util/csi"
    kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
    podvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume"
)

// TODO: Replace hardcoded VolumeSnapshot finalizer strings with constants from
@@ -72,6 +74,14 @@ const (
type pvcBackupItemAction struct {
    log      logrus.FieldLogger
    crClient crclient.Client

    // pvcPodCache provides lazy per-namespace caching of PVC-to-Pod mappings.
    // Since plugin instances are unique per backup (created via newPluginManager and
    // cleaned up via CleanupClients at backup completion), we can safely cache this
    // without mutex or backup UID tracking.
    // This avoids the O(N*M) performance issue when there are many PVCs and pods.
    // See issue #9179 and PR #9226 for details.
    pvcPodCache *podvolumeutil.PVCPodCache
}

// AppliesTo returns information indicating that the PVCBackupItemAction
@@ -97,6 +107,59 @@ func (p *pvcBackupItemAction) validateBackup(backup velerov1api.Backup) (valid b
    return true
}

// ensurePVCPodCacheForNamespace ensures the PVC-to-Pod cache is built for the given namespace.
// This uses lazy per-namespace caching following the pattern from PR #9226.
// Since plugin instances are unique per backup, we can safely cache without mutex or backup UID tracking.
func (p *pvcBackupItemAction) ensurePVCPodCacheForNamespace(ctx context.Context, namespace string) error {
    // Initialize cache if needed
    if p.pvcPodCache == nil {
        p.pvcPodCache = podvolumeutil.NewPVCPodCache()
    }

    // Build cache for namespace if not already done
    if !p.pvcPodCache.IsNamespaceBuilt(namespace) {
        p.log.Debugf("Building PVC-to-Pod cache for namespace %s", namespace)
        if err := p.pvcPodCache.BuildCacheForNamespace(ctx, namespace, p.crClient); err != nil {
            return errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", namespace)
        }
    }
    return nil
}

// getVolumeHelperWithCache creates a VolumeHelper using the pre-built PVC-to-Pod cache.
// The cache should be ensured for the relevant namespace(s) before calling this.
func (p *pvcBackupItemAction) getVolumeHelperWithCache(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
    // Create VolumeHelper with our lazy-built cache
    vh, err := internalvolumehelper.NewVolumeHelperImplWithCache(
        *backup,
        p.crClient,
        p.log,
        p.pvcPodCache,
    )
    if err != nil {
        return nil, errors.Wrap(err, "failed to create VolumeHelper")
    }
    return vh, nil
}

// getOrCreateVolumeHelper returns a VolumeHelper with lazy per-namespace caching.
// The VolumeHelper uses the pvcPodCache which is populated lazily as namespaces are encountered.
// Callers should use ensurePVCPodCacheForNamespace before calling methods that need
// PVC-to-Pod lookups for a specific namespace.
// Since plugin instances are unique per backup (created via newPluginManager and
// cleaned up via CleanupClients at backup completion), we can safely cache this.
// See issue #9179 and PR #9226 for details.
func (p *pvcBackupItemAction) getOrCreateVolumeHelper(backup *velerov1api.Backup) (internalvolumehelper.VolumeHelper, error) {
    // Initialize the PVC-to-Pod cache if needed
    if p.pvcPodCache == nil {
        p.pvcPodCache = podvolumeutil.NewPVCPodCache()
    }

    // Return the VolumeHelper with our lazily-built cache
    // The cache will be populated incrementally as namespaces are encountered
    return p.getVolumeHelperWithCache(backup)
}

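For readers without pkg/util/podvolume at hand, the shape of the cache these helpers rely on is roughly the following. The type name, fields, and method bodies below are an assumed sketch, not the actual PVCPodCache implementation:

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// cache is an assumed sketch of PVCPodCache: one pod List per namespace,
// indexed by claimed PVC, so later lookups are O(1) instead of O(pods).
type cache struct {
    built map[string]bool
    byPVC map[string][]corev1.Pod // "namespace/pvcName" -> pods mounting it
}

func newCache() *cache {
    return &cache{built: map[string]bool{}, byPVC: map[string][]corev1.Pod{}}
}

func (c *cache) buildNamespace(ctx context.Context, ns string, cli crclient.Client) error {
    if c.built[ns] { // lazy: each namespace is indexed at most once
        return nil
    }
    pods := &corev1.PodList{}
    if err := cli.List(ctx, pods, crclient.InNamespace(ns)); err != nil {
        return err
    }
    for _, pod := range pods.Items {
        for _, vol := range pod.Spec.Volumes {
            if src := vol.PersistentVolumeClaim; src != nil {
                key := ns + "/" + src.ClaimName
                c.byPVC[key] = append(c.byPVC[key], pod)
            }
        }
    }
    c.built[ns] = true
    return nil
}

func main() {
    c := newCache()
    fmt.Println(len(c.byPVC), c.built["ns-1"]) // 0 false - nothing built yet
}
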
func (p *pvcBackupItemAction) validatePVCandPV(
    pvc corev1api.PersistentVolumeClaim,
    item runtime.Unstructured,
@@ -248,12 +311,24 @@ func (p *pvcBackupItemAction) Execute(
        return item, nil, "", nil, nil
    }

    shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
    // Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
    if err := p.ensurePVCPodCacheForNamespace(context.TODO(), pvc.Namespace); err != nil {
        return nil, nil, "", nil, err
    }

    // Get or create the cached VolumeHelper for this backup
    vh, err := p.getOrCreateVolumeHelper(backup)
    if err != nil {
        return nil, nil, "", nil, err
    }

    shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
        item,
        kuberesource.PersistentVolumeClaims,
        *backup,
        p.crClient,
        p.log,
        vh,
    )
    if err != nil {
        return nil, nil, "", nil, err
@@ -621,8 +696,19 @@ func (p *pvcBackupItemAction) getVolumeSnapshotReference(
        return nil, errors.Wrapf(err, "failed to list PVCs in VolumeGroupSnapshot group %q in namespace %q", group, pvc.Namespace)
    }

    // Ensure PVC-to-Pod cache is built for this namespace (lazy per-namespace caching)
    if err := p.ensurePVCPodCacheForNamespace(ctx, pvc.Namespace); err != nil {
        return nil, errors.Wrapf(err, "failed to build PVC-to-Pod cache for namespace %s", pvc.Namespace)
    }

    // Get the cached VolumeHelper for filtering PVCs by volume policy
    vh, err := p.getOrCreateVolumeHelper(backup)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get VolumeHelper for filtering PVCs in group %q", group)
    }

    // Filter PVCs by volume policy
    filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup)
    filteredPVCs, err := p.filterPVCsByVolumePolicy(groupedPVCs, backup, vh)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to filter PVCs by volume policy for VolumeGroupSnapshot group %q", group)
    }
@@ -759,11 +845,12 @@ func (p *pvcBackupItemAction) listGroupedPVCs(ctx context.Context, namespace, la
func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
    pvcs []corev1api.PersistentVolumeClaim,
    backup *velerov1api.Backup,
    vh internalvolumehelper.VolumeHelper,
) ([]corev1api.PersistentVolumeClaim, error) {
    var filteredPVCs []corev1api.PersistentVolumeClaim

    for _, pvc := range pvcs {
        // Convert PVC to unstructured for ShouldPerformSnapshotWithBackup
        // Convert PVC to unstructured for ShouldPerformSnapshotWithVolumeHelper
        pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to convert PVC %s/%s to unstructured", pvc.Namespace, pvc.Name)
@@ -771,12 +858,14 @@ func (p *pvcBackupItemAction) filterPVCsByVolumePolicy(
        unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}

        // Check if this PVC should be snapshotted according to volume policies
        shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
        // Uses the cached VolumeHelper for better performance with many PVCs/pods
        shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithVolumeHelper(
            unstructuredPVC,
            kuberesource.PersistentVolumeClaims,
            *backup,
            p.crClient,
            p.log,
            vh,
        )
        if err != nil {
            return nil, errors.Wrapf(err, "failed to check volume policy for PVC %s/%s", pvc.Namespace, pvc.Name)

@@ -842,7 +842,9 @@ volumePolicies:
                crClient: client,
            }

            result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup)
            // Pass nil for VolumeHelper in tests - it will fall back to creating a new one per call
            // This is the expected behavior for testing and third-party plugins
            result, err := action.filterPVCsByVolumePolicy(tt.pvcs, backup, nil)
            if tt.expectError {
                require.Error(t, err)
            } else {
@@ -860,6 +862,111 @@ volumePolicies:
    }
}

// TestFilterPVCsByVolumePolicyWithVolumeHelper tests filterPVCsByVolumePolicy when a
// pre-created VolumeHelper is passed (non-nil). This exercises the cached path used
// by the CSI PVC BIA plugin for better performance.
func TestFilterPVCsByVolumePolicyWithVolumeHelper(t *testing.T) {
    // Create test PVCs and PVs
    pvcs := []corev1api.PersistentVolumeClaim{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pvc-csi", Namespace: "ns-1"},
            Spec: corev1api.PersistentVolumeClaimSpec{
                VolumeName:       "pv-csi",
                StorageClassName: pointer.String("sc-csi"),
            },
            Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pvc-nfs", Namespace: "ns-1"},
            Spec: corev1api.PersistentVolumeClaimSpec{
                VolumeName:       "pv-nfs",
                StorageClassName: pointer.String("sc-nfs"),
            },
            Status: corev1api.PersistentVolumeClaimStatus{Phase: corev1api.ClaimBound},
        },
    }

    pvs := []corev1api.PersistentVolume{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pv-csi"},
            Spec: corev1api.PersistentVolumeSpec{
                PersistentVolumeSource: corev1api.PersistentVolumeSource{
                    CSI: &corev1api.CSIPersistentVolumeSource{Driver: "csi-driver"},
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pv-nfs"},
            Spec: corev1api.PersistentVolumeSpec{
                PersistentVolumeSource: corev1api.PersistentVolumeSource{
                    NFS: &corev1api.NFSVolumeSource{
                        Server: "nfs-server",
                        Path:   "/export",
                    },
                },
            },
        },
    }

    // Create fake client with PVs
    objs := []runtime.Object{}
    for i := range pvs {
        objs = append(objs, &pvs[i])
    }
    client := velerotest.NewFakeControllerRuntimeClient(t, objs...)

    // Create backup with volume policy that skips NFS volumes
    volumePolicyStr := `
version: v1
volumePolicies:
- conditions:
    nfs: {}
  action:
    type: skip
`
    cm := &corev1api.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "volume-policy",
            Namespace: "velero",
        },
        Data: map[string]string{
            "volume-policy": volumePolicyStr,
        },
    }
    require.NoError(t, client.Create(t.Context(), cm))

    backup := &velerov1api.Backup{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "test-backup",
            Namespace: "velero",
        },
        Spec: velerov1api.BackupSpec{
            ResourcePolicy: &corev1api.TypedLocalObjectReference{
                Kind: "ConfigMap",
                Name: "volume-policy",
            },
        },
    }

    action := &pvcBackupItemAction{
        log:      velerotest.NewLogger(),
        crClient: client,
    }

    // Create a VolumeHelper using the same method the plugin would use
    vh, err := action.getOrCreateVolumeHelper(backup)
    require.NoError(t, err)
    require.NotNil(t, vh)

    // Test with the pre-created VolumeHelper (non-nil path)
    result, err := action.filterPVCsByVolumePolicy(pvcs, backup, vh)
    require.NoError(t, err)

    // Should filter out the NFS PVC, leaving only the CSI PVC
    require.Len(t, result, 1)
    require.Equal(t, "pvc-csi", result[0].Name)
}

func TestDetermineCSIDriver(t *testing.T) {
    tests := []struct {
        name string
@@ -1959,3 +2066,42 @@ func TestPVCRequestSize(t *testing.T) {
        })
    }
}

// TestGetOrCreateVolumeHelper tests the VolumeHelper and PVC-to-Pod cache behavior.
// Since plugin instances are unique per backup (created via newPluginManager and
// cleaned up via CleanupClients at backup completion), we verify that the pvcPodCache
// is properly initialized and reused across calls.
func TestGetOrCreateVolumeHelper(t *testing.T) {
    client := velerotest.NewFakeControllerRuntimeClient(t)
    action := &pvcBackupItemAction{
        log:      velerotest.NewLogger(),
        crClient: client,
    }
    backup := &velerov1api.Backup{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "test-backup",
            Namespace: "velero",
            UID:       types.UID("test-uid-1"),
        },
    }

    // Initially, pvcPodCache should be nil
    require.Nil(t, action.pvcPodCache, "pvcPodCache should be nil initially")

    // Get VolumeHelper first time - should create new cache and VolumeHelper
    vh1, err := action.getOrCreateVolumeHelper(backup)
    require.NoError(t, err)
    require.NotNil(t, vh1)

    // pvcPodCache should now be initialized
    require.NotNil(t, action.pvcPodCache, "pvcPodCache should be initialized after first call")
    cache1 := action.pvcPodCache

    // Get VolumeHelper second time - should reuse the same cache
    vh2, err := action.getOrCreateVolumeHelper(backup)
    require.NoError(t, err)
    require.NotNil(t, vh2)

    // The pvcPodCache should be the same instance
    require.Same(t, cache1, action.pvcPodCache, "Expected same pvcPodCache instance on repeated calls")
}

@@ -408,6 +408,28 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
    }
    backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)}

    // Resolve namespaces for PVC-to-Pod cache building in volumehelper.
    // See issue #9179 for details.
    namespaces, err := backupRequest.NamespaceIncludesExcludes.ResolveNamespaceList()
    if err != nil {
        log.WithError(err).Error("Failed to resolve namespace list for PVC-to-Pod cache")
        return err
    }

    volumeHelperImpl, err := volumehelper.NewVolumeHelperImplWithNamespaces(
        backupRequest.ResPolicies,
        backupRequest.Spec.SnapshotVolumes,
        log,
        kb.kbClient,
        boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
        !backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
        namespaces,
    )
    if err != nil {
        log.WithError(err).Error("Failed to build PVC-to-Pod cache for volume policy lookups")
        return err
    }

    itemBackupper := &itemBackupper{
        backupRequest: backupRequest,
        tarWriter:     tw,
@@ -421,15 +443,8 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
        itemHookHandler: &hook.DefaultItemHookHandler{
            PodCommandExecutor: kb.podCommandExecutor,
        },
        hookTracker: hook.NewHookTracker(),
        volumeHelperImpl: volumehelper.NewVolumeHelperImpl(
            backupRequest.ResPolicies,
            backupRequest.Spec.SnapshotVolumes,
            log,
            kb.kbClient,
            boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup),
            !backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()),
        ),
        hookTracker:         hook.NewHookTracker(),
        volumeHelperImpl:    volumeHelperImpl,
        kubernetesBackupper: kb,
    }

@@ -93,6 +93,15 @@ func (b *BackupStorageLocationBuilder) CACert(val []byte) *BackupStorageLocation
    return b
}

// CACertRef sets the BackupStorageLocation's object storage CACertRef (Secret reference).
func (b *BackupStorageLocationBuilder) CACertRef(selector *corev1api.SecretKeySelector) *BackupStorageLocationBuilder {
    if b.object.Spec.StorageType.ObjectStorage == nil {
        b.object.Spec.StorageType.ObjectStorage = new(velerov1api.ObjectStorageLocation)
    }
    b.object.Spec.ObjectStorage.CACertRef = selector
    return b
}

// Default sets the BackupStorageLocation's is default or not
func (b *BackupStorageLocationBuilder) Default(isDefault bool) *BackupStorageLocationBuilder {
    b.object.Spec.Default = isDefault

@@ -22,6 +22,8 @@ import (

    corev1api "k8s.io/api/core/v1"
    apimachineryRuntime "k8s.io/apimachinery/pkg/runtime"

    "github.com/vmware-tanzu/velero/pkg/label"
)

// ContainerBuilder builds Container objects
@@ -45,9 +47,9 @@ func ForPluginContainer(image string, pullPolicy corev1api.PullPolicy) *Containe
    return ForContainer(getName(image), image).PullPolicy(pullPolicy).VolumeMounts(volumeMount)
}

// getName returns the 'name' component of a docker
// image that includes the entire string except the registry name, and transforms the combined
// string into a RFC-1123 compatible name.
// getName returns the 'name' component of a docker image that includes the entire string
// except the registry name, and transforms the combined string into a DNS-1123 compatible name
// that fits within the 63-character limit for Kubernetes container names.
func getName(image string) string {
    slashIndex := strings.Index(image, "/")
    slashCount := 0
@@ -83,7 +85,10 @@ func getName(image string) string {
    re := strings.NewReplacer("/", "-",
        "_", "-",
        ".", "-")
    return re.Replace(image[start:end])
    name := re.Replace(image[start:end])

    // Ensure the name doesn't exceed Kubernetes container name length limit
    return label.GetValidName(name)
}

// Result returns the built Container.

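label.GetValidName does the final clamping here. As a standalone illustration of the general truncate-plus-hash technique, the sketch below shows why the result stays both short and deterministic; the 57+6 split and hash choice are assumptions for the example, not necessarily Velero's exact scheme:

package main

import (
    "crypto/sha256"
    "fmt"
)

// clampDNS1123 keeps a name within the 63-character DNS-1123 label limit.
// Long names are cut and suffixed with a short content hash so that distinct
// long inputs remain distinct and the same input always maps to the same name.
func clampDNS1123(name string) string {
    const maxLen = 63
    if len(name) <= maxLen {
        return name
    }
    sum := sha256.Sum256([]byte(name))
    return name[:maxLen-6] + fmt.Sprintf("%x", sum[:3]) // 57 chars + 6 hex chars
}

func main() {
    long := "redhat-user-workloads-ocp-art-tenant-oadp-hypershift-oadp-plugin-main-sha256-adb840"
    fmt.Println(clampDNS1123(long), len(clampDNS1123(long))) // always 63 for long inputs
}
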
@@ -100,3 +100,50 @@ func TestGetName(t *testing.T) {
        })
    }
}

func TestGetNameWithLongPaths(t *testing.T) {
    tests := []struct {
        name     string
        image    string
        validate func(t *testing.T, result string)
    }{
        {
            name:  "plugin with deeply nested repository path exceeding 63 characters",
            image: "arohcpsvcdev.azurecr.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-main@sha256:adb840bf3890b4904a8cdda1a74c82cf8d96c52eba9944ac10e795335d6fd450",
            validate: func(t *testing.T, result string) {
                t.Helper()
                // Should not exceed DNS-1123 label limit of 63 characters
                assert.LessOrEqual(t, len(result), 63, "Container name must satisfy DNS-1123 label constraints (max 63 chars)")
                // Should be exactly 63 characters (truncated with hash)
                assert.Len(t, result, 63)
                // Should be deterministic
                result2 := getName("arohcpsvcdev.azurecr.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-main@sha256:adb840bf3890b4904a8cdda1a74c82cf8d96c52eba9944ac10e795335d6fd450")
                assert.Equal(t, result, result2)
            },
        },
        {
            name:  "plugin with normal path length (should remain unchanged)",
            image: "arohcpsvcdev.azurecr.io/konveyor/velero-plugin-for-microsoft-azure@sha256:b2db5f09da514e817a74c992dcca5f90b77c2ab0b2797eba947d224271d6070e",
            validate: func(t *testing.T, result string) {
                t.Helper()
                assert.Equal(t, "konveyor-velero-plugin-for-microsoft-azure", result)
                assert.LessOrEqual(t, len(result), 63)
            },
        },
        {
            name:  "very long nested path",
            image: "registry.example.com/org/team/project/subproject/component/service/application-name-with-many-words:v1.2.3",
            validate: func(t *testing.T, result string) {
                t.Helper()
                assert.LessOrEqual(t, len(result), 63)
            },
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            result := getName(test.image)
            test.validate(t, result)
        })
    }
}

@@ -354,16 +354,62 @@ func (s *nodeAgentServer) run() {
        s.logger.Infof("Using customized cachePVC config %v", cachePVCConfig)
    }

    var podLabels map[string]string
    if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodLabels) > 0 {
        podLabels = s.dataPathConfigs.PodLabels
        s.logger.Infof("Using customized pod labels %+v", podLabels)
    }

    var podAnnotations map[string]string
    if s.dataPathConfigs != nil && len(s.dataPathConfigs.PodAnnotations) > 0 {
        podAnnotations = s.dataPathConfigs.PodAnnotations
        s.logger.Infof("Using customized pod annotations %+v", podAnnotations)
    }

    if s.backupRepoConfigs != nil {
        s.logger.Infof("Using backup repo config %v", s.backupRepoConfigs)
    }

    pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger, dataMovePriorityClass, privilegedFsBackup)
    pvbReconciler := controller.NewPodVolumeBackupReconciler(
        s.mgr.GetClient(),
        s.mgr,
        s.kubeClient,
        s.dataPathMgr,
        s.vgdpCounter,
        s.nodeName,
        s.config.dataMoverPrepareTimeout,
        s.config.resourceTimeout,
        podResources,
        s.metrics,
        s.logger,
        dataMovePriorityClass,
        privilegedFsBackup,
        podLabels,
        podAnnotations,
    )
    if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
    }

    pvrReconciler := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.vgdpCounter, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, s.backupRepoConfigs, cachePVCConfig, podResources, s.logger, dataMovePriorityClass, privilegedFsBackup, s.repoConfigMgr)
    pvrReconciler := controller.NewPodVolumeRestoreReconciler(
        s.mgr.GetClient(),
        s.mgr,
        s.kubeClient,
        s.dataPathMgr,
        s.vgdpCounter,
        s.nodeName,
        s.config.dataMoverPrepareTimeout,
        s.config.resourceTimeout,
        s.backupRepoConfigs,
        cachePVCConfig,
        podResources,
        s.logger,
        dataMovePriorityClass,
        privilegedFsBackup,
        s.repoConfigMgr,
        podLabels,
        podAnnotations,
    )
    if err := pvrReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
    }
@@ -388,6 +434,8 @@ func (s *nodeAgentServer) run() {
        s.logger,
        s.metrics,
        dataMovePriorityClass,
        podLabels,
        podAnnotations,
    )
    if err := dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
        s.logger.WithError(err).Fatal("Unable to create the data upload controller")
@@ -416,6 +464,8 @@ func (s *nodeAgentServer) run() {
        s.metrics,
        dataMovePriorityClass,
        s.repoConfigMgr,
        podLabels,
        podAnnotations,
    )

    if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {

@@ -558,7 +558,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
        return clientmgmt.NewManager(logger, s.logLevel, s.pluginRegistry)
    }

    backupStoreGetter := persistence.NewObjectBackupStoreGetter(s.credentialFileStore)
    backupStoreGetter := persistence.NewObjectBackupStoreGetterWithSecretStore(s.credentialFileStore, s.credentialSecretStore)

    backupTracker := controller.NewBackupTracker()

@@ -20,7 +20,9 @@ import (
    "context"

    "github.com/pkg/errors"
    corev1api "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    kbclient "sigs.k8s.io/controller-runtime/pkg/client"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -52,6 +54,7 @@ func GetCACertFromRestore(ctx context.Context, client kbclient.Client, namespace
}

// GetCACertFromBSL fetches a BackupStorageLocation directly and returns its cacert
// Priority order: caCertRef (from Secret) > caCert (inline, deprecated)
func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bslName string) (string, error) {
    if bslName == "" {
        return "", nil
@@ -71,7 +74,44 @@ func GetCACertFromBSL(ctx context.Context, client kbclient.Client, namespace, bs
        return "", errors.Wrapf(err, "error getting backup storage location %s", bslName)
    }

    if bsl.Spec.ObjectStorage != nil && len(bsl.Spec.ObjectStorage.CACert) > 0 {
    if bsl.Spec.ObjectStorage == nil {
        return "", nil
    }

    // Prefer caCertRef over inline caCert
    if bsl.Spec.ObjectStorage.CACertRef != nil {
        // Fetch certificate from Secret
        secret := &corev1api.Secret{}
        secretKey := types.NamespacedName{
            Name:      bsl.Spec.ObjectStorage.CACertRef.Name,
            Namespace: namespace,
        }

        if err := client.Get(ctx, secretKey, secret); err != nil {
            if apierrors.IsNotFound(err) {
                return "", errors.Errorf("certificate secret %s not found in namespace %s",
                    bsl.Spec.ObjectStorage.CACertRef.Name, namespace)
            }
            return "", errors.Wrapf(err, "error getting certificate secret %s",
                bsl.Spec.ObjectStorage.CACertRef.Name)
        }

        keyName := bsl.Spec.ObjectStorage.CACertRef.Key
        if keyName == "" {
            return "", errors.New("caCertRef key is empty")
        }

        certData, ok := secret.Data[keyName]
        if !ok {
            return "", errors.Errorf("key %s not found in secret %s",
                keyName, bsl.Spec.ObjectStorage.CACertRef.Name)
        }

        return string(certData), nil
    }

    // Fall back to inline caCert (deprecated)
    if len(bsl.Spec.ObjectStorage.CACert) > 0 {
        return string(bsl.Spec.ObjectStorage.CACert), nil
    }

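Wiring this up from the user side means creating the Secret and pointing spec.objectStorage.caCertRef at it. A sketch of the two objects is below; the secret and key names are examples, not required values:

package main

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newCASecret builds the Secret that caCertRef points at. It must live in the
// same namespace as the BackupStorageLocation.
func newCASecret(ns string, pem []byte) *corev1.Secret {
    return &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "bsl-ca-bundle"},
        Data:       map[string][]byte{"ca-bundle.crt": pem},
    }
}

// caCertRef is the selector a BSL would carry in spec.objectStorage.caCertRef.
func caCertRef() *corev1.SecretKeySelector {
    return &corev1.SecretKeySelector{
        LocalObjectReference: corev1.LocalObjectReference{Name: "bsl-ca-bundle"},
        Key:                  "ca-bundle.crt",
    }
}

func main() {
    _ = newCASecret("velero", []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"))
    _ = caCertRef()
}
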
@@ -21,6 +21,7 @@ import (

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    corev1api "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -294,6 +295,271 @@ func TestGetCACertFromBSL(t *testing.T) {
    }
}

// TestGetCACertFromBSL_WithCACertRef tests the new caCertRef functionality
func TestGetCACertFromBSL_WithCACertRef(t *testing.T) {
    testCases := []struct {
        name           string
        bslName        string
        bsl            *velerov1api.BackupStorageLocation
        secret         *corev1api.Secret
        expectedCACert string
        expectedError  bool
        errorContains  string
    }{
        {
            name:    "BSL with caCertRef pointing to valid secret",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "test-secret",
                                },
                                Key: "ca-bundle.crt",
                            },
                        },
                    },
                },
            },
            secret: &corev1api.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-secret",
                },
                Data: map[string][]byte{
                    "ca-bundle.crt": []byte("test-cacert-from-secret"),
                },
            },
            expectedCACert: "test-cacert-from-secret",
            expectedError:  false,
        },
        {
            name:    "BSL with both caCertRef and caCert - caCertRef takes precedence",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACert: []byte("inline-cacert-deprecated"),
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "test-secret",
                                },
                                Key: "ca-bundle.crt",
                            },
                        },
                    },
                },
            },
            secret: &corev1api.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-secret",
                },
                Data: map[string][]byte{
                    "ca-bundle.crt": []byte("cacert-from-secret-takes-precedence"),
                },
            },
            expectedCACert: "cacert-from-secret-takes-precedence",
            expectedError:  false,
        },
        {
            name:    "BSL with caCertRef but secret not found",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "missing-secret",
                                },
                                Key: "ca-bundle.crt",
                            },
                        },
                    },
                },
            },
            secret:         nil,
            expectedCACert: "",
            expectedError:  true,
            errorContains:  "certificate secret missing-secret not found",
        },
        {
            name:    "BSL with caCertRef but key not found in secret",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "test-secret",
                                },
                                Key: "missing-key",
                            },
                        },
                    },
                },
            },
            secret: &corev1api.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-secret",
                },
                Data: map[string][]byte{
                    "ca-bundle.crt": []byte("test-cacert"),
                },
            },
            expectedCACert: "",
            expectedError:  true,
            errorContains:  "key missing-key not found in secret test-secret",
        },
        {
            name:    "BSL with caCertRef but empty key",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "test-secret",
                                },
                                Key: "",
                            },
                        },
                    },
                },
            },
            secret: &corev1api.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-secret",
                },
                Data: map[string][]byte{
                    "ca-bundle.crt": []byte("test-cacert"),
                },
            },
            expectedCACert: "",
            expectedError:  true,
            errorContains:  "caCertRef key is empty",
        },
        {
            name:    "BSL with caCertRef containing multi-line PEM certificate",
            bslName: "test-bsl",
            bsl: &velerov1api.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-bsl",
                },
                Spec: velerov1api.BackupStorageLocationSpec{
                    Provider: "aws",
                    StorageType: velerov1api.StorageType{
                        ObjectStorage: &velerov1api.ObjectStorageLocation{
                            Bucket: "test-bucket",
                            CACertRef: &corev1api.SecretKeySelector{
                                LocalObjectReference: corev1api.LocalObjectReference{
                                    Name: "test-secret",
                                },
                                Key: "ca.pem",
                            },
                        },
                    },
                },
            },
            secret: &corev1api.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Namespace: "test-ns",
                    Name:      "test-secret",
                },
                Data: map[string][]byte{
                    "ca.pem": []byte("-----BEGIN CERTIFICATE-----\nMIIDETC...\n-----END CERTIFICATE-----\n"),
                },
            },
            expectedCACert: "-----BEGIN CERTIFICATE-----\nMIIDETC...\n-----END CERTIFICATE-----\n",
            expectedError:  false,
        },
        {
            name:    "BSL falls back to inline caCert when caCertRef is nil",
            bslName: "test-bsl",
            bsl: builder.ForBackupStorageLocation("test-ns", "test-bsl").
                Provider("aws").
                Bucket("test-bucket").
                CACert([]byte("fallback-inline-cacert")).
                Result(),
            secret:         nil,
            expectedCACert: "fallback-inline-cacert",
            expectedError:  false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            var objs []runtime.Object
            if tc.bsl != nil {
                objs = append(objs, tc.bsl)
            }
            if tc.secret != nil {
                objs = append(objs, tc.secret)
            }

            scheme := runtime.NewScheme()
            _ = velerov1api.AddToScheme(scheme)
            _ = corev1api.AddToScheme(scheme)

            fakeClient := fake.NewClientBuilder().
                WithScheme(scheme).
                WithRuntimeObjects(objs...).
                Build()

            cacert, err := GetCACertFromBSL(t.Context(), fakeClient, "test-ns", tc.bslName)

            if tc.expectedError {
                require.Error(t, err)
                if tc.errorContains != "" {
                    assert.Contains(t, err.Error(), tc.errorContains)
                }
            } else {
                require.NoError(t, err)
                assert.Equal(t, tc.expectedCACert, cacert)
            }
        })
    }
}

// TestGetCACertFromBackup_ClientError tests error scenarios where client.Get returns non-NotFound errors
func TestGetCACertFromBackup_ClientError(t *testing.T) {
    testCases := []struct {

@@ -201,11 +201,22 @@ func (r *BackupRepoReconciler) needInvalidBackupRepo(oldObj client.Object, newOb
        return true
    }

    // Check if CACert has changed
    if !bytes.Equal(oldStorage.CACert, newStorage.CACert) {
        logger.Info("BSL's CACert has changed, invalid backup repositories")
        return true
    }

    // Check if CACertRef has changed
    if (oldStorage.CACertRef == nil && newStorage.CACertRef != nil) ||
        (oldStorage.CACertRef != nil && newStorage.CACertRef == nil) ||
        (oldStorage.CACertRef != nil && newStorage.CACertRef != nil &&
            (oldStorage.CACertRef.Name != newStorage.CACertRef.Name ||
                oldStorage.CACertRef.Key != newStorage.CACertRef.Key)) {
        logger.Info("BSL's CACertRef has changed, invalid backup repositories")
        return true
    }

    if !reflect.DeepEqual(oldConfig, newConfig) {
        logger.Info("BSL's storage config has changed, invalid backup repositories")

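The three-clause nil/Name/Key comparison above could also be written with reflect.DeepEqual. The sketch below shows the compact form, with the caveat that DeepEqual additionally compares the selector's Optional field, which the hand-rolled check ignores; it is shown as an alternative, not the code the PR uses:

package main

import (
    "fmt"
    "reflect"

    corev1 "k8s.io/api/core/v1"
)

// caCertRefChanged reports whether two selectors differ; two nils compare equal.
func caCertRefChanged(oldRef, newRef *corev1.SecretKeySelector) bool {
    return !reflect.DeepEqual(oldRef, newRef)
}

func main() {
    a := &corev1.SecretKeySelector{
        LocalObjectReference: corev1.LocalObjectReference{Name: "bsl-ca-bundle"},
        Key:                  "ca-bundle.crt",
    }
    fmt.Println(caCertRefChanged(nil, nil))        // false
    fmt.Println(caCertRefChanged(a, nil))          // true
    fmt.Println(caCertRefChanged(a, a.DeepCopy())) // false
}
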
@@ -18,6 +18,7 @@ package controller

import (
    "context"
    "regexp"
    "strings"
    "time"

@@ -46,6 +47,104 @@ const (
    bslValidationEnqueuePeriod = 10 * time.Second
)

// sanitizeStorageError cleans up verbose HTTP responses from cloud provider errors,
// particularly Azure which includes full HTTP response details and XML in error messages.
// It extracts the error code and message while removing HTTP headers and response bodies.
// It also scrubs sensitive information like SAS tokens from URLs.
func sanitizeStorageError(err error) string {
    if err == nil {
        return ""
    }

    errMsg := err.Error()

    // Scrub sensitive information from URLs (SAS tokens, credentials, etc.)
    // Azure SAS token parameters: sig, se, st, sp, spr, sv, sr, sip, srt, ss
    // These appear as query parameters in URLs like: ?sig=value&se=value
    sasParamsRegex := regexp.MustCompile(`([?&])(sig|se|st|sp|spr|sv|sr|sip|srt|ss)=([^&\s<>\n]+)`)
    errMsg = sasParamsRegex.ReplaceAllString(errMsg, `${1}${2}=***REDACTED***`)

    // Check if this looks like an Azure HTTP response error
    // Azure errors contain patterns like "RESPONSE 404:" and "ERROR CODE:"
    if !strings.Contains(errMsg, "RESPONSE") || !strings.Contains(errMsg, "ERROR CODE:") {
        // Not an Azure-style error, return as-is
        return errMsg
    }

    // Extract the error code (e.g., "ContainerNotFound", "BlobNotFound")
    errorCodeRegex := regexp.MustCompile(`ERROR CODE:\s*(\w+)`)
    errorCodeMatch := errorCodeRegex.FindStringSubmatch(errMsg)
    var errorCode string
    if len(errorCodeMatch) > 1 {
        errorCode = errorCodeMatch[1]
    }

    // Extract the error message from the XML or plain text
    // Look for message between <Message> tags or after "RESPONSE XXX:"
    var errorMessage string

    // Try to extract from XML first
    messageRegex := regexp.MustCompile(`<Message>(.*?)</Message>`)
    messageMatch := messageRegex.FindStringSubmatch(errMsg)
    if len(messageMatch) > 1 {
        errorMessage = messageMatch[1]
        // Remove RequestId and Time from the message
        if idx := strings.Index(errorMessage, "\nRequestId:"); idx != -1 {
            errorMessage = errorMessage[:idx]
        }
    } else {
        // Try to extract from plain text response (e.g., "RESPONSE 404: 404 The specified container does not exist.")
        responseRegex := regexp.MustCompile(`RESPONSE\s+\d+:\s+\d+\s+([^\n]+)`)
        responseMatch := responseRegex.FindStringSubmatch(errMsg)
        if len(responseMatch) > 1 {
            errorMessage = strings.TrimSpace(responseMatch[1])
        }
    }

    // Build a clean error message
    var cleanMsg string
    if errorCode != "" && errorMessage != "" {
        cleanMsg = errorCode + ": " + errorMessage
    } else if errorCode != "" {
        cleanMsg = errorCode
    } else if errorMessage != "" {
        cleanMsg = errorMessage
    } else {
        // Fallback: try to extract the desc part from gRPC error
        descRegex := regexp.MustCompile(`desc\s*=\s*(.+)`)
        descMatch := descRegex.FindStringSubmatch(errMsg)
        if len(descMatch) > 1 {
            // Take everything up to the first newline or "RESPONSE" marker
            desc := descMatch[1]
            if idx := strings.Index(desc, "\n"); idx != -1 {
                desc = desc[:idx]
            }
            if idx := strings.Index(desc, "RESPONSE"); idx != -1 {
                desc = strings.TrimSpace(desc[:idx])
            }
            cleanMsg = desc
        } else {
            // Last resort: return first line
            if idx := strings.Index(errMsg, "\n"); idx != -1 {
                cleanMsg = errMsg[:idx]
            } else {
                cleanMsg = errMsg
            }
        }
    }

    // Preserve the prefix part of the error (e.g., "rpc error: code = Unknown desc = ")
    // but replace the verbose description with our clean message
    if strings.Contains(errMsg, "desc = ") {
        parts := strings.SplitN(errMsg, "desc = ", 2)
        if len(parts) == 2 {
            return parts[0] + "desc = " + cleanMsg
        }
    }

    return cleanMsg
}

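The SAS-token scrub is the one step that runs on every error, Azure-shaped or not, so it is worth exercising in isolation. The snippet below compiles the same pattern the controller uses; the URL is made up:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Mask the value of every known Azure SAS query parameter before the
    // message goes anywhere near a status field or log line.
    sasParams := regexp.MustCompile(`([?&])(sig|se|st|sp|spr|sv|sr|sip|srt|ss)=([^&\s<>\n]+)`)

    url := "GET https://acct.blob.core.windows.net/c?sv=2020-08-04&sig=abc123&se=2024-12-31"
    fmt.Println(sasParams.ReplaceAllString(url, `${1}${2}=***REDACTED***`))
    // GET https://acct.blob.core.windows.net/c?sv=***REDACTED***&sig=***REDACTED***&se=***REDACTED***
}
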
// BackupStorageLocationReconciler reconciles a BackupStorageLocation object
type backupStorageLocationReconciler struct {
    ctx context.Context
@@ -125,9 +224,9 @@ func (r *backupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
        if err != nil {
            log.Info("BackupStorageLocation is invalid, marking as unavailable")
            err = errors.Wrapf(err, "BackupStorageLocation %q is unavailable", location.Name)
            unavailableErrors = append(unavailableErrors, err.Error())
            unavailableErrors = append(unavailableErrors, sanitizeStorageError(err))
            location.Status.Phase = velerov1api.BackupStorageLocationPhaseUnavailable
            location.Status.Message = err.Error()
            location.Status.Message = sanitizeStorageError(err)
        } else {
            log.Info("BackupStorageLocations is valid, marking as available")
            location.Status.Phase = velerov1api.BackupStorageLocationPhaseAvailable
@@ -138,6 +237,12 @@ func (r *backupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
        }
    }()

    // Validate the BackupStorageLocation spec
    if err = location.Validate(); err != nil {
        log.WithError(err).Error("BackupStorageLocation spec is invalid")
        return
    }

    backupStore, err := r.backupStoreGetter.Get(&location, pluginManager, log)
    if err != nil {
        log.WithError(err).Error("Error getting a backup store")

@@ -303,3 +303,115 @@ func TestBSLReconcile(t *testing.T) {
        })
    }
}

func TestSanitizeStorageError(t *testing.T) {
    tests := []struct {
        name     string
        input    error
        expected string
    }{
        {
            name:     "Nil error",
            input:    nil,
            expected: "",
        },
        {
            name:     "Simple error without Azure formatting",
            input:    errors.New("simple error message"),
            expected: "simple error message",
        },
        {
            name:     "AWS style error",
            input:    errors.New("NoSuchBucket: The specified bucket does not exist"),
            expected: "NoSuchBucket: The specified bucket does not exist",
        },
        {
            name: "Azure container not found error with full HTTP response",
            input: errors.New(`rpc error: code = Unknown desc = GET https://oadp100711zl59k.blob.core.windows.net/oadp100711zl59k1
--------------------------------------------------------------------------------
RESPONSE 404: 404 The specified container does not exist.
ERROR CODE: ContainerNotFound
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="utf-8"?><Error><Code>ContainerNotFound</Code><Message>The specified container does not exist.
RequestId:63cf34d8-801e-0078-09b4-2e4682000000
Time:2024-11-04T12:23:04.5623627Z</Message></Error>
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = ContainerNotFound: The specified container does not exist.",
        },
        {
            name: "Azure blob not found error",
            input: errors.New(`rpc error: code = Unknown desc = GET https://storage.blob.core.windows.net/container/blob
--------------------------------------------------------------------------------
RESPONSE 404: 404 The specified blob does not exist.
ERROR CODE: BlobNotFound
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="utf-8"?><Error><Code>BlobNotFound</Code><Message>The specified blob does not exist.
RequestId:12345678-1234-1234-1234-123456789012
Time:2024-11-04T12:23:04.5623627Z</Message></Error>
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = BlobNotFound: The specified blob does not exist.",
        },
        {
            name: "Azure error with plain text response (no XML)",
            input: errors.New(`rpc error: code = Unknown desc = GET https://storage.blob.core.windows.net/container
--------------------------------------------------------------------------------
RESPONSE 404: 404 The specified container does not exist.
ERROR CODE: ContainerNotFound
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = ContainerNotFound: The specified container does not exist.",
        },
        {
            name: "Azure error without XML message but with error code",
            input: errors.New(`rpc error: code = Unknown desc = operation failed
RESPONSE 403: 403 Forbidden
ERROR CODE: AuthorizationFailure
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = AuthorizationFailure: Forbidden",
        },
        {
            name: "Error with Azure SAS token in URL",
            input: errors.New(`rpc error: code = Unknown desc = GET https://storage.blob.core.windows.net/backup?sv=2020-08-04&sig=abc123secrettoken&se=2024-12-31T23:59:59Z&sp=rwdl
--------------------------------------------------------------------------------
RESPONSE 404: 404 The specified container does not exist.
ERROR CODE: ContainerNotFound
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = ContainerNotFound: The specified container does not exist.",
        },
        {
            name:     "Error with multiple SAS parameters",
            input:    errors.New(`GET https://mystorageaccount.blob.core.windows.net/container?sv=2020-08-04&ss=b&srt=sco&sp=rwdlac&se=2024-12-31&st=2024-01-01&sip=168.1.5.60&spr=https&sig=SIGNATURE_HASH`),
            expected: "GET https://mystorageaccount.blob.core.windows.net/container?sv=***REDACTED***&ss=***REDACTED***&srt=***REDACTED***&sp=***REDACTED***&se=***REDACTED***&st=***REDACTED***&sip=***REDACTED***&spr=***REDACTED***&sig=***REDACTED***",
        },
        {
            name:     "Simple URL without SAS tokens unchanged",
            input:    errors.New("GET https://storage.blob.core.windows.net/container/blob"),
            expected: "GET https://storage.blob.core.windows.net/container/blob",
        },
        {
            name: "Azure error with SAS token in full HTTP response",
            input: errors.New(`rpc error: code = Unknown desc = GET https://oadp100711zl59k.blob.core.windows.net/backup?sig=secretsignature123&se=2024-12-31
--------------------------------------------------------------------------------
RESPONSE 404: 404 The specified container does not exist.
ERROR CODE: ContainerNotFound
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="utf-8"?><Error><Code>ContainerNotFound</Code><Message>The specified container does not exist.
RequestId:63cf34d8-801e-0078-09b4-2e4682000000
Time:2024-11-04T12:23:04.5623627Z</Message></Error>
--------------------------------------------------------------------------------
`),
            expected: "rpc error: code = Unknown desc = ContainerNotFound: The specified container does not exist.",
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            actual := sanitizeStorageError(test.input)
            assert.Equal(t, test.expected, actual)
        })
    }
}

@@ -77,6 +77,8 @@ type DataDownloadReconciler struct {
    cancelledDataDownload map[string]time.Time
    dataMovePriorityClass string
    repoConfigMgr         repository.ConfigManager
    podLabels             map[string]string
    podAnnotations        map[string]string
}

func NewDataDownloadReconciler(
@@ -96,6 +98,8 @@ func NewDataDownloadReconciler(
    metrics *metrics.ServerMetrics,
    dataMovePriorityClass string,
    repoConfigMgr repository.ConfigManager,
    podLabels map[string]string,
    podAnnotations map[string]string,
) *DataDownloadReconciler {
    return &DataDownloadReconciler{
        client: client,
@@ -117,6 +121,8 @@ func NewDataDownloadReconciler(
        cancelledDataDownload: make(map[string]time.Time),
        dataMovePriorityClass: dataMovePriorityClass,
        repoConfigMgr:         repoConfigMgr,
        podLabels:             podLabels,
        podAnnotations:        podAnnotations,
    }
}

@@ -860,25 +866,37 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
    }

    hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
    for _, k := range util.ThirdPartyLabels {
        if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
            if err != nodeagent.ErrNodeAgentLabelNotFound {
                log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
            }
        } else {
    if len(r.podLabels) > 0 {
        for k, v := range r.podLabels {
            hostingPodLabels[k] = v
        }
    } else {
        for _, k := range util.ThirdPartyLabels {
            if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
                if err != nodeagent.ErrNodeAgentLabelNotFound {
                    log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
                }
            } else {
                hostingPodLabels[k] = v
            }
        }
    }

    hostingPodAnnotation := map[string]string{}
    for _, k := range util.ThirdPartyAnnotations {
        if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
            if err != nodeagent.ErrNodeAgentAnnotationNotFound {
                log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
            }
        } else {
    if len(r.podAnnotations) > 0 {
        for k, v := range r.podAnnotations {
            hostingPodAnnotation[k] = v
        }
    } else {
        for _, k := range util.ThirdPartyAnnotations {
            if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
                if err != nodeagent.ErrNodeAgentAnnotationNotFound {
                    log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
                }
            } else {
                hostingPodAnnotation[k] = v
            }
        }
    }

    hostingPodTolerations := []corev1api.Toleration{}

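The label branch above boils down to an either/or precedence rule: configured labels replace the third-party node-agent lookup rather than merging with it, while the DataDownload identity label is always kept. A distilled sketch of that rule (not the reconciler's code):

package main

import "fmt"

// mergeHostingPodLabels mirrors the precedence the reconciler applies:
// user-supplied labels from the node-agent config win outright; the
// third-party lookup result is only used when no custom labels exist.
func mergeHostingPodLabels(base, custom, thirdParty map[string]string) map[string]string {
    out := map[string]string{}
    for k, v := range base {
        out[k] = v
    }
    src := thirdParty
    if len(custom) > 0 {
        src = custom
    }
    for k, v := range src {
        out[k] = v
    }
    return out
}

func main() {
    base := map[string]string{"velero.io/data-download": "dd-1"}
    custom := map[string]string{"team": "storage"}
    fmt.Println(mergeHostingPodLabels(base, custom, map[string]string{"3p": "x"}))
    // map[team:storage velero.io/data-download:dd-1]
}
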
@@ -129,7 +129,26 @@ func initDataDownloadReconcilerWithError(t *testing.T, objects []any, needError

    dataPathMgr := datapath.NewManager(1)

    return NewDataDownloadReconciler(&fakeClient, nil, fakeKubeClient, dataPathMgr, nil, nil, velerotypes.RestorePVC{}, nil, nil, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics(), "", nil), nil
    return NewDataDownloadReconciler(
        &fakeClient,
        nil,
        fakeKubeClient,
        dataPathMgr,
        nil,
        nil,
        velerotypes.RestorePVC{},
        nil,
        nil,
        corev1api.ResourceRequirements{},
        "test-node",
        time.Minute*5,
        velerotest.NewLogger(),
        metrics.NewServerMetrics(),
        "",
        nil,
        nil, // podLabels
        nil, // podAnnotations
    ), nil
}

func TestDataDownloadReconcile(t *testing.T) {
@@ -1292,3 +1311,127 @@ func TestResumeCancellableRestore(t *testing.T) {
        })
    }
}

func TestDataDownloadSetupExposeParam(t *testing.T) {
    // Common objects for all cases
    node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()

    baseDataDownload := dataDownloadBuilder().Result()
    baseDataDownload.Namespace = velerov1api.DefaultNamespace
    baseDataDownload.Spec.OperationTimeout = metav1.Duration{Duration: time.Minute * 10}
    baseDataDownload.Spec.SnapshotSize = 5368709120 // 5Gi

    type args struct {
        customLabels      map[string]string
        customAnnotations map[string]string
    }
    type want struct {
        labels      map[string]string
        annotations map[string]string
    }

    tests := []struct {
        name string
        args args
        want want
    }{
        {
            name: "label has customize values",
            args: args{
                customLabels:      map[string]string{"custom-label": "label-value"},
                customAnnotations: nil,
            },
            want: want{
                labels: map[string]string{
                    velerov1api.DataDownloadLabel: baseDataDownload.Name,
                    "custom-label":                "label-value",
                },
                annotations: map[string]string{},
            },
        },
        {
            name: "label has no customize values",
            args: args{
                customLabels:      nil,
                customAnnotations: nil,
            },
            want: want{
                labels:      map[string]string{velerov1api.DataDownloadLabel: baseDataDownload.Name},
                annotations: map[string]string{},
            },
        },
        {
            name: "annotation has customize values",
            args: args{
                customLabels:      nil,
                customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
            },
            want: want{
                labels:      map[string]string{velerov1api.DataDownloadLabel: baseDataDownload.Name},
                annotations: map[string]string{"custom-annotation": "annotation-value"},
            },
        },
        {
            name: "both label and annotation have customize values",
            args: args{
                customLabels:      map[string]string{"custom-label": "label-value"},
                customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
            },
            want: want{
                labels: map[string]string{
                    velerov1api.DataDownloadLabel: baseDataDownload.Name,
                    "custom-label":                "label-value",
                },
                annotations: map[string]string{"custom-annotation": "annotation-value"},
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Fake clients per case
            fakeClient := FakeClient{
                Client: velerotest.NewFakeControllerRuntimeClient(t, node, baseDataDownload.DeepCopy()),
            }
            fakeKubeClient := clientgofake.NewSimpleClientset(node)

            // Reconciler config per case
            preparingTimeout := time.Minute * 3
            podRes := corev1api.ResourceRequirements{}
            r := NewDataDownloadReconciler(
                &fakeClient,
nil,
|
||||
fakeKubeClient,
|
||||
datapath.NewManager(1),
|
||||
nil,
|
||||
nil,
|
||||
velerotypes.RestorePVC{},
|
||||
nil,
|
||||
nil,
|
||||
podRes,
|
||||
"test-node",
|
||||
preparingTimeout,
|
||||
velerotest.NewLogger(),
|
||||
metrics.NewServerMetrics(),
|
||||
"download-priority",
|
||||
nil, // repoConfigMgr (unused when cacheVolumeConfigs is nil)
|
||||
tt.args.customLabels,
|
||||
tt.args.customAnnotations,
|
||||
)
|
||||
|
||||
// Act
|
||||
got, err := r.setupExposeParam(baseDataDownload)
|
||||
|
||||
// Assert no error
|
||||
require.NoError(t, err)
|
||||
|
||||
// Core fields
|
||||
assert.Equal(t, baseDataDownload.Spec.TargetVolume.PVC, got.TargetPVCName)
|
||||
assert.Equal(t, baseDataDownload.Spec.TargetVolume.Namespace, got.TargetNamespace)
|
||||
|
||||
// Labels and Annotations
|
||||
assert.Equal(t, tt.want.labels, got.HostingPodLabels)
|
||||
assert.Equal(t, tt.want.annotations, got.HostingPodAnnotations)
|
||||
})
|
||||
}
|
||||
}
@@ -83,6 +83,8 @@ type DataUploadReconciler struct {
metrics *metrics.ServerMetrics
cancelledDataUpload map[string]time.Time
dataMovePriorityClass string
podLabels map[string]string
podAnnotations map[string]string
}

func NewDataUploadReconciler(
@@ -101,6 +103,8 @@ func NewDataUploadReconciler(
log logrus.FieldLogger,
metrics *metrics.ServerMetrics,
dataMovePriorityClass string,
podLabels map[string]string,
podAnnotations map[string]string,
) *DataUploadReconciler {
return &DataUploadReconciler{
client: client,
@@ -126,6 +130,8 @@ func NewDataUploadReconciler(
metrics: metrics,
cancelledDataUpload: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}

@@ -936,25 +942,37 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
}

hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
-for _, k := range util.ThirdPartyLabels {
-if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentLabelNotFound {
-log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
-}
-} else {
+if len(r.podLabels) > 0 {
+for k, v := range r.podLabels {
+hostingPodLabels[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyLabels {
+if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentLabelNotFound {
+log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
+}
+} else {
+hostingPodLabels[k] = v
+}
+}
+}

hostingPodAnnotation := map[string]string{}
-for _, k := range util.ThirdPartyAnnotations {
-if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentAnnotationNotFound {
-log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
-}
-} else {
+if len(r.podAnnotations) > 0 {
+for k, v := range r.podAnnotations {
+hostingPodAnnotation[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyAnnotations {
+if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentAnnotationNotFound {
+log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
+}
+} else {
+hostingPodAnnotation[k] = v
+}
+}
+}

hostingPodTolerations := []corev1api.Toleration{}
@@ -248,7 +248,9 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
time.Minute*5,
velerotest.NewLogger(),
metrics.NewServerMetrics(),
"", // dataMovePriorityClass
+nil, // podLabels
+nil, // podAnnotations
), nil
}

@@ -1384,3 +1386,149 @@ func TestResumeCancellableBackup(t *testing.T) {
})
}
}

func TestDataUploadSetupExposeParam(t *testing.T) {
// Common objects for all cases
fileMode := corev1api.PersistentVolumeFilesystem
node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()

pvc := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "app-ns",
Name: "test-pvc",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "test-pv",
VolumeMode: &fileMode,
Resources: corev1api.VolumeResourceRequirements{
Requests: corev1api.ResourceList{
corev1api.ResourceStorage: resource.MustParse("10Gi"),
},
},
},
}

pv := &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
}

baseDataUpload := dataUploadBuilder().Result()
baseDataUpload.Spec.SourceNamespace = "app-ns"
baseDataUpload.Spec.SourcePVC = "test-pvc"
baseDataUpload.Namespace = velerov1api.DefaultNamespace
baseDataUpload.Spec.OperationTimeout = metav1.Duration{Duration: time.Minute * 10}

type args struct {
customLabels map[string]string
customAnnotations map[string]string
}
type want struct {
labels map[string]string
annotations map[string]string
}

tests := []struct {
name string
args args
want want
}{
{
name: "label has customize values",
args: args{
customLabels: map[string]string{"custom-label": "label-value"},
customAnnotations: nil,
},
want: want{
labels: map[string]string{
velerov1api.DataUploadLabel: baseDataUpload.Name,
"custom-label": "label-value",
},
annotations: map[string]string{},
},
},
{
name: "label has no customize values",
args: args{
customLabels: nil,
customAnnotations: nil,
},
want: want{
labels: map[string]string{velerov1api.DataUploadLabel: baseDataUpload.Name},
annotations: map[string]string{},
},
},
{
name: "annotation has customize values",
args: args{
customLabels: nil,
customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
},
want: want{
labels: map[string]string{velerov1api.DataUploadLabel: baseDataUpload.Name},
annotations: map[string]string{"custom-annotation": "annotation-value"},
},
},
{
name: "both label and annotation have customize values",
args: args{
customLabels: map[string]string{"custom-label": "label-value"},
customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
},
want: want{
labels: map[string]string{
velerov1api.DataUploadLabel: baseDataUpload.Name,
"custom-label": "label-value",
},
annotations: map[string]string{"custom-annotation": "annotation-value"},
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fake clients per case
fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, pvc, pv, node, baseDataUpload.DeepCopy())
fakeKubeClient := clientgofake.NewSimpleClientset(node)

// Reconciler config per case
preparingTimeout := time.Minute * 3
podRes := corev1api.ResourceRequirements{}
r := NewDataUploadReconciler(
fakeCRClient,
nil,
fakeKubeClient,
nil, // snapshotClient (unused in setupExposeParam)
datapath.NewManager(1),
nil, // dataPathMgr
nil, // exposer (unused in setupExposeParam)
map[string]velerotypes.BackupPVC{},
podRes,
testclocks.NewFakeClock(time.Now()),
"test-node",
preparingTimeout,
velerotest.NewLogger(),
metrics.NewServerMetrics(),
"upload-priority",
tt.args.customLabels,
tt.args.customAnnotations,
)

// Act
got, err := r.setupExposeParam(baseDataUpload)

// Assert no error
require.NoError(t, err)
require.NotNil(t, got)

// Type assertion to CSISnapshotExposeParam
csiParam, ok := got.(*exposer.CSISnapshotExposeParam)
require.True(t, ok, "expected CSISnapshotExposeParam type")

// Labels and Annotations
assert.Equal(t, tt.want.labels, csiParam.HostingPodLabels)
assert.Equal(t, tt.want.annotations, csiParam.HostingPodAnnotations)
})
}
}
@@ -58,9 +58,23 @@ const (
)

// NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance
-func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
-counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, podResources corev1api.ResourceRequirements,
-metrics *metrics.ServerMetrics, logger logrus.FieldLogger, dataMovePriorityClass string, privileged bool) *PodVolumeBackupReconciler {
+func NewPodVolumeBackupReconciler(
+client client.Client,
+mgr manager.Manager,
+kubeClient kubernetes.Interface,
+dataPathMgr *datapath.Manager,
+counter *exposer.VgdpCounter,
+nodeName string,
+preparingTimeout time.Duration,
+resourceTimeout time.Duration,
+podResources corev1api.ResourceRequirements,
+metrics *metrics.ServerMetrics,
+logger logrus.FieldLogger,
+dataMovePriorityClass string,
+privileged bool,
+podLabels map[string]string,
+podAnnotations map[string]string,
+) *PodVolumeBackupReconciler {
return &PodVolumeBackupReconciler{
client: client,
mgr: mgr,
@@ -78,6 +92,8 @@ func NewPodVolumeBackupReconciler(client client.Client, mgr manager.Manager, kub
cancelledPVB: make(map[string]time.Time),
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}

@@ -99,6 +115,8 @@ type PodVolumeBackupReconciler struct {
cancelledPVB map[string]time.Time
dataMovePriorityClass string
privileged bool
podLabels map[string]string
podAnnotations map[string]string
}

// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
@@ -796,25 +814,37 @@ func (r *PodVolumeBackupReconciler) setupExposeParam(pvb *velerov1api.PodVolumeB
}

hostingPodLabels := map[string]string{velerov1api.PVBLabel: pvb.Name}
-for _, k := range util.ThirdPartyLabels {
-if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentLabelNotFound {
-log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
-}
-} else {
+if len(r.podLabels) > 0 {
+for k, v := range r.podLabels {
+hostingPodLabels[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyLabels {
+if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentLabelNotFound {
+log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
+}
+} else {
+hostingPodLabels[k] = v
+}
+}
+}

hostingPodAnnotation := map[string]string{}
-for _, k := range util.ThirdPartyAnnotations {
-if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentAnnotationNotFound {
-log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
-}
-} else {
+if len(r.podAnnotations) > 0 {
+for k, v := range r.podAnnotations {
+hostingPodAnnotation[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyAnnotations {
+if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvb.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentAnnotationNotFound {
+log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
+}
+} else {
+hostingPodAnnotation[k] = v
+}
+}
+}

hostingPodTolerations := []corev1api.Toleration{}
@@ -47,13 +47,12 @@ import (
velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
"github.com/vmware-tanzu/velero/pkg/builder"
"github.com/vmware-tanzu/velero/pkg/datapath"
+datapathmocks "github.com/vmware-tanzu/velero/pkg/datapath/mocks"
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/kube"
-
-datapathmocks "github.com/vmware-tanzu/velero/pkg/datapath/mocks"
)

const pvbName = "pvb-1"
@@ -153,6 +152,8 @@ func initPVBReconcilerWithError(needError ...error) (*PodVolumeBackupReconciler,
velerotest.NewLogger(),
"", // dataMovePriorityClass
false, // privileged
+nil, // podLabels
+nil, // podAnnotations
), nil
}

@@ -1187,3 +1188,123 @@ func TestResumeCancellablePodVolumeBackup(t *testing.T) {
})
}
}

func TestPodVolumeBackupSetupExposeParam(t *testing.T) {
// common objects for all cases
node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()

basePVB := pvbBuilder().Result()
basePVB.Spec.Node = "worker-1"
basePVB.Spec.Pod.Namespace = "app-ns"
basePVB.Spec.Pod.Name = "app-pod"
basePVB.Spec.Volume = "data-vol"

type args struct {
customLabels map[string]string
customAnnotations map[string]string
}
type want struct {
labels map[string]string
annotations map[string]string
}

tests := []struct {
name string
args args
want want
}{
{
name: "label has customize values",
args: args{
customLabels: map[string]string{"custom-label": "label-value"},
customAnnotations: nil,
},
want: want{
labels: map[string]string{
velerov1api.PVBLabel: basePVB.Name,
"custom-label": "label-value",
},
annotations: map[string]string{},
},
},
{
name: "label has no customize values",
args: args{
customLabels: nil,
customAnnotations: nil,
},
want: want{
labels: map[string]string{velerov1api.PVBLabel: basePVB.Name},
annotations: map[string]string{},
},
},
{
name: "annotation has customize values",
args: args{
customLabels: nil,
customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
},
want: want{
labels: map[string]string{velerov1api.PVBLabel: basePVB.Name},
annotations: map[string]string{"custom-annotation": "annotation-value"},
},
},
{
name: "annotation has no customize values",
args: args{
customLabels: map[string]string{"another-label": "lval"},
customAnnotations: nil,
},
want: want{
labels: map[string]string{
velerov1api.PVBLabel: basePVB.Name,
"another-label": "lval",
},
annotations: map[string]string{},
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fake clients per case
fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, node, basePVB.DeepCopy())
fakeKubeClient := clientgofake.NewSimpleClientset(node)

// Reconciler config per case
preparingTimeout := time.Minute * 3
resourceTimeout := time.Minute * 10
podRes := corev1api.ResourceRequirements{}
r := NewPodVolumeBackupReconciler(
fakeCRClient,
nil,
fakeKubeClient,
datapath.NewManager(1),
nil,
"test-node",
preparingTimeout,
resourceTimeout,
podRes,
metrics.NewServerMetrics(),
velerotest.NewLogger(),
"backup-priority",
true,
tt.args.customLabels,
tt.args.customAnnotations,
)

// Act
got := r.setupExposeParam(basePVB)

// Core fields
assert.Equal(t, exposer.PodVolumeExposeTypeBackup, got.Type)
assert.Equal(t, basePVB.Spec.Pod.Namespace, got.ClientNamespace)
assert.Equal(t, basePVB.Spec.Pod.Name, got.ClientPodName)
assert.Equal(t, basePVB.Spec.Volume, got.ClientPodVolume)

// Labels/Annotations
assert.Equal(t, tt.want.labels, got.HostingPodLabels)
assert.Equal(t, tt.want.annotations, got.HostingPodAnnotations)
})
}
}
@@ -56,10 +56,25 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

-func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
-counter *exposer.VgdpCounter, nodeName string, preparingTimeout time.Duration, resourceTimeout time.Duration, backupRepoConfigs map[string]string,
-cacheVolumeConfigs *velerotypes.CachePVC, podResources corev1api.ResourceRequirements, logger logrus.FieldLogger, dataMovePriorityClass string,
-privileged bool, repoConfigMgr repository.ConfigManager) *PodVolumeRestoreReconciler {
+func NewPodVolumeRestoreReconciler(
+client client.Client,
+mgr manager.Manager,
+kubeClient kubernetes.Interface,
+dataPathMgr *datapath.Manager,
+counter *exposer.VgdpCounter,
+nodeName string,
+preparingTimeout time.Duration,
+resourceTimeout time.Duration,
+backupRepoConfigs map[string]string,
+cacheVolumeConfigs *velerotypes.CachePVC,
+podResources corev1api.ResourceRequirements,
+logger logrus.FieldLogger,
+dataMovePriorityClass string,
+privileged bool,
+repoConfigMgr repository.ConfigManager,
+podLabels map[string]string,
+podAnnotations map[string]string,
+) *PodVolumeRestoreReconciler {
return &PodVolumeRestoreReconciler{
client: client,
mgr: mgr,
@@ -79,6 +94,8 @@ func NewPodVolumeRestoreReconciler(client client.Client, mgr manager.Manager, ku
dataMovePriorityClass: dataMovePriorityClass,
privileged: privileged,
repoConfigMgr: repoConfigMgr,
podLabels: podLabels,
podAnnotations: podAnnotations,
}
}

@@ -101,6 +118,8 @@ type PodVolumeRestoreReconciler struct {
dataMovePriorityClass string
privileged bool
repoConfigMgr repository.ConfigManager
podLabels map[string]string
podAnnotations map[string]string
}

// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
@@ -863,25 +882,37 @@ func (r *PodVolumeRestoreReconciler) setupExposeParam(pvr *velerov1api.PodVolume
}

hostingPodLabels := map[string]string{velerov1api.PVRLabel: pvr.Name}
-for _, k := range util.ThirdPartyLabels {
-if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentLabelNotFound {
-log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
-}
-} else {
+if len(r.podLabels) > 0 {
+for k, v := range r.podLabels {
+hostingPodLabels[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyLabels {
+if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentLabelNotFound {
+log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
+}
+} else {
+hostingPodLabels[k] = v
+}
+}
+}

hostingPodAnnotation := map[string]string{}
-for _, k := range util.ThirdPartyAnnotations {
-if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
-if err != nodeagent.ErrNodeAgentAnnotationNotFound {
-log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
-}
-} else {
+if len(r.podAnnotations) > 0 {
+for k, v := range r.podAnnotations {
+hostingPodAnnotation[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyAnnotations {
+if v, err := nodeagent.GetAnnotationValue(context.Background(), r.kubeClient, pvr.Namespace, k, nodeOS); err != nil {
+if err != nodeagent.ErrNodeAgentAnnotationNotFound {
+log.WithError(err).Warnf("Failed to check node-agent annotation, skip adding host pod annotation %s", k)
+}
+} else {
+hostingPodAnnotation[k] = v
+}
+}
+}

hostingPodTolerations := []corev1api.Toleration{}
@@ -617,7 +617,25 @@ func initPodVolumeRestoreReconcilerWithError(objects []runtime.Object, cliObj []

dataPathMgr := datapath.NewManager(1)

-return NewPodVolumeRestoreReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nil, "test-node", time.Minute*5, time.Minute, nil, nil, corev1api.ResourceRequirements{}, velerotest.NewLogger(), "", false, nil), nil
+return NewPodVolumeRestoreReconciler(
+fakeClient,
+nil,
+fakeKubeClient,
+dataPathMgr,
+nil,
+"test-node",
+time.Minute*5,
+time.Minute,
+nil,
+nil,
+corev1api.ResourceRequirements{},
+velerotest.NewLogger(),
+"",
+false,
+nil,
+nil, // podLabels
+nil, // podAnnotations
+), nil
}

func TestPodVolumeRestoreReconcile(t *testing.T) {
@@ -1082,6 +1100,128 @@ func TestPodVolumeRestoreReconcile(t *testing.T) {
}
}

func TestPodVolumeRestoreSetupExposeParam(t *testing.T) {
// common objects for all cases
node := builder.ForNode("worker-1").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()

basePVR := pvrBuilder().Result()
basePVR.Status.Node = "worker-1"
basePVR.Spec.Pod.Namespace = "app-ns"
basePVR.Spec.Pod.Name = "app-pod"
basePVR.Spec.Volume = "data-vol"

type args struct {
customLabels map[string]string
customAnnotations map[string]string
}
type want struct {
labels map[string]string
annotations map[string]string
}

tests := []struct {
name string
args args
want want
}{
{
name: "label has customize values",
args: args{
customLabels: map[string]string{"custom-label": "label-value"},
customAnnotations: nil,
},
want: want{
labels: map[string]string{
velerov1api.PVRLabel: basePVR.Name,
"custom-label": "label-value",
},
annotations: map[string]string{},
},
},
{
name: "label has no customize values",
args: args{
customLabels: nil,
customAnnotations: nil,
},
want: want{
labels: map[string]string{velerov1api.PVRLabel: basePVR.Name},
annotations: map[string]string{},
},
},
{
name: "annotation has customize values",
args: args{
customLabels: nil,
customAnnotations: map[string]string{"custom-annotation": "annotation-value"},
},
want: want{
labels: map[string]string{velerov1api.PVRLabel: basePVR.Name},
annotations: map[string]string{"custom-annotation": "annotation-value"},
},
},
{
name: "annotation has no customize values",
args: args{
customLabels: map[string]string{"another-label": "lval"},
customAnnotations: nil,
},
want: want{
labels: map[string]string{
velerov1api.PVRLabel: basePVR.Name,
"another-label": "lval",
},
annotations: map[string]string{},
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fake clients per case
fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, node, basePVR.DeepCopy())
fakeKubeClient := clientgofake.NewSimpleClientset(node)

// Reconciler config per case
preparingTimeout := time.Minute * 3
resourceTimeout := time.Minute * 10
podRes := corev1api.ResourceRequirements{}
r := NewPodVolumeRestoreReconciler(
fakeCRClient,
nil,
fakeKubeClient,
datapath.NewManager(1),
nil,
"test-node",
preparingTimeout,
resourceTimeout,
nil, // backupRepoConfigs
nil, // cacheVolumeConfigs -> keep nil so CacheVolume is nil
podRes,
velerotest.NewLogger(),
"restore-priority",
true,
nil, // repoConfigMgr (unused when cacheVolumeConfigs is nil)
tt.args.customLabels,
tt.args.customAnnotations,
)

// Act
got := r.setupExposeParam(basePVR)

// Core fields
assert.Equal(t, exposer.PodVolumeExposeTypeRestore, got.Type)
assert.Equal(t, basePVR.Spec.Pod.Namespace, got.ClientNamespace)
assert.Equal(t, basePVR.Spec.Pod.Name, got.ClientPodName)
assert.Equal(t, basePVR.Spec.Volume, got.ClientPodVolume)

// Labels/Annotations
assert.Equal(t, tt.want.labels, got.HostingPodLabels)
assert.Equal(t, tt.want.annotations, got.HostingPodAnnotations)
})
}
}

func TestOnPodVolumeRestoreFailed(t *testing.T) {
for _, getErr := range []bool{true, false} {
ctx := t.Context()
@@ -252,7 +252,7 @@ func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType str
return err
}
} else {
-if err := repoProvider.NewResticRepositoryProvider(credentialGetter.FromFile, filesystem.NewFileSystem(), fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil {
+if err := repoProvider.NewResticRepositoryProvider(*credentialGetter, filesystem.NewFileSystem(), fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil {
return err
}
}
@@ -107,6 +107,9 @@ func TestAsyncBackup(t *testing.T) {

<-finish

// Ensure the goroutine finishes so deferred fs.close executes, satisfying mock expectations.
fs.wgDataPath.Wait()

assert.Equal(t, test.err, asyncErr)
assert.Equal(t, test.result, asyncResult)
})
@@ -192,6 +195,9 @@ func TestAsyncRestore(t *testing.T) {

<-finish

// Ensure the goroutine finishes so deferred fs.close executes, satisfying mock expectations.
fs.wgDataPath.Wait()

assert.Equal(t, asyncErr, test.err)
assert.Equal(t, asyncResult, test.result)
})
@@ -184,7 +184,22 @@ func (e *podVolumeExposer) Expose(ctx context.Context, ownerObject corev1api.Obj
}
}

-hostingPod, err := e.createHostingPod(ctx, ownerObject, param.Type, path.ByPath, param.OperationTimeout, param.HostingPodLabels, param.HostingPodAnnotations, param.HostingPodTolerations, pod.Spec.NodeName, param.Resources, nodeOS, param.PriorityClassName, param.Privileged, cachePVC)
+hostingPod, err := e.createHostingPod(
+ctx,
+ownerObject,
+param.Type,
+path.ByPath,
+param.OperationTimeout,
+param.HostingPodLabels,
+param.HostingPodAnnotations,
+param.HostingPodTolerations,
+pod.Spec.NodeName,
+param.Resources,
+nodeOS,
+param.PriorityClassName,
+param.Privileged,
+cachePVC,
+)
if err != nil {
return errors.Wrapf(err, "error to create hosting pod")
}
@@ -328,8 +343,22 @@ func (e *podVolumeExposer) CleanUp(ctx context.Context, ownerObject corev1api.Ob
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), cachePVCName, ownerObject.Namespace, 0, e.log)
}

-func (e *podVolumeExposer) createHostingPod(ctx context.Context, ownerObject corev1api.ObjectReference, exposeType string, hostPath string,
-operationTimeout time.Duration, label map[string]string, annotation map[string]string, toleration []corev1api.Toleration, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string, priorityClassName string, privileged bool, cachePVC *corev1api.PersistentVolumeClaim) (*corev1api.Pod, error) {
+func (e *podVolumeExposer) createHostingPod(
+ctx context.Context,
+ownerObject corev1api.ObjectReference,
+exposeType string,
+hostPath string,
+operationTimeout time.Duration,
+label map[string]string,
+annotation map[string]string,
+toleration []corev1api.Toleration,
+selectedNode string,
+resources corev1api.ResourceRequirements,
+nodeOS string,
+priorityClassName string,
+privileged bool,
+cachePVC *corev1api.PersistentVolumeClaim,
+) (*corev1api.Pod, error) {
hostingPodName := ownerObject.Name

containerName := string(ownerObject.UID)
@@ -116,6 +116,7 @@ type ObjectBackupStoreGetter interface {

type objectBackupStoreGetter struct {
credentialStore credentials.FileStore
secretStore credentials.SecretStore
}

// NewObjectBackupStoreGetter returns an ObjectBackupStoreGetter that can get a velero.BackupStore.
@@ -123,6 +124,15 @@ func NewObjectBackupStoreGetter(credentialStore credentials.FileStore) ObjectBac
return &objectBackupStoreGetter{credentialStore: credentialStore}
}

// NewObjectBackupStoreGetterWithSecretStore returns an ObjectBackupStoreGetter with SecretStore
// support for resolving caCertRef from Kubernetes Secrets.
func NewObjectBackupStoreGetterWithSecretStore(credentialStore credentials.FileStore, secretStore credentials.SecretStore) ObjectBackupStoreGetter {
return &objectBackupStoreGetter{
credentialStore: credentialStore,
secretStore: secretStore,
}
}
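A caller that wants Secret-based CA resolution constructs the getter with both stores. The sketch below is illustrative, not actual server code: it assumes the credential stores were built elsewhere during startup and that the credentials package lives under internal/credentials.

```go
package example

import (
	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/internal/credentials"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/persistence"
)

// buildBackupStore is a hypothetical helper showing how the Secret-aware
// getter is constructed and then used to resolve a BackupStore for a BSL.
func buildBackupStore(
	fileStore credentials.FileStore,
	secretStore credentials.SecretStore,
	location *velerov1api.BackupStorageLocation,
	objectStoreGetter persistence.ObjectStoreGetter,
	logger logrus.FieldLogger,
) (persistence.BackupStore, error) {
	getter := persistence.NewObjectBackupStoreGetterWithSecretStore(fileStore, secretStore)
	return getter.Get(location, objectStoreGetter, logger)
}
```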

func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocation, objectStoreGetter ObjectStoreGetter, logger logrus.FieldLogger) (BackupStore, error) {
if location.Spec.ObjectStorage == nil {
return nil, errors.New("backup storage location does not use object storage")
@@ -160,7 +170,16 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
objectStoreConfig["prefix"] = prefix

-// Only include a CACert if it's specified in order to maintain compatibility with plugins that don't expect it.
-if location.Spec.ObjectStorage.CACert != nil {
+// Prefer caCertRef (from Secret) over inline caCert (deprecated).
+if location.Spec.ObjectStorage.CACertRef != nil {
+if b.secretStore != nil {
+caCertString, err := b.secretStore.Get(location.Spec.ObjectStorage.CACertRef)
+if err != nil {
+return nil, errors.Wrap(err, "error getting CA certificate from secret")
+}
+objectStoreConfig["caCert"] = caCertString
+}
+} else if location.Spec.ObjectStorage.CACert != nil {
objectStoreConfig["caCert"] = string(location.Spec.ObjectStorage.CACert)
}
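On the API side, a BackupStorageLocation opts into this path by setting caCertRef instead of the deprecated inline caCert. A minimal sketch using the builder helpers exercised by the tests below; the namespace, provider, bucket, and Secret names are placeholders.

```go
package example

import (
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/builder"
)

// exampleBSLWithCACertRef builds a BSL whose CA bundle is read from key
// "ca.crt" of Secret "cacert-secret" in the BSL's own namespace.
func exampleBSLWithCACertRef() *velerov1api.BackupStorageLocation {
	return builder.ForBackupStorageLocation("velero", "default").
		Provider("aws").
		Bucket("my-bucket").
		CACertRef(builder.ForSecretKeySelector("cacert-secret", "ca.crt").Result()).
		Result()
}
```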

@@ -1017,6 +1017,32 @@ func TestNewObjectBackupStoreGetterConfig(t *testing.T) {
"credentialsFile": "/tmp/credentials/secret-file",
},
},
{
name: "location with CACertRef is initialized with caCert from secret",
location: builder.ForBackupStorageLocation("", "").Provider(provider).Bucket(bucket).CACertRef(
builder.ForSecretKeySelector("cacert-secret", "ca.crt").Result(),
).Result(),
getter: NewObjectBackupStoreGetterWithSecretStore(
velerotest.NewFakeCredentialsFileStore("", nil),
velerotest.NewFakeCredentialsSecretStore("cacert-from-secret", nil),
),
wantConfig: map[string]string{
"bucket": "bucket",
"prefix": "",
"caCert": "cacert-from-secret",
},
},
{
name: "location with CACertRef and no SecretStore uses no caCert",
location: builder.ForBackupStorageLocation("", "").Provider(provider).Bucket(bucket).CACertRef(
builder.ForSecretKeySelector("cacert-secret", "ca.crt").Result(),
).Result(),
getter: NewObjectBackupStoreGetter(velerotest.NewFakeCredentialsFileStore("", nil)),
wantConfig: map[string]string{
"bucket": "bucket",
"prefix": "",
},
},
}

for _, tc := range tests {
@@ -33,6 +33,11 @@ import (
// up on demand. On the other hand, the volumeHelperImpl assumes there
// is a VolumeHelper instance initialized before calling the
// ShouldPerformXXX functions.
//
// Deprecated: Use ShouldPerformSnapshotWithVolumeHelper instead for better performance.
// ShouldPerformSnapshotWithVolumeHelper allows passing a pre-created VolumeHelper with
// an internal PVC-to-Pod cache, which avoids O(N*M) complexity when there are many PVCs and pods.
// See issue #9179 for details.
func ShouldPerformSnapshotWithBackup(
unstructured runtime.Unstructured,
groupResource schema.GroupResource,
@@ -40,6 +45,35 @@ func ShouldPerformSnapshotWithBackup(
crClient crclient.Client,
logger logrus.FieldLogger,
) (bool, error) {
return ShouldPerformSnapshotWithVolumeHelper(
unstructured,
groupResource,
backup,
crClient,
logger,
nil, // no cached VolumeHelper, will create one
)
}

// ShouldPerformSnapshotWithVolumeHelper is like ShouldPerformSnapshotWithBackup
// but accepts an optional VolumeHelper. If vh is non-nil, it will be used directly,
// avoiding the overhead of creating a new VolumeHelper on each call.
// This is useful for BIA plugins that process multiple PVCs during a single backup
// and want to reuse the same VolumeHelper (with its internal cache) across calls.
func ShouldPerformSnapshotWithVolumeHelper(
unstructured runtime.Unstructured,
groupResource schema.GroupResource,
backup velerov1api.Backup,
crClient crclient.Client,
logger logrus.FieldLogger,
vh volumehelper.VolumeHelper,
) (bool, error) {
// If a VolumeHelper is provided, use it directly
if vh != nil {
return vh.ShouldPerformSnapshot(unstructured, groupResource)
}

// Otherwise, create a new VolumeHelper (original behavior for third-party plugins)
resourcePolicies, err := resourcepolicies.GetResourcePoliciesFromBackup(
backup,
crClient,
@@ -49,6 +83,7 @@ func ShouldPerformSnapshotWithBackup(
return false, err
}

//nolint:staticcheck // Intentional use of deprecated function for backwards compatibility
volumeHelperImpl := volumehelper.NewVolumeHelperImpl(
resourcePolicies,
backup.Spec.SnapshotVolumes,
324
pkg/plugin/utils/volumehelper/volume_policy_helper_test.go
Normal file
@@ -0,0 +1,324 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumehelper

import (
"testing"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

"github.com/vmware-tanzu/velero/internal/volumehelper"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)

func TestShouldPerformSnapshotWithBackup(t *testing.T) {
tests := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
pv *corev1api.PersistentVolume
backup *velerov1api.Backup
wantSnapshot bool
wantError bool
}{
{
name: "Returns true when snapshotVolumes not set",
pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pvc",
Namespace: "default",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "test-pv",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimBound,
},
},
pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{
Driver: "test-driver",
},
},
ClaimRef: &corev1api.ObjectReference{
Namespace: "default",
Name: "test-pvc",
},
},
},
backup: &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
},
wantSnapshot: true,
wantError: false,
},
{
name: "Returns false when snapshotVolumes is false",
pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pvc",
Namespace: "default",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "test-pv",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimBound,
},
},
pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{
Driver: "test-driver",
},
},
ClaimRef: &corev1api.ObjectReference{
Namespace: "default",
Name: "test-pvc",
},
},
},
backup: &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
Spec: velerov1api.BackupSpec{
SnapshotVolumes: boolPtr(false),
},
},
wantSnapshot: false,
wantError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create fake client with PV and PVC
client := velerotest.NewFakeControllerRuntimeClient(t, tt.pv, tt.pvc)

// Convert PVC to unstructured
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.pvc)
require.NoError(t, err)
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}

logger := logrus.New()

// Call the function under test - this is the wrapper for third-party plugins
result, err := ShouldPerformSnapshotWithBackup(
unstructuredPVC,
kuberesource.PersistentVolumeClaims,
*tt.backup,
client,
logger,
)

if tt.wantError {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.wantSnapshot, result)
}
})
}
}

func boolPtr(b bool) *bool {
return &b
}

func TestShouldPerformSnapshotWithVolumeHelper(t *testing.T) {
tests := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
pv *corev1api.PersistentVolume
backup *velerov1api.Backup
wantSnapshot bool
wantError bool
}{
{
name: "Returns true with nil VolumeHelper when snapshotVolumes not set",
pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pvc",
Namespace: "default",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "test-pv",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimBound,
},
},
pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{
Driver: "test-driver",
},
},
ClaimRef: &corev1api.ObjectReference{
Namespace: "default",
Name: "test-pvc",
},
},
},
backup: &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
},
wantSnapshot: true,
wantError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create fake client with PV
client := velerotest.NewFakeControllerRuntimeClient(t, tt.pv, tt.pvc)

// Convert PVC to unstructured
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.pvc)
require.NoError(t, err)
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}

logger := logrus.New()

// Call the function under test with nil VolumeHelper
// This exercises the fallback path that creates a new VolumeHelper per call
result, err := ShouldPerformSnapshotWithVolumeHelper(
unstructuredPVC,
kuberesource.PersistentVolumeClaims,
*tt.backup,
client,
logger,
nil, // Pass nil for VolumeHelper - exercises fallback path
)

if tt.wantError {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.wantSnapshot, result)
}
})
}
}

// TestShouldPerformSnapshotWithNonNilVolumeHelper tests the ShouldPerformSnapshotWithVolumeHelper
// function when a pre-created VolumeHelper is passed. This exercises the cached path used
// by BIA plugins for better performance.
func TestShouldPerformSnapshotWithNonNilVolumeHelper(t *testing.T) {
pvc := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pvc",
Namespace: "default",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeName: "test-pv",
},
Status: corev1api.PersistentVolumeClaimStatus{
Phase: corev1api.ClaimBound,
},
}

pv := &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
Spec: corev1api.PersistentVolumeSpec{
PersistentVolumeSource: corev1api.PersistentVolumeSource{
CSI: &corev1api.CSIPersistentVolumeSource{
Driver: "test-driver",
},
},
ClaimRef: &corev1api.ObjectReference{
Namespace: "default",
Name: "test-pvc",
},
},
}

backup := &velerov1api.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup",
Namespace: "velero",
},
Spec: velerov1api.BackupSpec{
IncludedNamespaces: []string{"default"},
},
}

// Create fake client with PV and PVC
client := velerotest.NewFakeControllerRuntimeClient(t, pv, pvc)

logger := logrus.New()

// Create VolumeHelper using the internal function with namespace caching
vh, err := volumehelper.NewVolumeHelperImplWithNamespaces(
nil, // no resource policies for this test
nil, // snapshotVolumes not set
logger,
client,
false, // defaultVolumesToFSBackup
true, // backupExcludePVC
[]string{"default"},
)
require.NoError(t, err)
require.NotNil(t, vh)

// Convert PVC to unstructured
pvcMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvc)
require.NoError(t, err)
unstructuredPVC := &unstructured.Unstructured{Object: pvcMap}

// Call with non-nil VolumeHelper - exercises the cached path
result, err := ShouldPerformSnapshotWithVolumeHelper(
unstructuredPVC,
kuberesource.PersistentVolumeClaims,
*backup,
client,
logger,
vh, // Pass non-nil VolumeHelper - exercises cached path
)

require.NoError(t, err)
require.True(t, result, "Should return true for snapshot when snapshotVolumes not set")
}
@@ -290,9 +290,19 @@ func getJobConfig(
if globalResult.PriorityClassName != "" {
result.PriorityClassName = globalResult.PriorityClassName
}

// Pod's labels are only read from global config, not per-repository
if len(globalResult.PodLabels) > 0 {
result.PodLabels = globalResult.PodLabels
}

// Pod's annotations are only read from global config, not per-repository
if len(globalResult.PodAnnotations) > 0 {
result.PodAnnotations = globalResult.PodAnnotations
}
}

-logger.Debugf("Didn't find content for repository %s in cm %s", repo.Name, repoMaintenanceJobConfig)
+logger.Debugf("Configuration content for repository %s is %+v", repo.Name, result)

return result, nil
}
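Because podLabels and podAnnotations are only honored under the global key, the repo-maintenance-job ConfigMap carries them at the top level. The sketch below builds such a ConfigMap in Go, mirroring the test case added further down; it assumes the global key's literal value is "global" (the value referenced through the GlobalKeyForRepoMaintenanceJobCM constant), and the label/annotation values are placeholders.

```go
package example

import (
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleMaintenanceJobConfig returns a repo-maintenance-job ConfigMap whose
// global entry carries podLabels and podAnnotations; per-repository entries
// may still override resources or affinity, but not these two fields.
func exampleMaintenanceJobConfig(veleroNamespace, cmName string) *corev1api.ConfigMap {
	return &corev1api.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: veleroNamespace,
			Name:      cmName, // the name the server reads via repoMaintenanceJobConfig
		},
		Data: map[string]string{
			"global": `{"podLabels":{"team":"dr"},"podAnnotations":{"contact":"oncall@example.com"}}`,
		},
	}
}
```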
@@ -580,18 +590,29 @@ func buildJob(
podLabels := map[string]string{
RepositoryNameLabel: velerolabel.ReturnNameOrHash(repo.Name),
}

-for _, k := range util.ThirdPartyLabels {
-if v := veleroutil.GetVeleroServerLabelValue(deployment, k); v != "" {
+if config != nil && len(config.PodLabels) > 0 {
+for k, v := range config.PodLabels {
+podLabels[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyLabels {
+if v := veleroutil.GetVeleroServerLabelValue(deployment, k); v != "" {
+podLabels[k] = v
+}
+}
+}

podAnnotations := map[string]string{}
-for _, k := range util.ThirdPartyAnnotations {
-if v := veleroutil.GetVeleroServerAnnotationValue(deployment, k); v != "" {
+if config != nil && len(config.PodAnnotations) > 0 {
+for k, v := range config.PodAnnotations {
+podAnnotations[k] = v
+}
+} else {
+for _, k := range util.ThirdPartyAnnotations {
+if v := veleroutil.GetVeleroServerAnnotationValue(deployment, k); v != "" {
+podAnnotations[k] = v
+}
+}
+}

// Set arguments

@@ -538,6 +538,45 @@ func TestGetJobConfig(t *testing.T) {
},
expectedError: nil,
},
{
name: "Configs only exist in global section should supersede specific config",
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,
},
Data: map[string]string{
GlobalKeyForRepoMaintenanceJobCM: "{\"keepLatestMaintenanceJobs\":1,\"podResources\":{\"cpuRequest\":\"50m\",\"cpuLimit\":\"100m\",\"memoryRequest\":\"50Mi\",\"memoryLimit\":\"100Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"n2\"]}]}}],\"priorityClassName\":\"global-priority\",\"podAnnotations\":{\"global-key\":\"global-value\"},\"podLabels\":{\"global-key\":\"global-value\"}}",
"test-default-kopia": "{\"podResources\":{\"cpuRequest\":\"100m\",\"cpuLimit\":\"200m\",\"memoryRequest\":\"100Mi\",\"memoryLimit\":\"200Mi\"},\"loadAffinity\":[{\"nodeSelector\":{\"matchExpressions\":[{\"key\":\"cloud.google.com/machine-family\",\"operator\":\"In\",\"values\":[\"e2\"]}]}}],\"priorityClassName\":\"specific-priority\",\"podAnnotations\":{\"specific-key\":\"specific-value\"},\"podLabels\":{\"specific-key\":\"specific-value\"}}",
},
},
expectedConfig: &velerotypes.JobConfigs{
KeepLatestMaintenanceJobs: &keepLatestMaintenanceJobs,
PodResources: &kube.PodResources{
CPURequest: "100m",
CPULimit: "200m",
MemoryRequest: "100Mi",
MemoryLimit: "200Mi",
},
LoadAffinities: []*kube.LoadAffinity{
{
NodeSelector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "cloud.google.com/machine-family",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"e2"},
},
},
},
},
},
PriorityClassName: "global-priority",
PodAnnotations: map[string]string{"global-key": "global-value"},
PodLabels: map[string]string{"global-key": "global-value"},
},
expectedError: nil,
},
}

for _, tc := range testCases {
@@ -938,12 +977,12 @@ func TestBuildJob(t *testing.T) {
deploy *appsv1api.Deployment
logLevel logrus.Level
logFormat *logging.FormatFlag
-thirdPartyLabel map[string]string
expectedJobName string
expectedError bool
expectedEnv []corev1api.EnvVar
expectedEnvFrom []corev1api.EnvFromSource
expectedPodLabel map[string]string
+expectedPodAnnotation map[string]string
expectedSecurityContext *corev1api.SecurityContext
expectedPodSecurityContext *corev1api.PodSecurityContext
expectedImagePullSecrets []corev1api.LocalObjectReference
@@ -1065,6 +1104,68 @@ func TestBuildJob(t *testing.T) {
expectedJobName: "",
expectedError: true,
},
{
name: "Valid maintenance job customized labels and annotations",
m: &velerotypes.JobConfigs{
PodResources: &kube.PodResources{
CPURequest: "100m",
MemoryRequest: "128Mi",
CPULimit: "200m",
MemoryLimit: "256Mi",
},
PodLabels: map[string]string{
"global-label-1": "global-label-value-1",
"global-label-2": "global-label-value-2",
},
PodAnnotations: map[string]string{
"global-annotation-1": "global-annotation-value-1",
"global-annotation-2": "global-annotation-value-2",
},
},
deploy: deploy2,
logLevel: logrus.InfoLevel,
logFormat: logging.NewFormatFlag(),
expectedError: false,
expectedJobName: "test-123-maintain-job",
expectedEnv: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
expectedEnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
expectedPodLabel: map[string]string{
"global-label-1": "global-label-value-1",
"global-label-2": "global-label-value-2",
RepositoryNameLabel: "test-123",
},
expectedPodAnnotation: map[string]string{
"global-annotation-1": "global-annotation-value-1",
"global-annotation-2": "global-annotation-value-2",
},
expectedSecurityContext: nil,
expectedPodSecurityContext: nil,
expectedImagePullSecrets: []corev1api.LocalObjectReference{
{
Name: "imagePullSecret1",
},
},
},
{
name: "Valid maintenance job with third party labels and BackupRepository name longer than 63",
m: &velerotypes.JobConfigs{

@@ -109,7 +109,10 @@ func NewManager(
		log: log,
	}

-	mgr.providers[velerov1api.BackupRepositoryTypeRestic] = provider.NewResticRepositoryProvider(credentialFileStore, mgr.fileSystem, mgr.log)
+	mgr.providers[velerov1api.BackupRepositoryTypeRestic] = provider.NewResticRepositoryProvider(credentials.CredentialGetter{
+		FromFile: credentialFileStore,
+		FromSecret: credentialSecretStore,
+	}, mgr.fileSystem, mgr.log)
	mgr.providers[velerov1api.BackupRepositoryTypeKopia] = provider.NewUnifiedRepoProvider(credentials.CredentialGetter{
		FromFile: credentialFileStore,
		FromSecret: credentialSecretStore,

@@ -28,9 +28,9 @@ import (
	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

-func NewResticRepositoryProvider(store credentials.FileStore, fs filesystem.Interface, log logrus.FieldLogger) Provider {
+func NewResticRepositoryProvider(credGetter credentials.CredentialGetter, fs filesystem.Interface, log logrus.FieldLogger) Provider {
	return &resticRepositoryProvider{
-		svc: restic.NewRepositoryService(store, fs, log),
+		svc: restic.NewRepositoryService(credGetter, fs, log),
	}
}

@@ -59,7 +59,7 @@ var getGCPCredentials = repoconfig.GetGCPCredentials
var getS3BucketRegion = repoconfig.GetAWSBucketRegion

type localFuncTable struct {
-	getStorageVariables func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error)
+	getStorageVariables func(*velerov1api.BackupStorageLocation, string, string, map[string]string, credentials.CredentialGetter) (map[string]string, error)
	getStorageCredentials func(*velerov1api.BackupStorageLocation, credentials.FileStore) (map[string]string, error)
}

@@ -427,7 +427,7 @@ func (urp *unifiedRepoProvider) GetStoreOptions(param any) (map[string]string, e
		return map[string]string{}, errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param)
	}

-	storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace, repoParam.BackupRepo.Spec.RepositoryConfig)
+	storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace, repoParam.BackupRepo.Spec.RepositoryConfig, urp.credentialGetter)
	if err != nil {
		return map[string]string{}, errors.Wrap(err, "error to get storage variables")
	}
@@ -539,7 +539,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr
// so we would accept only the options that are well defined in the internal system.
// Users' inputs should not be treated as safe any time.
// We remove the unnecessary parameters and keep the modules/logics below safe
-func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string, backupRepoConfig map[string]string) (map[string]string, error) {
+func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string, backupRepoConfig map[string]string, credGetter credentials.CredentialGetter) (map[string]string, error) {
	result := make(map[string]string)

	backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config)
@@ -603,8 +603,23 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo

	result[udmrepo.StoreOptionOssBucket] = bucket
	result[udmrepo.StoreOptionPrefix] = prefix
-	if backupLocation.Spec.ObjectStorage != nil && backupLocation.Spec.ObjectStorage.CACert != nil {
-		result[udmrepo.StoreOptionCACert] = base64.StdEncoding.EncodeToString(backupLocation.Spec.ObjectStorage.CACert)
+	if backupLocation.Spec.ObjectStorage != nil {
+		var caCertData []byte
+
+		// Try CACertRef first (new method), then fall back to CACert (deprecated)
+		if backupLocation.Spec.ObjectStorage.CACertRef != nil {
+			caCertString, err := credGetter.FromSecret.Get(backupLocation.Spec.ObjectStorage.CACertRef)
+			if err != nil {
+				return nil, errors.Wrap(err, "error getting CA certificate from secret")
+			}
+			caCertData = []byte(caCertString)
+		} else if backupLocation.Spec.ObjectStorage.CACert != nil {
+			caCertData = backupLocation.Spec.ObjectStorage.CACert
+		}
+
+		if caCertData != nil {
+			result[udmrepo.StoreOptionCACert] = base64.StdEncoding.EncodeToString(caCertData)
+		}
	}
	result[udmrepo.StoreOptionOssRegion] = strings.Trim(region, "/")
	result[udmrepo.StoreOptionFsPath] = config["fspath"]
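
The same resolution order (Secret-referenced bundle wins, deprecated inline bytes as fallback) recurs in several hunks of this change set. A minimal self-contained sketch of just that precedence; resolveCACert, SecretGetter, secretRef, and objectStorage are hypothetical names for illustration, not Velero APIs:

// Sketch only: CACertRef (Secret) is preferred over the deprecated inline CACert.
package main

import "fmt"

type secretRef struct{ Name, Key string }

// SecretGetter stands in for a credentials.SecretStore-style Get.
type SecretGetter func(ref secretRef) (string, error)

type objectStorage struct {
	CACert    []byte     // deprecated inline CA bundle
	CACertRef *secretRef // reference to a Secret holding the bundle
}

func resolveCACert(o *objectStorage, get SecretGetter) ([]byte, error) {
	if o == nil {
		return nil, nil
	}
	if o.CACertRef != nil { // Secret-based cert wins
		s, err := get(*o.CACertRef)
		if err != nil {
			return nil, fmt.Errorf("error getting CA certificate from secret: %w", err)
		}
		return []byte(s), nil
	}
	return o.CACert, nil // may be nil: no CA bundle configured
}

func main() {
	get := func(secretRef) (string, error) { return "-----BEGIN CERTIFICATE-----", nil }
	ca, _ := resolveCACert(&objectStorage{CACertRef: &secretRef{"my-ca", "ca.crt"}}, get)
	fmt.Println(string(ca)) // prints the Secret's value, not the inline CACert
}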

@@ -465,7 +465,7 @@ func TestGetStorageVariables(t *testing.T) {
		t.Run(tc.name, func(t *testing.T) {
			getS3BucketRegion = tc.getS3BucketRegion

-			actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName, tc.repoConfig)
+			actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName, tc.repoConfig, velerocredentials.CredentialGetter{})

			require.Equal(t, tc.expected, actual)

@@ -554,7 +554,7 @@ func TestGetStoreOptions(t *testing.T) {
				BackupRepo: &velerov1api.BackupRepository{},
			},
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, errors.New("fake-error-2")
				},
			},
@@ -568,7 +568,7 @@ func TestGetStoreOptions(t *testing.T) {
				BackupRepo: &velerov1api.BackupRepository{},
			},
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -637,7 +637,7 @@ func TestPrepareRepo(t *testing.T) {
			repoService: new(reposervicenmocks.BackupRepoService),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, errors.New("fake-store-option-error")
				},
			},
@@ -648,7 +648,7 @@ func TestPrepareRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -666,7 +666,7 @@ func TestPrepareRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -687,7 +687,7 @@ func TestPrepareRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -703,12 +703,33 @@ func TestPrepareRepo(t *testing.T) {
			},
			expectedErr: "cannot create new backup repo for read-only backup storage location velero/fake-bsl",
		},
		{
			name: "create fail",
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
					return map[string]string{}, nil
				},
			},
			repoService: new(reposervicenmocks.BackupRepoService),
			retFuncCheck: func(ctx context.Context, repoOption udmrepo.RepoOptions) (bool, error) {
				return false, nil
			},
			retFuncCreate: func(ctx context.Context, repoOption udmrepo.RepoOptions) error {
				return errors.New("fake-error-1")
			},
			expectedErr: "error to create backup repo: fake-error-1",
		},
		{
			name: "initialize error",
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -729,7 +750,7 @@ func TestPrepareRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -812,7 +833,7 @@ func TestForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -836,7 +857,7 @@ func TestForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -864,7 +885,7 @@ func TestForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -962,7 +983,7 @@ func TestBatchForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -986,7 +1007,7 @@ func TestBatchForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1015,7 +1036,7 @@ func TestBatchForget(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1124,7 +1145,7 @@ func TestInitRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1142,7 +1163,7 @@ func TestInitRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1218,7 +1239,7 @@ func TestConnectToRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1236,7 +1257,7 @@ func TestConnectToRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1310,7 +1331,7 @@ func TestBoostRepoConnect(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1337,7 +1358,7 @@ func TestBoostRepoConnect(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1363,7 +1384,7 @@ func TestBoostRepoConnect(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1450,7 +1471,7 @@ func TestPruneRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {
@@ -1468,7 +1489,7 @@ func TestPruneRepo(t *testing.T) {
			getter: new(credmock.SecretStore),
			credStoreReturn: "fake-password",
			funcTable: localFuncTable{
-				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) {
+				getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string, velerocredentials.CredentialGetter) (map[string]string, error) {
					return map[string]string{}, nil
				},
				getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) {

@@ -31,18 +31,18 @@ import (
	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

-func NewRepositoryService(store credentials.FileStore, fs filesystem.Interface, log logrus.FieldLogger) *RepositoryService {
+func NewRepositoryService(credGetter credentials.CredentialGetter, fs filesystem.Interface, log logrus.FieldLogger) *RepositoryService {
	return &RepositoryService{
-		credentialsFileStore: store,
-		fileSystem: fs,
-		log: log,
+		credGetter: credGetter,
+		fileSystem: fs,
+		log: log,
	}
}

type RepositoryService struct {
-	credentialsFileStore credentials.FileStore
-	fileSystem filesystem.Interface
-	log logrus.FieldLogger
+	credGetter credentials.CredentialGetter
+	fileSystem filesystem.Interface
+	log logrus.FieldLogger
}

func (r *RepositoryService) InitRepo(bsl *velerov1api.BackupStorageLocation, repo *velerov1api.BackupRepository) error {

@@ -77,7 +77,7 @@ func (r *RepositoryService) DefaultMaintenanceFrequency() time.Duration {
}

func (r *RepositoryService) exec(cmd *restic.Command, bsl *velerov1api.BackupStorageLocation) error {
-	file, err := r.credentialsFileStore.Path(repokey.RepoKeySelector())
+	file, err := r.credGetter.FromFile.Path(repokey.RepoKeySelector())
	if err != nil {
		return err
	}
@@ -88,17 +88,37 @@ func (r *RepositoryService) exec(cmd *restic.Command, bsl *velerov1api.BackupSto

	// if there's a caCert on the ObjectStorage, write it to disk so that it can be passed to restic
	var caCertFile string
-	if bsl.Spec.ObjectStorage != nil && bsl.Spec.ObjectStorage.CACert != nil {
-		caCertFile, err = restic.TempCACertFile(bsl.Spec.ObjectStorage.CACert, bsl.Name, r.fileSystem)
-		if err != nil {
-			return errors.Wrap(err, "error creating temp cacert file")
+	if bsl.Spec.ObjectStorage != nil {
+		var caCertData []byte
+
+		// Try CACertRef first (new method), then fall back to CACert (deprecated)
+		if bsl.Spec.ObjectStorage.CACertRef != nil {
+			caCertString, err := r.credGetter.FromSecret.Get(bsl.Spec.ObjectStorage.CACertRef)
+			if err != nil {
+				return errors.Wrap(err, "error getting CA certificate from secret")
+			}
+			caCertData = []byte(caCertString)
+		} else if bsl.Spec.ObjectStorage.CACert != nil {
+			caCertData = bsl.Spec.ObjectStorage.CACert
+		}
+
+		if caCertData != nil {
+			caCertFile, err = restic.TempCACertFile(caCertData, bsl.Name, r.fileSystem)
+			if err != nil {
+				return errors.Wrap(err, "error creating temp cacert file")
+			}
+			// ignore error since there's nothing we can do and it's a temp file.
+			defer os.Remove(caCertFile)
		}
-		// ignore error since there's nothing we can do and it's a temp file.
-		defer os.Remove(caCertFile)
	}
	cmd.CACertFile = caCertFile

-	env, err := restic.CmdEnv(bsl, r.credentialsFileStore)
+	// CmdEnv uses credGetter.FromFile (not FromSecret) to get cloud provider credentials.
+	// FromFile materializes the BSL's Credential secret to a file path that cloud SDKs
+	// can read (e.g., AWS_SHARED_CREDENTIALS_FILE). This is different from caCertRef above,
+	// which uses FromSecret to read the CA certificate data directly into memory, then
+	// writes it to a temp file because restic CLI only accepts file paths (--cacert flag).
+	env, err := restic.CmdEnv(bsl, r.credGetter.FromFile)
	if err != nil {
		return err
	}

@@ -47,3 +47,31 @@ func NewFakeCredentialsFileStore(path string, err error) FileStore {
		err: err,
	}
}

// SecretStore defines operations for interacting with credentials
// that are stored in Secret.
type SecretStore interface {
	// Get returns the secret key defined by the given selector
	Get(selector *corev1api.SecretKeySelector) (string, error)
}

type fakeCredentialsSecretStore struct {
	data string
	err error
}

// Get returns the secret data.
func (f *fakeCredentialsSecretStore) Get(*corev1api.SecretKeySelector) (string, error) {
	return f.data, f.err
}

// NewFakeCredentialsSecretStore creates a SecretStore which will return the given data
// and error when Get is called.
// data is the secret value to return (e.g., certificate content).
// err is the error to return, if any.
func NewFakeCredentialsSecretStore(data string, err error) SecretStore {
	return &fakeCredentialsSecretStore{
		data: data,
		err: err,
	}
}
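
For context on how these fakes are meant to be wired into tests, a hedged sketch (the test name, selector values, and the require/assert/errors imports are illustrative, not part of this diff):

func TestGetCACertFromSecret(t *testing.T) {
	// Success fake: returns the PEM string for any selector.
	store := NewFakeCredentialsSecretStore("-----BEGIN CERTIFICATE-----", nil)

	pem, err := store.Get(&corev1api.SecretKeySelector{
		LocalObjectReference: corev1api.LocalObjectReference{Name: "my-ca"},
		Key: "ca.crt",
	})
	require.NoError(t, err)
	assert.Equal(t, "-----BEGIN CERTIFICATE-----", pem)

	// Error fake: exercises the "error getting CA certificate from secret" paths above.
	_, err = NewFakeCredentialsSecretStore("", errors.New("boom")).Get(nil)
	require.Error(t, err)
}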

@@ -98,4 +98,10 @@ type NodeAgentConfigs struct {

	// CachePVCConfig is the config for cachePVC
	CachePVCConfig *CachePVC `json:"cachePVC,omitempty"`

	// PodAnnotations are annotations to be added to pods created by node-agent, i.e., data mover pods.
	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`

	// PodLabels are labels to be added to pods created by node-agent, i.e., data mover pods.
	PodLabels map[string]string `json:"podLabels,omitempty"`
}

@@ -31,4 +31,12 @@ type JobConfigs struct {
	// PriorityClassName is the priority class name for the maintenance job pod
	// Note: This is only read from the global configuration, not per-repository
	PriorityClassName string `json:"priorityClassName,omitempty"`

	// PodAnnotations are annotations to be added to maintenance job pods.
	// Note: This is only read from the global configuration, not per-repository
	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`

	// PodLabels are labels to be added to maintenance job pods.
	// Note: This is only read from the global configuration, not per-repository
	PodLabels map[string]string `json:"podLabels,omitempty"`
}
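
These fields line up with the JSON keys exercised by the repo-maintenance-job ConfigMap test data earlier in this change set; a minimal ConfigMap value using the new keys might look like this (values illustrative):

{
  "keepLatestMaintenanceJobs": 3,
  "podResources": {"cpuRequest": "100m", "memoryLimit": "256Mi"},
  "priorityClassName": "backup-maintenance",
  "podAnnotations": {"team": "platform"},
  "podLabels": {"app.kubernetes.io/part-of": "velero"}
}

Per the comments above, only keepLatestMaintenanceJobs, podResources, and loadAffinity are honored per-repository; the priority class, annotations, and labels are read from the global key only.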

@@ -73,10 +73,25 @@ func NewResticUploaderProvider(
	}

	// if there's a caCert on the ObjectStorage, write it to disk so that it can be passed to restic
-	if bsl.Spec.ObjectStorage != nil && bsl.Spec.ObjectStorage.CACert != nil {
-		provider.caCertFile, err = resticTempCACertFileFunc(bsl.Spec.ObjectStorage.CACert, bsl.Name, filesystem.NewFileSystem())
-		if err != nil {
-			return nil, errors.Wrap(err, "error create temp cert file")
+	if bsl.Spec.ObjectStorage != nil {
+		var caCertData []byte
+
+		// Try CACertRef first (new method), then fall back to CACert (deprecated)
+		if bsl.Spec.ObjectStorage.CACertRef != nil {
+			caCertString, err := credGetter.FromSecret.Get(bsl.Spec.ObjectStorage.CACertRef)
+			if err != nil {
+				return nil, errors.Wrap(err, "error getting CA certificate from secret")
+			}
+			caCertData = []byte(caCertString)
+		} else if bsl.Spec.ObjectStorage.CACert != nil {
+			caCertData = bsl.Spec.ObjectStorage.CACert
+		}
+
+		if caCertData != nil {
+			provider.caCertFile, err = resticTempCACertFileFunc(caCertData, bsl.Name, filesystem.NewFileSystem())
+			if err != nil {
+				return nil, errors.Wrap(err, "error create temp cert file")
+			}
		}
	}


@@ -19,6 +19,7 @@ package podvolume
import (
	"context"
	"strings"
+	"sync"

	"github.com/pkg/errors"
	corev1api "k8s.io/api/core/v1"
@@ -29,6 +30,149 @@
	"github.com/vmware-tanzu/velero/pkg/util"
)

// PVCPodCache provides a cached mapping from PVC to the pods that use it.
// This cache is built once per backup to avoid repeated pod listings which
// cause O(N*M) performance issues when there are many PVCs and pods.
type PVCPodCache struct {
	mu sync.RWMutex
	// cache maps namespace -> pvcName -> []Pod
	cache map[string]map[string][]corev1api.Pod
	// built indicates whether the cache has been populated
	built bool
}

// NewPVCPodCache creates a new empty PVC to Pod cache.
func NewPVCPodCache() *PVCPodCache {
	return &PVCPodCache{
		cache: make(map[string]map[string][]corev1api.Pod),
		built: false,
	}
}

// BuildCacheForNamespaces builds the cache by listing pods once per namespace.
// This is much more efficient than listing pods for each PVC lookup.
func (c *PVCPodCache) BuildCacheForNamespaces(
	ctx context.Context,
	namespaces []string,
	crClient crclient.Client,
) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	for _, ns := range namespaces {
		podList := new(corev1api.PodList)
		if err := crClient.List(
			ctx,
			podList,
			&crclient.ListOptions{Namespace: ns},
		); err != nil {
			return errors.Wrapf(err, "failed to list pods in namespace %s", ns)
		}

		if c.cache[ns] == nil {
			c.cache[ns] = make(map[string][]corev1api.Pod)
		}

		// Build mapping from PVC name to pods
		for i := range podList.Items {
			pod := podList.Items[i]
			for _, v := range pod.Spec.Volumes {
				if v.PersistentVolumeClaim != nil {
					pvcName := v.PersistentVolumeClaim.ClaimName
					c.cache[ns][pvcName] = append(c.cache[ns][pvcName], pod)
				}
			}
		}
	}

	c.built = true
	return nil
}

// GetPodsUsingPVC retrieves pods using a specific PVC from the cache.
// Returns nil slice if the PVC is not found in the cache.
func (c *PVCPodCache) GetPodsUsingPVC(namespace, pvcName string) []corev1api.Pod {
	c.mu.RLock()
	defer c.mu.RUnlock()

	if nsPods, ok := c.cache[namespace]; ok {
		if pods, ok := nsPods[pvcName]; ok {
			// Return a copy to avoid race conditions
			result := make([]corev1api.Pod, len(pods))
			copy(result, pods)
			return result
		}
	}
	return nil
}

// IsBuilt returns true if the cache has been built.
func (c *PVCPodCache) IsBuilt() bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.built
}

// IsNamespaceBuilt returns true if the cache has been built for the given namespace.
func (c *PVCPodCache) IsNamespaceBuilt(namespace string) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	_, ok := c.cache[namespace]
	return ok
}

// BuildCacheForNamespace builds the cache for a single namespace lazily.
// This is used by plugins where namespaces are encountered one at a time.
// If the namespace is already cached, this is a no-op.
func (c *PVCPodCache) BuildCacheForNamespace(
	ctx context.Context,
	namespace string,
	crClient crclient.Client,
) error {
	// Check if already built (read lock first for performance)
	c.mu.RLock()
	if _, ok := c.cache[namespace]; ok {
		c.mu.RUnlock()
		return nil
	}
	c.mu.RUnlock()

	// Need to build - acquire write lock
	c.mu.Lock()
	defer c.mu.Unlock()

	// Double-check after acquiring write lock
	if _, ok := c.cache[namespace]; ok {
		return nil
	}

	podList := new(corev1api.PodList)
	if err := crClient.List(
		ctx,
		podList,
		&crclient.ListOptions{Namespace: namespace},
	); err != nil {
		return errors.Wrapf(err, "failed to list pods in namespace %s", namespace)
	}

	c.cache[namespace] = make(map[string][]corev1api.Pod)

	// Build mapping from PVC name to pods
	for i := range podList.Items {
		pod := podList.Items[i]
		for _, v := range pod.Spec.Volumes {
			if v.PersistentVolumeClaim != nil {
				pvcName := v.PersistentVolumeClaim.ClaimName
				c.cache[namespace][pvcName] = append(c.cache[namespace][pvcName], pod)
			}
		}
	}

	// Mark as built for GetPodsUsingPVCWithCache fallback logic
	c.built = true
	return nil
}

// GetVolumesByPod returns a list of volume names to backup for the provided pod.
func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, volsToProcessByLegacyApproach []string) ([]string, []string) {
	// tracks the volumes that have been explicitly opted out of backup via the annotation in the pod
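
A hedged usage sketch of the cache above: build it once per backup for every namespace in scope, then answer PVC-to-pod lookups from memory. The ctx and crClient values are assumed to be in scope, and the namespace and PVC names are illustrative (the actual backup-side wiring via NewVolumeHelperImplWithNamespaces is referenced in comments below but not shown in this diff):

// Build once up front; each List call hits the API server exactly one time per namespace.
cache := NewPVCPodCache()
if err := cache.BuildCacheForNamespaces(ctx, []string{"app-ns"}, crClient); err != nil {
	return err
}

// Every subsequent lookup is an O(1) map read instead of a pod list per PVC.
for _, pod := range cache.GetPodsUsingPVC("app-ns", "data-pvc") {
	fmt.Printf("PVC data-pvc is mounted by pod %s\n", pod.Name)
}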

@@ -109,12 +253,35 @@ func GetVolumesToExclude(obj metav1.Object) []string {
	return strings.Split(annotations[velerov1api.VolumesToExcludeAnnotation], ",")
}

-func IsPVCDefaultToFSBackup(pvcNamespace, pvcName string, crClient crclient.Client, defaultVolumesToFsBackup bool) (bool, error) {
-	pods, err := GetPodsUsingPVC(pvcNamespace, pvcName, crClient)
-	if err != nil {
-		return false, errors.WithStack(err)
+// IsPVCDefaultToFSBackupWithCache checks if a PVC should default to fs-backup based on pod annotations.
+// If cache is nil or not built, it falls back to listing pods directly.
+// Note: In the main backup path, the cache is always built (via NewVolumeHelperImplWithNamespaces),
+// so the fallback is only used by plugins that don't need cache optimization.
+func IsPVCDefaultToFSBackupWithCache(
+	pvcNamespace, pvcName string,
+	crClient crclient.Client,
+	defaultVolumesToFsBackup bool,
+	cache *PVCPodCache,
+) (bool, error) {
+	var pods []corev1api.Pod
+	var err error
+
+	// Use cache if available, otherwise fall back to direct lookup
+	if cache != nil && cache.IsBuilt() {
+		pods = cache.GetPodsUsingPVC(pvcNamespace, pvcName)
+	} else {
+		pods, err = getPodsUsingPVCDirect(pvcNamespace, pvcName, crClient)
+		if err != nil {
+			return false, errors.WithStack(err)
+		}
	}

	return checkPodsForFSBackup(pods, pvcName, defaultVolumesToFsBackup)
}

// checkPodsForFSBackup is a helper function that checks if any pod using the PVC
// has the volume selected for fs-backup.
func checkPodsForFSBackup(pods []corev1api.Pod, pvcName string, defaultVolumesToFsBackup bool) (bool, error) {
	for index := range pods {
		vols, _ := GetVolumesByPod(&pods[index], defaultVolumesToFsBackup, false, []string{})
		if len(vols) > 0 {
@@ -140,7 +307,32 @@ func getPodVolumeNameForPVC(pod corev1api.Pod, pvcName string) (string, error) {
	return "", errors.Errorf("Pod %s/%s does not use PVC %s/%s", pod.Namespace, pod.Name, pod.Namespace, pvcName)
}

-func GetPodsUsingPVC(
+// GetPodsUsingPVCWithCache returns all pods that use the specified PVC.
+// If cache is available and built, it uses the cache for O(1) lookup.
+// Otherwise, it falls back to listing pods directly.
+// Note: In the main backup path, the cache is always built (via NewVolumeHelperImplWithNamespaces),
+// so the fallback is only used by plugins that don't need cache optimization.
+func GetPodsUsingPVCWithCache(
+	pvcNamespace, pvcName string,
+	crClient crclient.Client,
+	cache *PVCPodCache,
+) ([]corev1api.Pod, error) {
+	// Use cache if available
+	if cache != nil && cache.IsBuilt() {
+		pods := cache.GetPodsUsingPVC(pvcNamespace, pvcName)
+		if pods == nil {
+			return []corev1api.Pod{}, nil
+		}
+		return pods, nil
+	}
+
+	// Fall back to direct lookup (for plugins without cache)
+	return getPodsUsingPVCDirect(pvcNamespace, pvcName, crClient)
+}
+
+// getPodsUsingPVCDirect returns all pods in the given namespace that use the specified PVC.
+// This is an internal function that lists all pods in the namespace and filters them.
+func getPodsUsingPVCDirect(
	pvcNamespace, pvcName string,
	crClient crclient.Client,
) ([]corev1api.Pod, error) {
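
For the plugin-side path, where namespaces arrive one at a time, the lazy builder pairs with the WithCache helper, which transparently falls back to a direct pod list when the cache is nil or unbuilt. A hedged sketch; pvc, ctx, crClient, and cache are assumed to be in scope:

// Populate the cache only for the namespace at hand; repeat calls are no-ops
// thanks to the double-checked locking in BuildCacheForNamespace.
if err := cache.BuildCacheForNamespace(ctx, pvc.Namespace, crClient); err != nil {
	return err
}
pods, err := GetPodsUsingPVCWithCache(pvc.Namespace, pvc.Name, crClient, cache)
if err != nil {
	return err
}
_ = pods // e.g., decide whether any pod opts the volume into fs-backup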

@@ -382,196 +382,6 @@ func TestGetVolumesByPod(t *testing.T) {
	}
}

func TestIsPVCDefaultToFSBackup(t *testing.T) {
	objs := []runtime.Object{
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod2",
				Namespace: "default",
				Annotations: map[string]string{
					"backup.velero.io/backup-volumes": "csi-vol1",
				},
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod3",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							EmptyDir: &corev1api.EmptyDirVolumeSource{},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "awesome-pod-1",
				Namespace: "awesome-ns",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "awesome-csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "awesome-pod-2",
				Namespace: "awesome-ns",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "awesome-csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				Namespace: "uploader-ns",
				Annotations: map[string]string{
					"backup.velero.io/backup-volumes": "csi-vol1",
				},
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod2",
				Namespace: "uploader-ns",
				Annotations: map[string]string{
					"backup.velero.io/backup-volumes": "csi-vol1",
				},
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
	}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)

	testCases := []struct {
		name string
		inPVCNamespace string
		inPVCName string
		expectedIsFSUploaderUsed bool
		defaultVolumesToFSBackup bool
	}{
		{
			name: "2 pods using PVC, 1 pod using uploader",
			inPVCNamespace: "default",
			inPVCName: "csi-pvc1",
			expectedIsFSUploaderUsed: true,
			defaultVolumesToFSBackup: false,
		},
		{
			name: "2 pods using PVC, 2 pods using uploader",
			inPVCNamespace: "uploader-ns",
			inPVCName: "csi-pvc1",
			expectedIsFSUploaderUsed: true,
			defaultVolumesToFSBackup: false,
		},
		{
			name: "2 pods using PVC, 0 pods using uploader",
			inPVCNamespace: "awesome-ns",
			inPVCName: "awesome-csi-pvc1",
			expectedIsFSUploaderUsed: false,
			defaultVolumesToFSBackup: false,
		},
		{
			name: "0 pods using PVC",
			inPVCNamespace: "default",
			inPVCName: "does-not-exist",
			expectedIsFSUploaderUsed: false,
			defaultVolumesToFSBackup: false,
		},
		{
			name: "2 pods using PVC, using uploader by default",
			inPVCNamespace: "awesome-ns",
			inPVCName: "awesome-csi-pvc1",
			expectedIsFSUploaderUsed: true,
			defaultVolumesToFSBackup: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actualIsFSUploaderUsed, _ := IsPVCDefaultToFSBackup(tc.inPVCNamespace, tc.inPVCName, fakeClient, tc.defaultVolumesToFSBackup)
			assert.Equal(t, tc.expectedIsFSUploaderUsed, actualIsFSUploaderUsed)
		})
	}
}

func TestGetPodVolumeNameForPVC(t *testing.T) {
	testCases := []struct {
		name string

@@ -677,122 +487,6 @@ func TestGetPodVolumeNameForPVC(t *testing.T) {
	}
}

func TestGetPodsUsingPVC(t *testing.T) {
	objs := []runtime.Object{
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod2",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod3",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							EmptyDir: &corev1api.EmptyDirVolumeSource{},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				Namespace: "awesome-ns",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "csi-vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "csi-pvc1",
							},
						},
					},
				},
			},
		},
	}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)

	testCases := []struct {
		name string
		pvcNamespace string
		pvcName string
		expectedPodCount int
	}{
		{
			name: "should find exactly 2 pods using the PVC",
			pvcNamespace: "default",
			pvcName: "csi-pvc1",
			expectedPodCount: 2,
		},
		{
			name: "should find exactly 1 pod using the PVC",
			pvcNamespace: "awesome-ns",
			pvcName: "csi-pvc1",
			expectedPodCount: 1,
		},
		{
			name: "should find 0 pods using the PVC",
			pvcNamespace: "default",
			pvcName: "unused-pvc",
			expectedPodCount: 0,
		},
		{
			name: "should find 0 pods in non-existent namespace",
			pvcNamespace: "does-not-exist",
			pvcName: "csi-pvc1",
			expectedPodCount: 0,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actualPods, err := GetPodsUsingPVC(tc.pvcNamespace, tc.pvcName, fakeClient)
			require.NoErrorf(t, err, "Want error=nil; Got error=%v", err)
			assert.Lenf(t, actualPods, tc.expectedPodCount, "unexpected number of pods in result; Want: %d; Got: %d", tc.expectedPodCount, len(actualPods))
		})
	}
}

func TestGetVolumesToProcess(t *testing.T) {
	testCases := []struct {
		name string
@@ -886,3 +580,590 @@ func TestGetVolumesToProcess(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPVCPodCache_BuildAndGet(t *testing.T) {
|
||||
objs := []runtime.Object{
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "vol2",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod3",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
EmptyDir: &corev1api.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod4",
|
||||
Namespace: "other-ns",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
namespaces []string
|
||||
pvcNamespace string
|
||||
pvcName string
|
||||
expectedPodCount int
|
||||
}{
|
||||
{
|
||||
name: "should find 2 pods using pvc1 in default namespace",
|
||||
namespaces: []string{"default", "other-ns"},
|
||||
pvcNamespace: "default",
|
||||
pvcName: "pvc1",
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
{
|
||||
name: "should find 1 pod using pvc2 in default namespace",
|
||||
namespaces: []string{"default", "other-ns"},
|
||||
pvcNamespace: "default",
|
||||
pvcName: "pvc2",
|
||||
expectedPodCount: 1,
|
||||
},
|
||||
{
|
||||
name: "should find 1 pod using pvc1 in other-ns",
|
||||
namespaces: []string{"default", "other-ns"},
|
||||
pvcNamespace: "other-ns",
|
||||
pvcName: "pvc1",
|
||||
expectedPodCount: 1,
|
||||
},
|
||||
{
|
||||
name: "should find 0 pods for non-existent PVC",
|
||||
namespaces: []string{"default", "other-ns"},
|
||||
pvcNamespace: "default",
|
||||
pvcName: "non-existent",
|
||||
expectedPodCount: 0,
|
||||
},
|
||||
{
|
||||
name: "should find 0 pods for non-existent namespace",
|
||||
namespaces: []string{"default", "other-ns"},
|
||||
pvcNamespace: "non-existent-ns",
|
||||
pvcName: "pvc1",
|
||||
expectedPodCount: 0,
|
||||
},
|
||||
{
|
||||
name: "should find 0 pods when namespace not in cache",
|
||||
namespaces: []string{"default"},
|
||||
pvcNamespace: "other-ns",
|
||||
pvcName: "pvc1",
|
||||
expectedPodCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cache := NewPVCPodCache()
|
||||
err := cache.BuildCacheForNamespaces(t.Context(), tc.namespaces, fakeClient)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, cache.IsBuilt())
|
||||
|
||||
pods := cache.GetPodsUsingPVC(tc.pvcNamespace, tc.pvcName)
|
||||
assert.Len(t, pods, tc.expectedPodCount, "unexpected number of pods")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPodsUsingPVCWithCache(t *testing.T) {
|
||||
objs := []runtime.Object{
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&corev1api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1api.PodSpec{
|
||||
Volumes: []corev1api.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1api.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvc1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
pvcNamespace string
|
||||
pvcName string
|
||||
buildCache bool
|
||||
useNilCache bool
|
||||
expectedPodCount int
|
||||
}{
|
||||
{
|
||||
name: "returns cached results when cache is available",
|
||||
pvcNamespace: "default",
|
||||
pvcName: "pvc1",
|
||||
buildCache: true,
|
||||
useNilCache: false,
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
{
|
||||
name: "falls back to direct lookup when cache is nil",
|
||||
pvcNamespace: "default",
|
||||
pvcName: "pvc1",
|
||||
buildCache: false,
|
||||
useNilCache: true,
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
{
|
||||
name: "falls back to direct lookup when cache is not built",
|
||||
pvcNamespace: "default",
|
||||
pvcName: "pvc1",
|
||||
buildCache: false,
|
||||
useNilCache: false,
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
{
|
||||
name: "returns empty slice for non-existent PVC with cache",
|
||||
pvcNamespace: "default",
|
||||
pvcName: "non-existent",
|
||||
buildCache: true,
|
||||
useNilCache: false,
|
||||
expectedPodCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var cache *PVCPodCache
|
||||
if !tc.useNilCache {
|
||||
cache = NewPVCPodCache()
|
||||
if tc.buildCache {
|
||||
err := cache.BuildCacheForNamespaces(t.Context(), []string{"default"}, fakeClient)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
pods, err := GetPodsUsingPVCWithCache(tc.pvcNamespace, tc.pvcName, fakeClient, cache)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pods, tc.expectedPodCount, "unexpected number of pods")
|
||||
})
|
||||
}
|
||||
}
|
||||

func TestIsPVCDefaultToFSBackupWithCache(t *testing.T) {
	objs := []runtime.Object{
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "pod1",
				Namespace: "default",
				Annotations: map[string]string{
					"backup.velero.io/backup-volumes": "vol1",
				},
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc1",
							},
						},
					},
				},
			},
		},
		&corev1api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "pod2",
				Namespace: "default",
			},
			Spec: corev1api.PodSpec{
				Volumes: []corev1api.Volume{
					{
						Name: "vol1",
						VolumeSource: corev1api.VolumeSource{
							PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
								ClaimName: "pvc2",
							},
						},
					},
				},
			},
		},
	}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...)

	testCases := []struct {
		name                     string
		pvcNamespace             string
		pvcName                  string
		defaultVolumesToFsBackup bool
		buildCache               bool
		useNilCache              bool
		expectedResult           bool
	}{
		{
			name:                     "returns true for PVC with opt-in annotation using cache",
			pvcNamespace:             "default",
			pvcName:                  "pvc1",
			defaultVolumesToFsBackup: false,
			buildCache:               true,
			useNilCache:              false,
			expectedResult:           true,
		},
		{
			name:                     "returns false for PVC without annotation using cache",
			pvcNamespace:             "default",
			pvcName:                  "pvc2",
			defaultVolumesToFsBackup: false,
			buildCache:               true,
			useNilCache:              false,
			expectedResult:           false,
		},
		{
			name:                     "returns true for any PVC with defaultVolumesToFsBackup using cache",
			pvcNamespace:             "default",
			pvcName:                  "pvc2",
			defaultVolumesToFsBackup: true,
			buildCache:               true,
			useNilCache:              false,
			expectedResult:           true,
		},
		{
			name:                     "falls back to direct lookup when cache is nil",
			pvcNamespace:             "default",
			pvcName:                  "pvc1",
			defaultVolumesToFsBackup: false,
			buildCache:               false,
			useNilCache:              true,
			expectedResult:           true,
		},
		{
			name:                     "returns false for non-existent PVC",
			pvcNamespace:             "default",
			pvcName:                  "non-existent",
			defaultVolumesToFsBackup: false,
			buildCache:               true,
			useNilCache:              false,
			expectedResult:           false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			var cache *PVCPodCache
			if !tc.useNilCache {
				cache = NewPVCPodCache()
				if tc.buildCache {
					err := cache.BuildCacheForNamespaces(t.Context(), []string{"default"}, fakeClient)
					require.NoError(t, err)
				}
			}

			result, err := IsPVCDefaultToFSBackupWithCache(tc.pvcNamespace, tc.pvcName, fakeClient, tc.defaultVolumesToFsBackup, cache)
			require.NoError(t, err)
			assert.Equal(t, tc.expectedResult, result)
		})
	}
}

// TestIsNamespaceBuilt tests the IsNamespaceBuilt method for lazy per-namespace caching.
func TestIsNamespaceBuilt(t *testing.T) {
	cache := NewPVCPodCache()

	// Initially no namespace should be built
	assert.False(t, cache.IsNamespaceBuilt("ns1"), "namespace should not be built initially")
	assert.False(t, cache.IsNamespaceBuilt("ns2"), "namespace should not be built initially")

	// Create a fake client with a pod in ns1
	pod := &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-pod",
			Namespace: "ns1",
		},
		Spec: corev1api.PodSpec{
			Volumes: []corev1api.Volume{
				{
					Name: "vol1",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvc1",
						},
					},
				},
			},
		},
	}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, pod)

	// Build cache for ns1
	err := cache.BuildCacheForNamespace(t.Context(), "ns1", fakeClient)
	require.NoError(t, err)

	// ns1 should be built, ns2 should not
	assert.True(t, cache.IsNamespaceBuilt("ns1"), "namespace ns1 should be built")
	assert.False(t, cache.IsNamespaceBuilt("ns2"), "namespace ns2 should not be built")

	// Build cache for ns2 (empty namespace)
	err = cache.BuildCacheForNamespace(t.Context(), "ns2", fakeClient)
	require.NoError(t, err)

	// Both should now be built
	assert.True(t, cache.IsNamespaceBuilt("ns1"), "namespace ns1 should still be built")
	assert.True(t, cache.IsNamespaceBuilt("ns2"), "namespace ns2 should now be built")
}

// TestBuildCacheForNamespace tests the lazy per-namespace cache building.
func TestBuildCacheForNamespace(t *testing.T) {
	tests := []struct {
		name         string
		pods         []runtime.Object
		namespace    string
		expectedPVCs map[string]int // pvcName -> expected pod count
		expectError  bool
	}{
		{
			name:      "build cache for namespace with pods using PVCs",
			namespace: "ns1",
			pods: []runtime.Object{
				&corev1api.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
					Spec: corev1api.PodSpec{
						Volumes: []corev1api.Volume{
							{
								Name: "vol1",
								VolumeSource: corev1api.VolumeSource{
									PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
										ClaimName: "pvc1",
									},
								},
							},
						},
					},
				},
				&corev1api.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "pod2", Namespace: "ns1"},
					Spec: corev1api.PodSpec{
						Volumes: []corev1api.Volume{
							{
								Name: "vol1",
								VolumeSource: corev1api.VolumeSource{
									PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
										ClaimName: "pvc1",
									},
								},
							},
						},
					},
				},
			},
			expectedPVCs: map[string]int{"pvc1": 2},
		},
		{
			name:         "build cache for empty namespace",
			namespace:    "empty-ns",
			pods:         []runtime.Object{},
			expectedPVCs: map[string]int{},
		},
		{
			name:      "build cache ignores pods without PVCs",
			namespace: "ns1",
			pods: []runtime.Object{
				&corev1api.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
					Spec: corev1api.PodSpec{
						Volumes: []corev1api.Volume{
							{
								Name: "config-vol",
								VolumeSource: corev1api.VolumeSource{
									ConfigMap: &corev1api.ConfigMapVolumeSource{
										LocalObjectReference: corev1api.LocalObjectReference{
											Name: "my-config",
										},
									},
								},
							},
						},
					},
				},
			},
			expectedPVCs: map[string]int{},
		},
		{
			name:      "build cache only for specified namespace",
			namespace: "ns1",
			pods: []runtime.Object{
				&corev1api.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
					Spec: corev1api.PodSpec{
						Volumes: []corev1api.Volume{
							{
								Name: "vol1",
								VolumeSource: corev1api.VolumeSource{
									PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
										ClaimName: "pvc1",
									},
								},
							},
						},
					},
				},
				&corev1api.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "pod2", Namespace: "ns2"},
					Spec: corev1api.PodSpec{
						Volumes: []corev1api.Volume{
							{
								Name: "vol1",
								VolumeSource: corev1api.VolumeSource{
									PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
										ClaimName: "pvc2",
									},
								},
							},
						},
					},
				},
			},
			expectedPVCs: map[string]int{"pvc1": 1},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := velerotest.NewFakeControllerRuntimeClient(t, tc.pods...)
			cache := NewPVCPodCache()

			// Build cache for the namespace
			err := cache.BuildCacheForNamespace(t.Context(), tc.namespace, fakeClient)
			if tc.expectError {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)

			// Verify namespace is marked as built
			assert.True(t, cache.IsNamespaceBuilt(tc.namespace))

			// Verify PVC to pod mappings
			for pvcName, expectedCount := range tc.expectedPVCs {
				pods := cache.GetPodsUsingPVC(tc.namespace, pvcName)
				assert.Len(t, pods, expectedCount, "unexpected pod count for PVC %s", pvcName)
			}

			// Calling BuildCacheForNamespace again should be a no-op
			err = cache.BuildCacheForNamespace(t.Context(), tc.namespace, fakeClient)
			require.NoError(t, err)
		})
	}
}

// TestBuildCacheForNamespaceIdempotent verifies that building cache multiple times is safe.
func TestBuildCacheForNamespaceIdempotent(t *testing.T) {
	pod := &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
		Spec: corev1api.PodSpec{
			Volumes: []corev1api.Volume{
				{
					Name: "vol1",
					VolumeSource: corev1api.VolumeSource{
						PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvc1",
						},
					},
				},
			},
		},
	}
	fakeClient := velerotest.NewFakeControllerRuntimeClient(t, pod)
	cache := NewPVCPodCache()

	// Build cache multiple times - should be idempotent
	for i := 0; i < 3; i++ {
		err := cache.BuildCacheForNamespace(t.Context(), "ns1", fakeClient)
		require.NoError(t, err)
		assert.True(t, cache.IsNamespaceBuilt("ns1"))

		pods := cache.GetPodsUsingPVC("ns1", "pvc1")
		assert.Len(t, pods, 1, "should have exactly 1 pod using pvc1")
	}
}
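
For orientation, here is a minimal sketch of the cache API these tests exercise — lazy per-namespace builds, idempotent rebuilds, and empty results for unknown PVCs. This is an assumed shape for illustration only; the actual implementation in Velero's `pkg/util` package is authoritative and may differ:

```go
package util

import (
	"context"
	"sync"

	corev1api "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// PVCPodCache maps "namespace/pvcName" to the pods that mount the PVC.
type PVCPodCache struct {
	mu        sync.RWMutex
	built     map[string]bool
	pvcToPods map[string][]corev1api.Pod
}

func NewPVCPodCache() *PVCPodCache {
	return &PVCPodCache{
		built:     map[string]bool{},
		pvcToPods: map[string][]corev1api.Pod{},
	}
}

// IsNamespaceBuilt reports whether a namespace has already been scanned.
func (c *PVCPodCache) IsNamespaceBuilt(ns string) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.built[ns]
}

// BuildCacheForNamespace lists pods once per namespace; repeated calls are no-ops.
func (c *PVCPodCache) BuildCacheForNamespace(ctx context.Context, ns string, cli client.Client) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.built[ns] {
		return nil
	}
	pods := new(corev1api.PodList)
	if err := cli.List(ctx, pods, client.InNamespace(ns)); err != nil {
		return err
	}
	for i := range pods.Items {
		for _, vol := range pods.Items[i].Spec.Volumes {
			if pvc := vol.PersistentVolumeClaim; pvc != nil {
				key := ns + "/" + pvc.ClaimName
				c.pvcToPods[key] = append(c.pvcToPods[key], pods.Items[i])
			}
		}
	}
	c.built[ns] = true
	return nil
}

// BuildCacheForNamespaces builds each requested namespace in turn.
func (c *PVCPodCache) BuildCacheForNamespaces(ctx context.Context, namespaces []string, cli client.Client) error {
	for _, ns := range namespaces {
		if err := c.BuildCacheForNamespace(ctx, ns, cli); err != nil {
			return err
		}
	}
	return nil
}

// GetPodsUsingPVC returns the cached pods for a PVC; nil when none are known.
func (c *PVCPodCache) GetPodsUsingPVC(ns, pvcName string) []corev1api.Pod {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.pvcToPods[ns+"/"+pvcName]
}
```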
@@ -44,13 +44,47 @@ spec:
   provider: aws
   objectStorage:
     bucket: velero-backups
-    # Base64 encoded CA certificate
+    # Base64 encoded CA certificate (deprecated - use caCertRef instead)
     caCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1VENDQXFHZ0F3SUJBZ0lVTWRiWkNaYnBhcE9lYThDR0NMQnhhY3dVa213d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JERUxNQWtHQTFVRUJoTUNWVk14RXpBUkJnTlZCQWdNQ2tOaGJHbG1iM0p1YVdFeEZqQVVCZ05WQkFjTQpEVk5oYmlCR2NtRnVZMmx6WTI4eEdEQVdCZ05WQkFvTUQwVjRZVzF3YkdVZ1EyOXRjR0Z1ZVRFV01CUUdBMVVFCkF3d05aWGhoYlhCc1pTNXNiMk5oYkRBZUZ3MHlNekEzTVRBeE9UVXlNVGhhRncweU5EQTNNRGt4T1RVeU1UaGEKTUd3eEN6QUpCZ05WQkFZVEFsVlRNUk13RVFZRFZRUUNEQXBEWEJ4cG1iM0p1YVdFeEZqQVVCZ05WQkFjTURWTmgKYmlCR2NtRnVZMmx6WTI4eEdEQVdCZ05WQkFvTUQwVjRZVzF3YkdVZ1EyOXRjR0Z1ZVRFV01CUUdBMVVFQXd3TgpaWGhoYlhCc1pTNXNiMk5oYkRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS1dqCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
   config:
     region: us-east-1
     s3Url: https://minio.example.com
```

#### Using a CA Certificate with Secret Reference (Recommended)

The recommended approach is to use `caCertRef` to reference a Secret containing the CA certificate:

```yaml
# First, create a Secret containing the CA certificate
apiVersion: v1
kind: Secret
metadata:
  name: storage-ca-cert
  namespace: velero
type: Opaque
data:
  ca-bundle.crt: <base64-encoded-certificate>

---
# Then reference it in the BackupStorageLocation
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: myBucket
    caCertRef:
      name: storage-ca-cert
      key: ca-bundle.crt
  # ... other configuration
```

**Note:** You cannot specify both `caCert` and `caCertRef` in the same BackupStorageLocation. The `caCert` field is deprecated and will be removed in a future version.
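
If you are moving an existing location off the deprecated field, one hedged sketch — assuming the BSL is named `default` in the `velero` namespace — is to extract the inline bundle into a Secret and then switch the spec over:

```bash
# Pull the inline caCert out of the BSL and decode it to a file.
kubectl get backupstoragelocation default -n velero \
  -o jsonpath='{.spec.objectStorage.caCert}' | base64 -d > ca-bundle.crt

# Store it in the Secret that caCertRef will point at.
kubectl create secret generic storage-ca-cert \
  --from-file=ca-bundle.crt=ca-bundle.crt -n velero
```

After the Secret exists, set `caCertRef` in the BSL spec and remove `caCert`.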

### Parameter Reference

The configurable parameters are as follows:
@@ -64,7 +98,10 @@ The configurable parameters are as follows:
| `objectStorage` | ObjectStorageLocation | Required Field | Specification of the object storage for the given provider. |
| `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
| `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. |
-| `objectStorage/caCert` | String | Optional Field | A base64 encoded CA bundle to be used when verifying TLS connections |
+| `objectStorage/caCert` | String | Optional Field | **Deprecated**: Use `caCertRef` instead. A base64 encoded CA bundle to be used when verifying TLS connections |
+| `objectStorage/caCertRef` | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) | Optional Field | Reference to a Secret containing a CA bundle to be used when verifying TLS connections. The Secret must be in the same namespace as the BackupStorageLocation. |
+| `objectStorage/caCertRef/name` | String | Required Field (when using caCertRef) | The name of the Secret containing the CA certificate bundle |
+| `objectStorage/caCertRef/key` | String | Required Field (when using caCertRef) | The key within the Secret that contains the CA certificate bundle |
| `config` | map[string]string | None (Optional) | Provider-specific configuration keys/values to be passed to the object store plugin. See [your object storage provider's plugin documentation](../supported-providers) for details. |
| `accessMode` | String | `ReadWrite` | How Velero can access the backup storage location. Valid values are `ReadWrite`, `ReadOnly`. |
| `backupSyncPeriod` | metav1.Duration | Optional Field | How frequently Velero should synchronize backups in object storage. Default is Velero's server backup sync period. Set this to `0s` to disable sync. |
@@ -72,4 +109,4 @@ The configurable parameters are as follows:
| `credential` | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) | Optional Field | The credential information to be used with this location. |
| `credential/name` | String | Optional Field | The name of the secret within the Velero namespace which contains the credential information. |
| `credential/key` | String | Optional Field | The key to use within the secret. |
{{< /table >}}
@@ -18,7 +18,7 @@ Velero introduces a new section in the node-agent configMap, called ```podResources```
If it is not there, a configMap should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace, applying only to the node-agent in that namespace. The name of the configMap should be specified in the node-agent server parameter ```--node-agent-configmap```.
The node-agent server reads these configurations at startup, so you can edit this configMap at any time; restart the node-agent server to make the changes take effect.

-### Sample
+### Pod Resources
Here is a sample of the configMap with ```podResources```:
```json
{
@@ -27,8 +27,7 @@ Here is a sample of the configMap with ```podResources```:
      "cpuLimit": "1000m",
      "memoryRequest": "512Mi",
      "memoryLimit": "1Gi"
    },
    "priorityClassName": "high-priority"
  }
}
```
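
To load a sample like this, save the JSON to a file and create the configMap from it — the same command this page uses later:

```bash
kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json
```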

@@ -93,12 +92,6 @@ To configure priority class for data mover pods, include it in your node-agent c

```json
{
  "podResources": {
    "cpuRequest": "1000m",
    "cpuLimit": "2000m",
    "memoryRequest": "1Gi",
    "memoryLimit": "4Gi"
  },
  "priorityClassName": "backup-priority"
}
```
@@ -123,6 +116,47 @@ kubectl create cm node-agent-config -n velero --from-file=node-agent-config.json

**Note**: If the specified priority class doesn't exist in the cluster when data mover pods are created, the pods will fail to schedule. Velero validates the priority class at startup and logs a warning if it doesn't exist, but the pods will still attempt to use it.
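
The priority class itself is a cluster-scoped resource that must be created separately. A minimal sketch matching the `backup-priority` name from the example above — the `value` and `description` here are illustrative, not Velero defaults:

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: backup-priority
value: 100000            # relative scheduling weight; choose per cluster policy
globalDefault: false
description: "Priority class for Velero data mover pods"
```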

### Pod Labels
Add customized labels for data mover pods to support third-party integrations and environment-specific requirements.

If `podLabels` is configured, it supersedes Velero's [in-tree third-party labels](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L19-L21).
If `podLabels` is not configured, Velero uses the in-tree third-party labels for compatibility with common cloud providers and networking solutions.

The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.

#### Configuration Example
```json
{
  "podLabels": {
    "spectrocloud.com/connection": "proxy",
    "gnp/k8s-api-access": "",
    "gnp/monitoring-client": "",
    "np/s3-backup-backend": "",
    "cp/inject-truststore": "extended"
  }
}
```

### Pod Annotations
Add customized annotations for data mover pods to support third-party integrations and pod-level configuration.

If `podAnnotations` is configured, it supersedes Velero's [in-tree third-party annotations](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L23-L25).
If `podAnnotations` is not configured, Velero uses the in-tree third-party annotations for compatibility with common cloud providers and networking solutions.

The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.

#### Configuration Example
```json
{
  "podAnnotations": {
    "iam.amazonaws.com/role": "velero-backup-role",
    "vault.hashicorp.com/agent-inject": "true",
    "prometheus.io/scrape": "true",
    "custom.company.com/environment": "production"
  }
}
```

## Related Documentation

- [Node-agent Configuration](supported-configmaps/node-agent-configmap.md) - Complete reference for all configuration options

@@ -31,34 +31,73 @@ Take the following as an example:
```

## Set the proxy required certificates
-In some cases, the proxy requires certificate to connect. Set the certificate in the BSL's `Spec.ObjectStorage.CACert`.
-It's possible that the object storage also requires certificate, and it's also set in `Spec.ObjectStorage.CACert`, then set both certificates in `Spec.ObjectStorage.CACert` field.
+In some cases, the proxy requires a certificate to connect. You can provide certificates in the BSL configuration.
+If the object storage also requires a certificate, include both certificates together.

-The following is an example file contains two certificates, then encode its content with base64, and set the encode result in the BSL.
+### Method 1: Using Kubernetes Secrets (Recommended)

The recommended approach is to store certificates in a Kubernetes Secret and reference them using `caCertRef`:

1. Create a file containing all required certificates:

``` bash
cat certs
-----BEGIN CERTIFICATE-----
certificates first content
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
certificates second content
-----END CERTIFICATE-----
```

2. Create a Secret from the certificate file:

``` bash
kubectl create secret generic proxy-ca-certs \
  --from-file=ca-bundle.crt=certs \
  -n velero
```

3. Reference the Secret in your BackupStorageLocation:

``` yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: <YOUR_PROVIDER>
  default: true
  objectStorage:
    bucket: velero
    caCertRef:
      name: proxy-ca-certs
      key: ca-bundle.crt
  # ... other configuration
```
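
Before wiring the bundle in, it can help to sanity-check the `certs` file from step 1. This is an optional sketch using standard OpenSSL tooling, not a Velero command:

``` bash
# Count the PEM blocks in the bundle.
grep -c 'BEGIN CERTIFICATE' certs

# Print subject/issuer for every certificate in the bundle.
openssl crl2pkcs7 -nocrl -certfile certs | openssl pkcs7 -print_certs -noout
```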

### Method 2: Using inline certificates (Deprecated)

**Note:** The `caCert` field is deprecated. Use `caCertRef` for better security and management.

If you must use the inline method, encode the certificate content with base64:

``` bash
cat certs
-----BEGIN CERTIFICATE-----
certificates first content
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
certificates second content
-----END CERTIFICATE-----

cat certs | base64
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCmNlcnRpZmljYXRlcyBmaXJzdCBjb250ZW50Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpjZXJ0aWZpY2F0ZXMgc2Vjb25kIGNvbnRlbnQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
```

Then set the encoded result in the BSL:

``` yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
# ...
spec:
  # ...
  default: true
  objectStorage:
    bucket: velero
    caCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCmNlcnRpZmljYXRlcyBmaXJzdCBjb250ZW50Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpjZXJ0aWZpY2F0ZXMgc2Vjb25kIGNvbnRlbnQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  # ...
```
@@ -23,18 +23,91 @@ velero install \
Velero will then automatically use the provided CA bundle to verify TLS connections to
that storage provider when backing up and restoring.

## Trusting a self-signed certificate using Kubernetes Secrets (Recommended)

The recommended approach for managing CA certificates is to store them in a Kubernetes Secret and reference them in the BackupStorageLocation using `caCertRef`. This provides better security and easier certificate management:

1. Create a Secret containing your CA certificate:

```bash
kubectl create secret generic storage-ca-cert \
  --from-file=ca-bundle.crt=<PATH_TO_CA_BUNDLE> \
  -n velero
```

2. Create or update your BackupStorageLocation to reference the Secret:

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: <YOUR_PROVIDER>
  objectStorage:
    bucket: <YOUR_BUCKET>
    caCertRef:
      name: storage-ca-cert
      key: ca-bundle.crt
  # ... other configuration
```

### Benefits of using Secrets

- **Security**: Certificates are stored encrypted in etcd
- **Certificate Rotation**: Update the Secret to rotate certificates without modifying the BackupStorageLocation (see the sketch below)
- **RBAC**: Control access to certificates using Kubernetes RBAC
- **Separation of Concerns**: Keep sensitive certificate data separate from configuration
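
One hedged way to rotate, reusing the Secret name and key from the example above — re-rendering the Secret and applying it updates it in place:

```bash
kubectl create secret generic storage-ca-cert \
  --from-file=ca-bundle.crt=<PATH_TO_NEW_CA_BUNDLE> \
  -n velero --dry-run=client -o yaml | kubectl apply -f -
```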

## Trusting a self-signed certificate with the Velero client

When using Velero client commands like describe, download, or logs to access backups or restores
in storage secured by a self-signed certificate, the CA certificate can be configured in two ways:

**Note**: As of Velero v1.15, the CLI automatically discovers certificates configured in the BackupStorageLocation. If you have configured certificates using either `caCert` (deprecated) or `caCertRef` (recommended) in your BSL, you no longer need to specify the `--cacert` flag for backup describe, download, or logs commands.

-1. **Using the `--cacert` flag** (legacy method):
-```bash
-velero backup describe my-backup --cacert <PATH_TO_CA_BUNDLE>
-```
+### Automatic Certificate Discovery

The Velero CLI automatically discovers and uses CA certificates from the BackupStorageLocation configuration. The resolution order is:

1. **`--cacert` flag** (if provided) - Takes highest precedence
2. **`caCertRef`** - References a Secret containing the certificate (recommended)
3. **`caCert`** - Inline certificate in the BSL (deprecated)

Examples:

```bash
# Automatic discovery (no flag needed if BSL has caCertRef or caCert configured)
velero backup describe my-backup
velero backup download my-backup
velero backup logs my-backup

# Manual override (takes precedence over BSL configuration)
velero backup describe my-backup --cacert <PATH_TO_CA_BUNDLE>
```

### Configuring CA Certificates in BackupStorageLocation

You can configure CA certificates in the BackupStorageLocation using either method:

1. **Using `caCertRef` (Recommended)**:

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups
    caCertRef:
      name: storage-ca-cert
      key: ca-bundle.crt
  config:
    region: us-east-1
```

-2. **Configuring the CA certificate in the BackupStorageLocation**:
+2. **Using inline `caCert` (Deprecated)**:

```yaml
apiVersion: velero.io/v1
@@ -51,7 +124,7 @@ in storage secured by a self-signed certificate, the CA certificate can be confi
    region: us-east-1
```

-When the CA certificate is configured in the BackupStorageLocation, Velero client commands will automatically use it without requiring the `--cacert` flag.
+When the CA certificate is configured in the BackupStorageLocation using either method, Velero client commands will automatically discover and use it without requiring the `--cacert` flag.

## Error with client certificate with custom S3 server

@@ -426,6 +426,70 @@ For detailed information, see [Cache PVC Configuration for Data Movement Restore
}
```

### Pod Labels Configuration (`podLabels`)

Add customized labels for data mover pods to support third-party integrations and environment-specific requirements.

If `podLabels` is configured, it supersedes Velero's [in-tree third-party labels](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L19-L21).
If `podLabels` is not configured, Velero uses the in-tree third-party labels for compatibility with common cloud providers and networking solutions.

The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.

#### Configuration Example
```json
{
  "podLabels": {
    "spectrocloud.com/connection": "proxy",
    "gnp/k8s-api-access": "",
    "gnp/monitoring-client": "",
    "np/s3-backup-backend": "",
    "cp/inject-truststore": "extended"
  }
}
```

#### Use Cases
- **Proxy Configuration**: Kubernetes environments that require proxy settings for external connections, configured via labels
- **Firewall Rules**: Network policies configured based on pod labels for traffic control
- **Cloud Provider Integration**: Labels required by managed Kubernetes services (AKS, EKS, GKE)
- **Security Policy Injection**: Labels that trigger security agent or certificate injection

#### Important Notes
- **Third-party Label Replacement**: When `podLabels` is configured, Velero's built-in in-tree labels are NOT automatically added
- **Explicit Configuration Required**: If you need both custom labels and in-tree third-party labels, explicitly include the in-tree labels in the `podLabels` configuration (a combined example follows below)
- **In-tree Labels**: The default in-tree labels include support for Azure workload identity

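For instance, to keep Azure workload identity working while adding a custom label, include the in-tree label explicitly. The exact in-tree keys live in `pkg/util/third_party.go`; the `azure.workload.identity/use` key below is an assumption based on that file's description, so verify it against your Velero version:

```json
{
  "podLabels": {
    "azure.workload.identity/use": "true",
    "spectrocloud.com/connection": "proxy"
  }
}
```
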
### Pod Annotations Configuration (`podAnnotations`)

Add customized annotations for data mover pods to support third-party integrations and pod-level configuration.

If `podAnnotations` is configured, it supersedes Velero's [in-tree third-party annotations](https://github.com/vmware-tanzu/velero/blob/94f64639cee09c5caaa65b65ab5f42175f41c101/pkg/util/third_party.go#L23-L25).
If `podAnnotations` is not configured, Velero uses the in-tree third-party annotations for compatibility with common cloud providers and networking solutions.

The configurations work for DataUpload, DataDownload, PodVolumeBackup, and PodVolumeRestore pods.

#### Configuration Example
```json
{
  "podAnnotations": {
    "iam.amazonaws.com/role": "velero-backup-role",
    "vault.hashicorp.com/agent-inject": "true",
    "prometheus.io/scrape": "true",
    "custom.company.com/environment": "production"
  }
}
```

#### Use Cases
- **Secret Management Integration**: HashiCorp Vault or other secret managers using annotations for automatic secret injection
- **Monitoring and Observability**: Prometheus scrape configurations and other monitoring tool annotations
- **Custom Application Integration**: Company-specific annotations for operational tooling

#### Important Notes
- **Third-party Annotation Replacement**: When `podAnnotations` is configured, Velero's built-in in-tree annotations are NOT automatically added
- **Explicit Configuration Required**: If you need both custom annotations and in-tree third-party annotations, explicitly include the in-tree annotations in the `podAnnotations` configuration
- **In-tree Annotations**: The default in-tree annotations include support for AWS IAM roles

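To confirm the metadata actually landed, inspect a data mover pod while a backup or restore is running; the pod name below is a placeholder:

```bash
kubectl get pod <DATA_MOVER_POD_NAME> -n velero \
  -o jsonpath='{.metadata.labels}{"\n"}{.metadata.annotations}{"\n"}'
```
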
## Complete Configuration Example
Here's a comprehensive example showing how all configuration sections work together:

@@ -492,6 +556,19 @@ Here's a comprehensive example showing how all configuration sections work toget
  "cachePVC": {
    "thresholdInGB": 1,
    "storageClass": "cache-optimized-storage"
  },
  "podLabels": {
    "spectrocloud.com/connection": "proxy",
    "gnp/k8s-api-access": "",
    "gnp/monitoring-client": "",
    "np/s3-backup-backend": "",
    "cp/inject-truststore": "extended"
  },
  "podAnnotations": {
    "iam.amazonaws.com/role": "velero-backup-role",
    "vault.hashicorp.com/agent-inject": "true",
    "prometheus.io/scrape": "true",
    "custom.company.com/environment": "production"
  }
}
```
@@ -508,6 +585,7 @@ This configuration:
- Enables privileged permission for PodVolume pods
- Enables the cache PVC for file system restore
- Sets the cache threshold to 1GB and uses a dedicated StorageClass
- Uses customized labels and annotations for data mover pods

## Troubleshooting

@@ -62,6 +62,8 @@ GINKGO_LABELS ?=
# https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
FAIL_FAST ?= false

+VERSION ?= main
+
VELERO_CLI ?=$$(pwd)/../_output/bin/$(GOOS)/$(GOARCH)/velero

VELERO_IMAGE ?= velero/velero:main
@@ -83,7 +85,7 @@ UPGRADE_FROM_VELERO_VERSION ?= v1.15.2,v1.16.2
# to the end, nil string will be set if UPGRADE_FROM_VELERO_CLI is shorter than UPGRADE_FROM_VELERO_VERSION
UPGRADE_FROM_VELERO_CLI ?=

-MIGRATE_FROM_VELERO_VERSION ?= v1.15.2,self
+MIGRATE_FROM_VELERO_VERSION ?= v1.16.2,$(VERSION)
MIGRATE_FROM_VELERO_CLI ?=

VELERO_NAMESPACE ?= velero

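These defaults can be overridden per invocation with standard make variable assignment. A sketch — the target name `run-e2e` is an assumption about this Makefile, so check the available targets in `test/` first:

```bash
make run-e2e \
  MIGRATE_FROM_VELERO_VERSION=v1.16.2,main \
  VELERO_IMAGE=velero/velero:main
```
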
@@ -127,7 +127,7 @@ func init() {
	flag.StringVar(
		&test.VeleroCfg.UpgradeFromVeleroVersion,
		"upgrade-from-velero-version",
-		"v1.7.1",
+		"v1.16.2",
		"comma-separated list of Velero version to be tested with for the pre-upgrade velero server.",
	)
	flag.StringVar(
@@ -139,7 +139,7 @@ func init() {
	flag.StringVar(
		&test.VeleroCfg.MigrateFromVeleroVersion,
		"migrate-from-velero-version",
-		"self",
+		"v1.17.1",
		"comma-separated list of Velero version to be tested with on source cluster.",
	)
	flag.StringVar(
@@ -727,6 +727,36 @@ func TestE2e(t *testing.T) {
		}
	}

	// Validate the Velero version
	if len(test.VeleroCfg.VeleroVersion) > 0 {
		if err := veleroutil.ValidateVeleroVersion(test.VeleroCfg.VeleroVersion); err != nil {
			fmt.Println("VeleroVersion is invalid: ", test.VeleroCfg.VeleroVersion)
			t.Error(err)
		}
	}

	// Validate the UpgradeFromVeleroVersion if provided
	if len(test.VeleroCfg.UpgradeFromVeleroVersion) > 0 {
		versions := strings.Split(test.VeleroCfg.UpgradeFromVeleroVersion, ",")
		for _, version := range versions {
			if err := veleroutil.ValidateVeleroVersion(version); err != nil {
				fmt.Println("UpgradeFromVeleroVersion is invalid: ", version)
				t.Error(err)
			}
		}
	}

	// Validate the MigrateFromVeleroVersion if provided
	if len(test.VeleroCfg.MigrateFromVeleroVersion) > 0 {
		versions := strings.Split(test.VeleroCfg.MigrateFromVeleroVersion, ",")
		for _, version := range versions {
			if err := veleroutil.ValidateVeleroVersion(version); err != nil {
				fmt.Println("MigrateFromVeleroVersion is invalid: ", version)
				t.Error(err)
			}
		}
	}

	var err error
	if err = GetKubeConfigContext(); err != nil {
		fmt.Println(err)

@@ -23,7 +23,6 @@ import (

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
-	"golang.org/x/mod/semver"

	"github.com/vmware-tanzu/velero/test"
	framework "github.com/vmware-tanzu/velero/test/e2e/test"
@@ -47,7 +46,8 @@ type migrationE2E struct {
func MigrationWithSnapshots() {
	for _, veleroCLI2Version := range veleroutil.GetVersionList(
		test.VeleroCfg.MigrateFromVeleroCLI,
-		test.VeleroCfg.MigrateFromVeleroVersion) {
+		test.VeleroCfg.MigrateFromVeleroVersion,
+	) {
		framework.TestFunc(
			&migrationE2E{
				useVolumeSnapshots: true,
@@ -60,7 +60,8 @@ func MigrationWithSnapshots() {
func MigrationWithFS() {
	for _, veleroCLI2Version := range veleroutil.GetVersionList(
		test.VeleroCfg.MigrateFromVeleroCLI,
-		test.VeleroCfg.MigrateFromVeleroVersion) {
+		test.VeleroCfg.MigrateFromVeleroVersion,
+	) {
		framework.TestFunc(
			&migrationE2E{
				useVolumeSnapshots: false,
@@ -124,23 +125,26 @@ func (m *migrationE2E) Backup() error {
	var err error

	if m.veleroCLI2Version.VeleroCLI == "" {
-		//Assume tag of velero server image is identical to velero CLI version
-		//Download velero CLI if it's empty according to velero CLI version
+		// Assume tag of velero server image is identical to velero CLI version
+		// Download velero CLI if it's empty according to velero CLI version
		By(
-			fmt.Sprintf("Install the expected version Velero CLI %s",
-				m.veleroCLI2Version.VeleroVersion),
+			fmt.Sprintf(
+				"Install the expected version Velero CLI %s",
+				m.veleroCLI2Version.VeleroVersion,
+			),
			func() {
-				// "self" represents 1.14.x and future versions
-				if m.veleroCLI2Version.VeleroVersion == "self" {
-					OriginVeleroCfg, err = veleroutil.SetImagesToDefaultValues(
-						OriginVeleroCfg,
-						m.veleroCLI2Version.VeleroVersion,
-					)
-					Expect(err).To(Succeed(),
-						"Fail to set images for the migrate-from Velero installation.")
+				// No need to download Velero CLI if the version is same as the VeleroVersion.
+				// Uses the local built Velero CLI.
+				if m.veleroCLI2Version.VeleroVersion == m.VeleroCfg.VeleroVersion {
+					m.veleroCLI2Version.VeleroCLI = m.VeleroCfg.VeleroCLI
+				} else {
+					OriginVeleroCfg, err = veleroutil.SetImagesToDefaultValues(
+						OriginVeleroCfg,
+						m.veleroCLI2Version.VeleroVersion,
+					)
+					Expect(err).To(Succeed(),
+						"Fail to set images for the migrate-from Velero installation.")

					m.veleroCLI2Version.VeleroCLI, err = veleroutil.InstallVeleroCLI(
						m.Ctx,
						m.veleroCLI2Version.VeleroVersion)
@@ -165,9 +169,11 @@ func (m *migrationE2E) Backup() error {
		version, err := veleroutil.GetVeleroVersion(m.Ctx, OriginVeleroCfg.VeleroCLI, true)
		Expect(err).To(Succeed(), "Fail to get Velero version")
		OriginVeleroCfg.VeleroVersion = version
-		if OriginVeleroCfg.WorkerOS == common.WorkerOSWindows &&
-			(version != "main" && semver.Compare(version, "v1.16") < 0) {
-			Skip(fmt.Sprintf("Velero CLI version %s doesn't support Windows migration test.", version))
+		if OriginVeleroCfg.WorkerOS == common.WorkerOSWindows {
+			result, err := veleroutil.VersionNoOlderThan(version, "v1.16")
+			if err != nil || !result {
+				Skip(fmt.Sprintf("Velero CLI version %s doesn't support Windows migration test.", version))
+			}
		}

		if OriginVeleroCfg.SnapshotMoveData {
@@ -175,13 +181,11 @@ func (m *migrationE2E) Backup() error {
		}

		Expect(veleroutil.VeleroInstall(m.Ctx, &OriginVeleroCfg, false)).To(Succeed())
-		if m.veleroCLI2Version.VeleroVersion != "self" {
-			Expect(veleroutil.CheckVeleroVersion(
-				m.Ctx,
-				OriginVeleroCfg.VeleroCLI,
-				OriginVeleroCfg.MigrateFromVeleroVersion,
-			)).To(Succeed())
-		}
+		Expect(veleroutil.CheckVeleroVersion(
+			m.Ctx,
+			OriginVeleroCfg.VeleroCLI,
+			OriginVeleroCfg.MigrateFromVeleroVersion,
+		)).To(Succeed())
	},
)

@@ -23,6 +23,7 @@ import (
	"fmt"
	"os"
	"os/exec"
+	"regexp"
	"slices"
	"strings"
	"time"
@@ -311,6 +312,80 @@ func cleanVSpherePluginConfig(c clientset.Interface, ns, secretName, configMapNa
	return nil
}

// ValidateVeleroVersion checks if the given version is valid.
// version can be in the format of 'main', 'release-x.y(-dev)', or 'vX.Y(.Z)'.
func ValidateVeleroVersion(version string) error {
	mainRe := regexp.MustCompile(`^main$`)
	// Allow multi-digit major/minor components (e.g. release-1.17-dev).
	releaseRe := regexp.MustCompile(`^release-(\d+)\.(\d+)(-dev)?$`)
	tagRe := regexp.MustCompile(`^v(\d+)\.(\d+)(\.\d+)?$`)

	if mainRe.MatchString(version) || releaseRe.MatchString(version) || tagRe.MatchString(version) {
		return nil
	}
	return fmt.Errorf("invalid Velero version: %s, Velero version must be 'main', 'release-x.y(-dev)', or 'vX.Y(.Z)'", version)
}

// VersionNoOlderThan checks if the given version is no older than targetVersion.
// version can be in the format of 'main', 'release-x.y(-dev)', or 'vX.Y(.Z)'.
// targetVersion must be in the format of 'main' or 'vX.Y(.Z)'.
// It returns true if version is no older than targetVersion.
func VersionNoOlderThan(version string, targetVersion string) (bool, error) {
	mainRe := regexp.MustCompile(`^main$`)
	releaseRe := regexp.MustCompile(`^release-(\d+)\.(\d+)(-dev)?$`)
	tagRe := regexp.MustCompile(`^v(\d+)\.(\d+)(\.\d+)?$`)

	if err := ValidateVeleroVersion(version); err != nil {
		return false, err
	}
	if !tagRe.MatchString(targetVersion) && !mainRe.MatchString(targetVersion) {
		return false, fmt.Errorf("targetVersion %q is invalid: it must be 'main' or 'vX.Y(.Z)'", targetVersion)
	}

	fmt.Printf("version: %s, targetVersion: %s\n", version, targetVersion)

	switch {
	case mainRe.MatchString(version):
		// main is always the latest
		return true, nil

	case releaseRe.MatchString(version):
		// release-x.y(-dev) is treated as vX.Y.0
		matches := releaseRe.FindStringSubmatch(version)
		branch := "v" + matches[1] + "." + matches[2]

		if mainRe.MatchString(targetVersion) {
			return false, nil
		}
		// Delegate the comparison to semver instead of comparing the matched
		// components as strings, which breaks for multi-digit minor versions.
		return semver.Compare(branch, targetVersion) >= 0, nil

	case tagRe.MatchString(version):
		if mainRe.MatchString(targetVersion) {
			return false, nil
		}
		return semver.Compare(version, targetVersion) >= 0, nil
	}

	return false, fmt.Errorf("unknown error in VersionNoOlderThan")
}
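
A hedged usage sketch of the helper; the expected results in the comments follow from the rules above, not from running the suite:

```go
func ExampleVersionNoOlderThan() {
	ok, _ := VersionNoOlderThan("release-1.17-dev", "v1.16") // release-1.17 counts as v1.17.0
	fmt.Println(ok)                                          // true

	ok, _ = VersionNoOlderThan("v1.15.2", "v1.16") // an older patch release
	fmt.Println(ok)                                // false

	ok, _ = VersionNoOlderThan("main", "v1.16") // main is always newest
	fmt.Println(ok)                             // true
}
```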

func installVeleroServer(
	ctx context.Context,
	cli string,
@@ -333,10 +408,12 @@ func installVeleroServer(
	// TODO: need to consider align options.UseNodeAgentWindows usage
	// with options.UseNodeAgent
	// Only versions v1.16.0 and newer support the Windows node agent.
-	if options.WorkerOS == common.WorkerOSWindows &&
-		(semver.Compare(version, "v1.16") >= 0 || version == "main") {
-		fmt.Println("Install node-agent-windows. The Velero version is ", version)
-		args = append(args, "--use-node-agent-windows")
+	if options.WorkerOS == common.WorkerOSWindows {
+		result, err := VersionNoOlderThan(version, "v1.16")
+		if err == nil && result {
+			fmt.Println("Install node-agent-windows. The Velero version is ", version)
+			args = append(args, "--use-node-agent-windows")
+		}
	}

	if options.DefaultVolumesToFsBackup {
@@ -452,10 +529,12 @@ func installVeleroServer(
	}

	// Only versions no older than v1.15 support --backup-repository-configmap.
-	if options.BackupRepoConfigMap != "" &&
-		(semver.Compare(version, "v1.15") >= 0 || version == "main") {
-		fmt.Println("Associate backup repository ConfigMap. The Velero version is ", version)
-		args = append(args, fmt.Sprintf("--backup-repository-configmap=%s", options.BackupRepoConfigMap))
+	if options.BackupRepoConfigMap != "" {
+		result, err := VersionNoOlderThan(version, "v1.15")
+		if err == nil && result {
+			fmt.Println("Associate backup repository ConfigMap. The Velero version is ", version)
+			args = append(args, fmt.Sprintf("--backup-repository-configmap=%s", options.BackupRepoConfigMap))
+		}
	}

	if options.RepoMaintenanceJobConfigMap != "" {

@@ -128,7 +128,11 @@ func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig

	ret.Plugins = ""

-	versionWithoutPatch := semver.MajorMinor(version)
+	versionWithoutPatch := "main"
+	if version != "main" {
+		versionWithoutPatch = semver.MajorMinor(version)
+	}

	// The migration case reads images from the ImagesMatrix map.
	images, ok := ImagesMatrix[versionWithoutPatch]
	if !ok {
@@ -153,12 +157,6 @@ func SetImagesToDefaultValues(config VeleroConfig, version string) (VeleroConfig
		ret.Plugins = images[AWS][0]
	}

-	// Because Velero CSI plugin is deprecated in v1.14,
-	// only need to install it for version lower than v1.14.
-	if strings.Contains(ret.Features, FeatureCSI) &&
-		semver.Compare(versionWithoutPatch, "v1.14") < 0 {
-		ret.Plugins = ret.Plugins + "," + images[CSI][0]
-	}
	if ret.SnapshotMoveData && ret.CloudProvider == Azure {
		ret.Plugins = ret.Plugins + "," + images[AWS][0]
	}
@@ -1567,9 +1565,6 @@ func RestorePVRNum(ctx context.Context, veleroNamespace, restoreName string) (in
}

func IsSupportUploaderType(version string) (bool, error) {
-	if strings.Contains(version, "self") {
-		return true, nil
-	}
	verSupportUploaderType, err := ver.ParseSemantic("v1.10.0")
	if err != nil {
		return false, err