mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-01-05 13:05:17 +00:00
exposer supports cache volume
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
pkg/exposer/cache_volume_test.go | +80 (new file)
@@ -0,0 +1,80 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exposer
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+func TestGetCacheVolumeSize(t *testing.T) {
+    tests := []struct {
+        name     string
+        dataSize int64
+        info     *CacheConfigs
+        expected int64
+    }{
+        {
+            name:     "nil info",
+            dataSize: 1024,
+            expected: 0,
+        },
+        {
+            name:     "0 data size",
+            info:     &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
+            expected: 2 << 30,
+        },
+        {
+            name:     "0 threshold",
+            dataSize: 2048,
+            info:     &CacheConfigs{Limit: 1 << 30},
+            expected: 2 << 30,
+        },
+        {
+            name:     "data size is smaller",
+            dataSize: 2048,
+            info:     &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 5120},
+            expected: 0,
+        },
+        {
+            name:     "data size is larger",
+            dataSize: 2048,
+            info:     &CacheConfigs{Limit: 1 << 30, ResidentThreshold: 1024},
+            expected: 2 << 30,
+        },
+        {
+            name:     "limit smaller than 1G",
+            dataSize: 2048,
+            info:     &CacheConfigs{Limit: 5120, ResidentThreshold: 1024},
+            expected: 1 << 30,
+        },
+        {
+            name:     "larger than 1G after inflate",
+            dataSize: 2048,
+            info:     &CacheConfigs{Limit: (1 << 30) - 1024, ResidentThreshold: 1024},
+            expected: 2 << 30,
+        },
+    }
+
+    for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+            size := getCacheVolumeSize(test.dataSize, test.info)
+            require.Equal(t, test.expected, size)
+        })
+    }
+}
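Reviewer note: getCacheVolumeSize itself is not part of this hunk. Reading the cases above, the rule they encode appears to be: no cache config means no cache volume; a known data size below ResidentThreshold needs no cache volume; otherwise the configured Limit is doubled and rounded up to a whole GiB, with 1GiB as the floor. A minimal sketch of that rule, assuming only the CacheConfigs fields shown above (not the committed implementation):

// Sketch inferred from the test cases above; not the committed implementation.
func getCacheVolumeSizeSketch(dataSize int64, info *CacheConfigs) int64 {
    if info == nil {
        return 0 // no cache config, no cache volume
    }

    // A known data size below the resident threshold needs no cache volume.
    if dataSize > 0 && info.ResidentThreshold > 0 && dataSize < info.ResidentThreshold {
        return 0
    }

    // Double the configured limit, then round up to a whole GiB (1GiB floor).
    gi := int64(1) << 30
    inflated := info.Limit * 2
    return (inflated + gi - 1) / gi * gi
}

All seven cases above pass against this sketch; for example, Limit 5120 inflates to 10240 and rounds up to 1<<30.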
@@ -316,19 +316,12 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
 		diag += fmt.Sprintf("error getting restore pvc %s, err: %v\n", restorePVCName, err)
 	}
 
-	var cachePVC *corev1api.PersistentVolumeClaim
-	if pod.Spec.Volumes != nil {
-		for _, v := range pod.Spec.Volumes {
-			if v.Name == cacheVolumeName {
-				cachePVC, err = e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
-				if err != nil {
-					cachePVC = nil
-					diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
-				}
-
-				break
-			}
-		}
-	}
+	cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
+	if err != nil {
+		cachePVC = nil
+
+		if !apierrors.IsNotFound(err) {
+			diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
+		}
+	}
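Reviewer note: the change above replaces the volume-list scan with a direct lookup of the cache PVC and stays quiet when the PVC simply does not exist, so restores without a cache volume no longer add noise to the diagnostics; only unexpected errors are reported. The rest of the cache branch sits outside this hunk; judging from the expected strings asserted in the diagnose tests below, its continuation plausibly looks like the following (kube.DiagnosePVC and kube.DiagnosePV are assumed here, mirroring how the restore PVC and PV are reported):

// Assumed continuation, inferred from the diagnose output asserted in the tests.
if cachePVC != nil {
    diag += kube.DiagnosePVC(cachePVC)

    if cachePVC.Spec.VolumeName != "" {
        if pv, err := e.kubeClient.CoreV1().PersistentVolumes().Get(ctx, cachePVC.Spec.VolumeName, metav1.GetOptions{}); err != nil {
            diag += fmt.Sprintf("error getting cache pv %s, err: %v\n", cachePVC.Spec.VolumeName, err)
        } else {
            diag += kube.DiagnosePV(pv)
        }
    }
}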
@@ -26,6 +26,7 @@ import (
 	appsv1api "k8s.io/api/apps/v1"
 	corev1api "k8s.io/api/core/v1"
 	storagev1api "k8s.io/api/storage/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
@@ -105,6 +106,10 @@ func TestRestoreExpose(t *testing.T) {
 		targetPVCName   string
 		targetNamespace string
 		kubeReactors    []reactor
+		cacheVolume     *CacheConfigs
+		expectBackupPod bool
+		expectBackupPVC bool
+		expectCachePVC  bool
 		err             string
 	}{
 		{
@@ -167,6 +172,70 @@ func TestRestoreExpose(t *testing.T) {
 			},
 			err: "error to create restore pvc: fake-create-error",
 		},
+		{
+			name:            "succeed",
+			targetPVCName:   "fake-target-pvc",
+			targetNamespace: "fake-ns",
+			ownerRestore:    restore,
+			kubeClientObj: []runtime.Object{
+				targetPVCObj,
+				daemonSet,
+				storageClass,
+			},
+			expectBackupPod: true,
+			expectBackupPVC: true,
+		},
+		{
+			name:            "succeed, cache config, no cache volume",
+			targetPVCName:   "fake-target-pvc",
+			targetNamespace: "fake-ns",
+			ownerRestore:    restore,
+			kubeClientObj: []runtime.Object{
+				targetPVCObj,
+				daemonSet,
+				storageClass,
+			},
+			cacheVolume:     &CacheConfigs{},
+			expectBackupPod: true,
+			expectBackupPVC: true,
+		},
+		{
+			name:            "create cache volume fail",
+			targetPVCName:   "fake-target-pvc",
+			targetNamespace: "fake-ns",
+			ownerRestore:    restore,
+			kubeClientObj: []runtime.Object{
+				targetPVCObj,
+				daemonSet,
+				storageClass,
+			},
+			cacheVolume: &CacheConfigs{Limit: 1024},
+			kubeReactors: []reactor{
+				{
+					verb:     "create",
+					resource: "persistentvolumeclaims",
+					reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
+						return true, nil, errors.New("fake-create-error")
+					},
+				},
+			},
+			err: "error to create cache pvc: fake-create-error",
+		},
+		{
+			name:            "succeed with cache volume",
+			targetPVCName:   "fake-target-pvc",
+			targetNamespace: "fake-ns",
+			ownerRestore:    restore,
+			kubeClientObj: []runtime.Object{
+				targetPVCObj,
+				daemonSet,
+				storageClass,
+			},
+			cacheVolume:     &CacheConfigs{Limit: 1024},
+			expectBackupPod: true,
+			expectBackupPVC: true,
+			expectCachePVC:  true,
+		},
 	}
 
 	for _, test := range tests {
@@ -203,9 +272,36 @@ func TestRestoreExpose(t *testing.T) {
 				Resources:      corev1api.ResourceRequirements{},
 				ExposeTimeout:  time.Millisecond,
 				LoadAffinity:   nil,
+				CacheVolume:    test.cacheVolume,
 			},
 		)
-			require.EqualError(t, err, test.err)
+
+			if test.err != "" {
+				require.EqualError(t, err, test.err)
+			} else {
+				require.NoError(t, err)
+			}
+
+			_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
+			if test.expectBackupPod {
+				require.NoError(t, err)
+			} else {
+				require.True(t, apierrors.IsNotFound(err))
+			}
+
+			_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
+			if test.expectBackupPVC {
+				require.NoError(t, err)
+			} else {
+				require.True(t, apierrors.IsNotFound(err))
+			}
+
+			_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
+			if test.expectCachePVC {
+				require.NoError(t, err)
+			} else {
+				require.True(t, apierrors.IsNotFound(err))
+			}
 		})
 	}
 }
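Reviewer note: the assertions above look the cache PVC up via getCachePVCName(ownerObject), which this diff does not define. The "fake-restore-cache" and "fake-backup-cache" fixture names later in the diff suggest the helper simply suffixes the owner name; a hypothetical shape, with the parameter type assumed from the exposer signatures:

// Hypothetical helper implied by the fixture names; the real signature may differ.
func getCachePVCName(ownerObject corev1api.ObjectReference) string {
    return ownerObject.Name + "-cache"
}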
@@ -651,6 +747,38 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
 		},
 	}
 
+	cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: velerov1.DefaultNamespace,
+			Name:      "fake-restore-cache",
+			UID:       "fake-cache-pvc-uid",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: restore.APIVersion,
+					Kind:       restore.Kind,
+					Name:       restore.Name,
+					UID:        restore.UID,
+				},
+			},
+		},
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			VolumeName: "fake-pv-cache",
+		},
+		Status: corev1api.PersistentVolumeClaimStatus{
+			Phase: corev1api.ClaimPending,
+		},
+	}
+
+	cachePV := corev1api.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "fake-pv-cache",
+		},
+		Status: corev1api.PersistentVolumeStatus{
+			Phase:   corev1api.VolumePending,
+			Message: "fake-pv-message",
+		},
+	}
+
 	nodeAgentPod := corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
@@ -762,6 +890,44 @@ Pod velero/fake-restore, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 PVC velero/fake-restore, phase Pending, binding to fake-pv
 PV fake-pv, phase Pending, reason , message fake-pv-message
 end diagnose restore exposer`,
 		},
+		{
+			name:         "cache pvc with volume name, no pv",
+			ownerRestore: restore,
+			kubeClientObj: []runtime.Object{
+				&restorePodWithNodeName,
+				&restorePVCWithVolumeName,
+				&cachePVCWithVolumeName,
+				&nodeAgentPod,
+			},
+			expected: `begin diagnose restore exposer
+Pod velero/fake-restore, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+PVC velero/fake-restore, phase Pending, binding to fake-pv
+error getting restore pv fake-pv, err: persistentvolumes "fake-pv" not found
+PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
+error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
+end diagnose restore exposer`,
+		},
+		{
+			name:         "cache pvc with volume name, pv exists",
+			ownerRestore: restore,
+			kubeClientObj: []runtime.Object{
+				&restorePodWithNodeName,
+				&restorePVCWithVolumeName,
+				&cachePVCWithVolumeName,
+				&restorePV,
+				&cachePV,
+				&nodeAgentPod,
+			},
+			expected: `begin diagnose restore exposer
+Pod velero/fake-restore, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+PVC velero/fake-restore, phase Pending, binding to fake-pv
+PV fake-pv, phase Pending, reason , message fake-pv-message
+PVC velero/fake-restore-cache, phase Pending, binding to fake-pv-cache
+PV fake-pv-cache, phase Pending, reason , message fake-pv-message
+end diagnose restore exposer`,
+		},
 		{
@@ -279,19 +279,12 @@ func (e *podVolumeExposer) DiagnoseExpose(ctx context.Context, ownerObject corev
 		diag += fmt.Sprintf("error getting hosting pod %s, err: %v\n", hostingPodName, err)
 	}
 
-	var cachePVC *corev1api.PersistentVolumeClaim
-	if pod.Spec.Volumes != nil {
-		for _, v := range pod.Spec.Volumes {
-			if v.Name == cacheVolumeName {
-				cachePVC, err = e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
-				if err != nil {
-					cachePVC = nil
-					diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
-				}
-
-				break
-			}
-		}
-	}
+	cachePVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(ctx, getCachePVCName(ownerObject), metav1.GetOptions{})
+	if err != nil {
+		cachePVC = nil
+
+		if !apierrors.IsNotFound(err) {
+			diag += fmt.Sprintf("error getting cache pvc %s, err: %v\n", getCachePVCName(ownerObject), err)
+		}
+	}
@@ -11,10 +11,12 @@ import (
 	"github.com/stretchr/testify/require"
 	appsv1api "k8s.io/api/apps/v1"
 	corev1api "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
+	clientTesting "k8s.io/client-go/testing"
 	clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -72,6 +74,9 @@ func TestPodVolumeExpose(t *testing.T) {
 		exposeParam                  PodVolumeExposeParam
 		funcGetPodVolumeHostPath     func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error)
 		funcExtractPodVolumeHostPath func(context.Context, string, kubernetes.Interface, string, string) (string, error)
+		kubeReactors                 []reactor
+		expectBackupPod              bool
+		expectCachePVC               bool
 		err                          string
 	}{
 		{
@@ -189,6 +194,7 @@ func TestPodVolumeExpose(t *testing.T) {
 			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
 				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
 			},
+			expectBackupPod: true,
 		},
 		{
 			name: "succeed with privileged pod",
@@ -212,6 +218,89 @@ func TestPodVolumeExpose(t *testing.T) {
 			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
 				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
 			},
+			expectBackupPod: true,
 		},
+		{
+			name:        "succeed, cache config, no cache volume",
+			ownerBackup: backup,
+			exposeParam: PodVolumeExposeParam{
+				ClientNamespace: "fake-ns",
+				ClientPodName:   "fake-client-pod",
+				ClientPodVolume: "fake-client-volume",
+				CacheVolume:     &CacheConfigs{},
+			},
+			kubeClientObj: []runtime.Object{
+				podWithNode,
+				node,
+				daemonSet,
+			},
+			funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
+				return datapath.AccessPoint{
+					ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
+				}, nil
+			},
+			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
+				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
+			},
+			expectBackupPod: true,
+		},
+		{
+			name:        "create cache volume fail",
+			ownerBackup: backup,
+			exposeParam: PodVolumeExposeParam{
+				ClientNamespace: "fake-ns",
+				ClientPodName:   "fake-client-pod",
+				ClientPodVolume: "fake-client-volume",
+				CacheVolume:     &CacheConfigs{Limit: 1024},
+			},
+			kubeClientObj: []runtime.Object{
+				podWithNode,
+				node,
+				daemonSet,
+			},
+			funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
+				return datapath.AccessPoint{
+					ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
+				}, nil
+			},
+			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
+				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
+			},
+			kubeReactors: []reactor{
+				{
+					verb:     "create",
+					resource: "persistentvolumeclaims",
+					reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
+						return true, nil, errors.New("fake-create-error")
+					},
+				},
+			},
+			err: "error to create cache pvc: fake-create-error",
+		},
+		{
+			name:        "succeed with cache volume",
+			ownerBackup: backup,
+			exposeParam: PodVolumeExposeParam{
+				ClientNamespace: "fake-ns",
+				ClientPodName:   "fake-client-pod",
+				ClientPodVolume: "fake-client-volume",
+				CacheVolume:     &CacheConfigs{Limit: 1024},
+			},
+			kubeClientObj: []runtime.Object{
+				podWithNode,
+				node,
+				daemonSet,
+			},
+			funcGetPodVolumeHostPath: func(context.Context, *corev1api.Pod, string, kubernetes.Interface, filesystem.Interface, logrus.FieldLogger) (datapath.AccessPoint, error) {
+				return datapath.AccessPoint{
+					ByPath: "/host_pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount",
+				}, nil
+			},
+			funcExtractPodVolumeHostPath: func(context.Context, string, kubernetes.Interface, string, string) (string, error) {
+				return "/var/lib/kubelet/pods/pod-id-xxx/volumes/kubernetes.io~csi/pvc-id-xxx/mount", nil
+			},
+			expectBackupPod: true,
+			expectCachePVC:  true,
+		},
 	}
 
@@ -219,6 +308,10 @@ func TestPodVolumeExpose(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
 
+			for _, reactor := range test.kubeReactors {
+				fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
+			}
+
 			exposer := podVolumeExposer{
 				kubeClient: fakeKubeClient,
 				log:        velerotest.NewLogger(),
@@ -248,9 +341,23 @@ func TestPodVolumeExpose(t *testing.T) {
 				require.NoError(t, err)
 
 				_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
-				assert.NoError(t, err)
+				require.NoError(t, err)
 			} else {
-				assert.EqualError(t, err, test.err)
+				require.EqualError(t, err, test.err)
 			}
+
+			_, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(t.Context(), ownerObject.Name, metav1.GetOptions{})
+			if test.expectBackupPod {
+				require.NoError(t, err)
+			} else {
+				require.True(t, apierrors.IsNotFound(err))
+			}
+
+			_, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(t.Context(), getCachePVCName(ownerObject), metav1.GetOptions{})
+			if test.expectCachePVC {
+				require.NoError(t, err)
+			} else {
+				require.True(t, apierrors.IsNotFound(err))
+			}
 		})
 	}
@@ -517,6 +624,38 @@ func TestPodVolumeDiagnoseExpose(t *testing.T) {
 		},
 	}
 
+	cachePVCWithVolumeName := corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: velerov1.DefaultNamespace,
+			Name:      "fake-backup-cache",
+			UID:       "fake-cache-pvc-uid",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: backup.APIVersion,
+					Kind:       backup.Kind,
+					Name:       backup.Name,
+					UID:        backup.UID,
+				},
+			},
+		},
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			VolumeName: "fake-pv-cache",
+		},
+		Status: corev1api.PersistentVolumeClaimStatus{
+			Phase: corev1api.ClaimPending,
+		},
+	}
+
+	cachePV := corev1api.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "fake-pv-cache",
+		},
+		Status: corev1api.PersistentVolumeStatus{
+			Phase:   corev1api.VolumePending,
+			Message: "fake-pv-message",
+		},
+	}
+
 	nodeAgentPod := corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
@@ -589,6 +728,37 @@ end diagnose pod volume exposer`,
 			expected: `begin diagnose pod volume exposer
 Pod velero/fake-backup, phase Pending, node name fake-node
 Pod condition Initialized, status True, reason , message fake-pod-message
 end diagnose pod volume exposer`,
 		},
+		{
+			name:        "cache pvc with volume name, no pv",
+			ownerBackup: backup,
+			kubeClientObj: []runtime.Object{
+				&backupPodWithNodeName,
+				&cachePVCWithVolumeName,
+				&nodeAgentPod,
+			},
+			expected: `begin diagnose pod volume exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
+error getting cache pv fake-pv-cache, err: persistentvolumes "fake-pv-cache" not found
+end diagnose pod volume exposer`,
+		},
+		{
+			name:        "cache pvc with volume name, pv exists",
+			ownerBackup: backup,
+			kubeClientObj: []runtime.Object{
+				&backupPodWithNodeName,
+				&cachePVCWithVolumeName,
+				&cachePV,
+				&nodeAgentPod,
+			},
+			expected: `begin diagnose pod volume exposer
+Pod velero/fake-backup, phase Pending, node name fake-node
+Pod condition Initialized, status True, reason , message fake-pod-message
+PVC velero/fake-backup-cache, phase Pending, binding to fake-pv-cache
+PV fake-pv-cache, phase Pending, reason , message fake-pv-message
+end diagnose pod volume exposer`,
+		},
 		{