data mover restore for Windows
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
changelogs/unreleased/8594-Lyndon-Li (new file, 1 addition)
@@ -0,0 +1 @@
+Data mover restore for Windows
@@ -92,6 +92,13 @@ spec:
                 DataMover specifies the data mover to be used by the backup.
                 If DataMover is "" or "velero", the built-in data mover will be used.
               type: string
+            nodeOS:
+              description: NodeOS is OS of the node where the DataDownload is processed.
+              enum:
+              - auto
+              - linux
+              - windows
+              type: string
             operationTimeout:
               description: |-
                 OperationTimeout specifies the time used to wait internal operations,
@@ -144,7 +144,8 @@ spec:
           description: DataUploadStatus is the current status of a DataUpload.
           properties:
             acceptedByNode:
-              description: Node is name of the node where the DataUpload is prepared.
+              description: AcceptedByNode is name of the node where the DataUpload
+                is prepared.
               type: string
             acceptedTimestamp:
               description: |-
@@ -175,6 +176,13 @@ spec:
             node:
               description: Node is name of the node where the DataUpload is processed.
               type: string
+            nodeOS:
+              description: NodeOS is OS of the node where the DataUpload is processed.
+              enum:
+              - auto
+              - linux
+              - windows
+              type: string
             path:
               description: Path is the full path of the snapshot volume being backed
                 up.
File diff suppressed because one or more lines are too long
@@ -54,6 +54,10 @@ type DataDownloadSpec struct {
     // OperationTimeout specifies the time used to wait internal operations,
     // before returning error as timeout.
     OperationTimeout metav1.Duration `json:"operationTimeout"`
+
+    // NodeOS is OS of the node where the DataDownload is processed.
+    // +optional
+    NodeOS NodeOS `json:"nodeOS,omitempty"`
 }
 
 // TargetVolumeSpec is the specification for a target PVC.
@@ -96,6 +96,16 @@ const (
     DataUploadPhaseFailed DataUploadPhase = "Failed"
 )
 
+// NodeOS represents OS of a node.
+// +kubebuilder:validation:Enum=auto;linux;windows
+type NodeOS string
+
+const (
+    NodeOSLinux   NodeOS = "linux"
+    NodeOSWindows NodeOS = "windows"
+    NodeOSAuto    NodeOS = "auto"
+)
+
 // DataUploadStatus is the current status of a DataUpload.
 type DataUploadStatus struct {
     // Phase is the current state of the DataUpload.
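The new NodeOS enum gates where a data mover pod may run, against the node's standard kubernetes.io/os label. A minimal, self-contained sketch of that matching rule (the helper matchesNode is illustrative, not part of the commit):

// matchesNode is a hypothetical helper: it reports whether a request with
// the given NodeOS may land on a node whose kubernetes.io/os label is labelOS.
package main

import "fmt"

type NodeOS string

const (
	NodeOSLinux   NodeOS = "linux"
	NodeOSWindows NodeOS = "windows"
	NodeOSAuto    NodeOS = "auto"
)

// NodeOSAuto (or an empty value) matches any OS; otherwise the enum value
// must equal the node label verbatim.
func matchesNode(os NodeOS, labelOS string) bool {
	switch os {
	case NodeOSAuto, "":
		return true
	default:
		return string(os) == labelOS
	}
}

func main() {
	fmt.Println(matchesNode(NodeOSWindows, "windows")) // true
	fmt.Println(matchesNode(NodeOSLinux, "windows"))   // false
	fmt.Println(matchesNode(NodeOSAuto, "windows"))    // true
}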
@@ -144,7 +154,12 @@ type DataUploadStatus struct {
     // Node is name of the node where the DataUpload is processed.
     // +optional
     Node string `json:"node,omitempty"`
-    // Node is name of the node where the DataUpload is prepared.
+
+    // NodeOS is OS of the node where the DataUpload is processed.
+    // +optional
+    NodeOS NodeOS `json:"nodeOS,omitempty"`
+
+    // AcceptedByNode is name of the node where the DataUpload is prepared.
     // +optional
     AcceptedByNode string `json:"acceptedByNode,omitempty"`
 
@@ -221,4 +236,8 @@ type DataUploadResult struct {
     // +optional
     // +nullable
     DataMoverResult *map[string]string `json:"dataMoverResult,omitempty"`
+
+    // NodeOS is OS of the node where the DataUpload is processed.
+    // +optional
+    NodeOS NodeOS `json:"nodeOS,omitempty"`
 }
@@ -148,6 +148,12 @@ func (d *DataDownloadBuilder) Node(node string) *DataDownloadBuilder {
     return d
 }
 
+// NodeOS sets the DataDownload's Node OS.
+func (d *DataDownloadBuilder) NodeOS(nodeOS velerov2alpha1api.NodeOS) *DataDownloadBuilder {
+    d.object.Spec.NodeOS = nodeOS
+    return d
+}
+
 // AcceptedByNode sets the DataDownload's AcceptedByNode.
 func (d *DataDownloadBuilder) AcceptedByNode(node string) *DataDownloadBuilder {
     d.object.Status.AcceptedByNode = node
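A short usage sketch of the new builder method; builder.ForDataDownload and Result are the existing helpers used elsewhere in this commit, the namespace and name values are illustrative:

// Illustrative only: pin a DataDownload to Windows nodes via the new setter.
dd := builder.ForDataDownload("velero", "dd-1").
	NodeOS(velerov2alpha1api.NodeOSWindows).
	Result()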
@@ -151,6 +151,12 @@ func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
     return d
 }
 
+// NodeOS sets the DataUpload's Node OS.
+func (d *DataUploadBuilder) NodeOS(nodeOS velerov2alpha1api.NodeOS) *DataUploadBuilder {
+    d.object.Status.NodeOS = nodeOS
+    return d
+}
+
 // AcceptedByNode sets the DataUpload's AcceptedByNode.
 func (d *DataUploadBuilder) AcceptedByNode(node string) *DataUploadBuilder {
     d.object.Status.AcceptedByNode = node
@@ -160,7 +160,24 @@ func newdataMoverRestore(logger logrus.FieldLogger, factory client.Factory, conf
         return nil, errors.Wrap(err, "error to create client")
     }
 
-    cache, err := ctlcache.New(clientConfig, cacheOption)
+    var cache ctlcache.Cache
+    retry := 10
+    for {
+        cache, err = ctlcache.New(clientConfig, cacheOption)
+        if err == nil {
+            break
+        }
+
+        retry--
+        if retry == 0 {
+            break
+        }
+
+        logger.WithError(err).Warn("Failed to create client cache, need retry")
+
+        time.Sleep(time.Second)
+    }
+
     if err != nil {
         cancelFunc()
         return nil, errors.Wrap(err, "error to create client cache")
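The bounded retry above (10 attempts, one second apart, keeping the last error) is a reusable pattern; a self-contained sketch of an equivalent helper (retryN is hypothetical, not part of the commit):

// retryN retries fn up to attempts times, sleeping interval between
// failures, and returns the last error if every attempt fails.
package main

import (
	"errors"
	"fmt"
	"time"
)

func retryN(attempts int, interval time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if i < attempts-1 {
			time.Sleep(interval)
		}
	}
	return err
}

func main() {
	calls := 0
	err := retryN(10, time.Second, func() error {
		calls++
		if calls < 3 {
			return errors.New("cache not ready")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}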
@@ -183,28 +183,15 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
         return ctrl.Result{}, nil
     }
 
-    hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
-    for _, k := range util.ThirdPartyLabels {
-        if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k, kube.NodeOSLinux); err != nil {
-            if err != nodeagent.ErrNodeAgentLabelNotFound {
-                log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
-            }
-        } else {
-            hostingPodLabels[k] = v
-        }
+    exposeParam, err := r.setupExposeParam(dd)
+    if err != nil {
+        return r.errorOut(ctx, dd, err, "failed to set exposer parameters", log)
     }
 
     // Expose() will trigger to create one pod whose volume is restored by a given volume snapshot,
     // but the pod maybe is not in the same node of the current controller, so we need to return it here.
     // And then only the controller who is in the same node could do the rest work.
-    err = r.restoreExposer.Expose(ctx, getDataDownloadOwnerObject(dd), exposer.GenericRestoreExposeParam{
-        TargetPVCName:    dd.Spec.TargetVolume.PVC,
-        SourceNamespace:  dd.Spec.TargetVolume.Namespace,
-        HostingPodLabels: hostingPodLabels,
-        Resources:        r.podResources,
-        ExposeTimeout:    dd.Spec.OperationTimeout.Duration,
-        RestorePVCConfig: r.restorePVCConfig,
-    })
+    err = r.restoreExposer.Expose(ctx, getDataDownloadOwnerObject(dd), exposeParam)
     if err != nil {
         if err := r.client.Get(ctx, req.NamespacedName, dd); err != nil {
             if !apierrors.IsNotFound(err) {
@@ -243,7 +230,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
         log.Debugf("Data download is been canceled %s in Phase %s", dd.GetName(), dd.Status.Phase)
         r.tryCancelAcceptedDataDownload(ctx, dd, "")
     } else if peekErr := r.restoreExposer.PeekExposed(ctx, getDataDownloadOwnerObject(dd)); peekErr != nil {
-        r.tryCancelAcceptedDataDownload(ctx, dd, fmt.Sprintf("found a dataupload %s/%s with expose error: %s. mark it as cancel", dd.Namespace, dd.Name, peekErr))
+        r.tryCancelAcceptedDataDownload(ctx, dd, fmt.Sprintf("found a datadownload %s/%s with expose error: %s. mark it as cancel", dd.Namespace, dd.Name, peekErr))
         log.Errorf("Cancel dd %s/%s because of expose error %s", dd.Namespace, dd.Name, peekErr)
     } else if dd.Status.AcceptedTimestamp != nil {
         if time.Since(dd.Status.AcceptedTimestamp.Time) >= r.preparingTimeout {
@@ -737,6 +724,42 @@ func (r *DataDownloadReconciler) closeDataPath(ctx context.Context, ddName strin
     r.dataPathMgr.RemoveAsyncBR(ddName)
 }
 
+func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDownload) (exposer.GenericRestoreExposeParam, error) {
+    log := r.logger.WithField("datadownload", dd.Name)
+
+    nodeOS := string(dd.Spec.NodeOS)
+    if nodeOS == "" {
+        log.Info("nodeOS is empty in DD, fallback to linux")
+        nodeOS = kube.NodeOSLinux
+    }
+
+    if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
+        return exposer.GenericRestoreExposeParam{}, errors.Wrapf(err, "no appropriate node to run datadownload %s/%s", dd.Namespace, dd.Name)
+    }
+
+    hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
+    for _, k := range util.ThirdPartyLabels {
+        if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, dd.Namespace, k, nodeOS); err != nil {
+            if err != nodeagent.ErrNodeAgentLabelNotFound {
+                log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
+            }
+        } else {
+            hostingPodLabels[k] = v
+        }
+    }
+
+    return exposer.GenericRestoreExposeParam{
+        TargetPVCName:    dd.Spec.TargetVolume.PVC,
+        TargetNamespace:  dd.Spec.TargetVolume.Namespace,
+        HostingPodLabels: hostingPodLabels,
+        Resources:        r.podResources,
+        OperationTimeout: dd.Spec.OperationTimeout.Duration,
+        ExposeTimeout:    r.preparingTimeout,
+        NodeOS:           nodeOS,
+        RestorePVCConfig: r.restorePVCConfig,
+    }, nil
+}
+
 func getDataDownloadOwnerObject(dd *velerov2alpha1api.DataDownload) v1.ObjectReference {
     return v1.ObjectReference{
         Kind: dd.Kind,
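kube.HasNodeWithOS is referenced above but not shown in this diff. A minimal sketch of what such a check could look like, assuming it simply lists nodes by the standard kubernetes.io/os label (this implementation is a guess for illustration, not the commit's code):

// Sketch only: fail early when the cluster has no node of the requested OS.
package kube // assumed package placement

import (
	"context"

	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

const NodeOSLabel = "kubernetes.io/os"

func HasNodeWithOS(ctx context.Context, os string, coreClient corev1client.CoreV1Interface) error {
	if os == "" {
		return errors.New("node OS is empty")
	}

	// Select nodes whose kubernetes.io/os label matches the requested OS.
	nodes, err := coreClient.Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: NodeOSLabel + "=" + os,
	})
	if err != nil {
		return errors.Wrapf(err, "error listing nodes with OS %s", os)
	}

	if len(nodes.Items) == 0 {
		return errors.Errorf("no node with OS %s found", os)
	}

	return nil
}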
@@ -53,6 +53,7 @@ import (
     "github.com/vmware-tanzu/velero/pkg/nodeagent"
     velerotest "github.com/vmware-tanzu/velero/pkg/test"
     "github.com/vmware-tanzu/velero/pkg/uploader"
+    "github.com/vmware-tanzu/velero/pkg/util/kube"
 
     exposermockes "github.com/vmware-tanzu/velero/pkg/exposer/mocks"
 )
@@ -67,7 +68,7 @@ func dataDownloadBuilder() *builder.DataDownloadBuilder {
         PV:        "test-pv",
         PVC:       "test-pvc",
         Namespace: "test-ns",
-    })
+    }).NodeOS(velerov2alpha1api.NodeOS("linux"))
 }
 
 func initDataDownloadReconciler(objects []runtime.Object, needError ...bool) (*DataDownloadReconciler, error) {
@@ -167,6 +168,8 @@ func TestDataDownloadReconcile(t *testing.T) {
         },
     }
 
+    node := builder.ForNode("fake-node").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
+
     tests := []struct {
         name              string
         dd                *velerov2alpha1api.DataDownload
@@ -326,9 +329,15 @@ func TestDataDownloadReconcile(t *testing.T) {
         },
         {
             name:      "Restore is exposed",
-            dd:        dataDownloadBuilder().Result(),
+            dd:        dataDownloadBuilder().NodeOS(velerov2alpha1api.NodeOSLinux).Result(),
             targetPVC: builder.ForPersistentVolumeClaim("test-ns", "test-pvc").Result(),
         },
         {
+            name:              "Expected node doesn't exist",
+            dd:                dataDownloadBuilder().NodeOS(velerov2alpha1api.NodeOSWindows).Result(),
+            targetPVC:         builder.ForPersistentVolumeClaim("test-ns", "test-pvc").Result(),
+            expectedStatusMsg: "no appropriate node to run datadownload",
+        },
+        {
             name: "Get empty restore exposer",
             dd:   dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(),
@@ -388,9 +397,9 @@ func TestDataDownloadReconcile(t *testing.T) {
 
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            var objs []runtime.Object
+            objs := []runtime.Object{daemonSet, node}
             if test.targetPVC != nil {
-                objs = []runtime.Object{test.targetPVC, daemonSet}
+                objs = append(objs, test.targetPVC)
             }
             r, err := initDataDownloadReconciler(objs, test.needErrs...)
             require.NoError(t, err)
@@ -285,6 +285,10 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
         return ctrl.Result{}, nil
     }
 
+    if res.ByPod.NodeOS == nil {
+        return r.errorOut(ctx, du, errors.New("unsupported ambiguous node OS"), "invalid expose result", log)
+    }
+
     log.Info("Exposed snapshot is ready and creating data path routine")
 
     // Need to first create file system BR and get data path instance then update data upload status
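The guard above relies on a pointer convention in the expose result (see the ExposeByPod change later in this diff): a nil NodeOS means the exposer could not determine the hosting pod's OS. A self-contained sketch of that convention:

// Sketch of the nil-pointer convention for ExposeByPod.NodeOS: nil means
// the OS is ambiguous, which the reconciler treats as a fatal condition.
package main

import "fmt"

type ExposeByPod struct {
	NodeOS *string // nil = ambiguous / undetermined
}

func describe(e ExposeByPod) string {
	if e.NodeOS == nil {
		return "unsupported ambiguous node OS"
	}
	return "node OS: " + *e.NodeOS
}

func main() {
	linux := "linux"
	fmt.Println(describe(ExposeByPod{NodeOS: &linux})) // node OS: linux
	fmt.Println(describe(ExposeByPod{}))               // unsupported ambiguous node OS
}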
@@ -317,6 +321,7 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
     original := du.DeepCopy()
     du.Status.Phase = velerov2alpha1api.DataUploadPhaseInProgress
     du.Status.StartTimestamp = &metav1.Time{Time: r.Clock.Now()}
+    du.Status.NodeOS = velerov2alpha1api.NodeOS(*res.ByPod.NodeOS)
     if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil {
         log.WithError(err).Warnf("Failed to update dataupload %s to InProgress, will data path close and retry", du.Name)
@@ -792,6 +797,8 @@ func (r *DataUploadReconciler) closeDataPath(ctx context.Context, duName string)
 }
 
 func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload) (interface{}, error) {
+    log := r.logger.WithField("dataupload", du.Name)
+
     if du.Spec.SnapshotType == velerov2alpha1api.SnapshotTypeCSI {
         pvc := &corev1.PersistentVolumeClaim{}
         err := r.client.Get(context.Background(), types.NamespacedName{
@@ -803,7 +810,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
             return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
         }
 
-        nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), r.logger)
+        nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), log)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to get attaching node OS for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
         }
@@ -821,7 +828,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
         for _, k := range util.ThirdPartyLabels {
             if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
                 if err != nodeagent.ErrNodeAgentLabelNotFound {
-                    r.logger.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
+                    log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
                 }
             } else {
                 hostingPodLabels[k] = v
@@ -843,6 +850,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
             NodeOS: nodeOS,
         }, nil
     }
+
     return nil, nil
 }
@@ -166,6 +166,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
             RestoreSize: &restoreSize,
         },
     }
+
     daemonSet := &appsv1.DaemonSet{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: "velero",
@@ -265,9 +266,10 @@ func dataUploadBuilder() *builder.DataUploadBuilder {
 }
 
 type fakeSnapshotExposer struct {
-    kubeClient kbclient.Client
-    clock      clock.WithTickerAndDelayedExecution
-    peekErr    error
+    kubeClient      kbclient.Client
+    clock           clock.WithTickerAndDelayedExecution
+    ambiguousNodeOS bool
+    peekErr         error
 }
 
 func (f *fakeSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.ObjectReference, param interface{}) error {
@@ -296,7 +298,13 @@ func (f *fakeSnapshotExposer) GetExposed(ctx context.Context, du corev1.ObjectRe
     if err != nil {
         return nil, err
     }
-    return &exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: pod, VolumeName: dataUploadName}}, nil
+
+    nodeOS := "linux"
+    pNodeOS := &nodeOS
+    if f.ambiguousNodeOS {
+        pNodeOS = nil
+    }
+    return &exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: pod, VolumeName: dataUploadName, NodeOS: pNodeOS}}, nil
 }
 
 func (f *fakeSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
@@ -350,6 +358,8 @@ func TestReconcile(t *testing.T) {
         expectedRequeue   ctrl.Result
         expectedErrMsg    string
         needErrs          []bool
+        removeNode        bool
+        ambiguousNodeOS   bool
         peekErr           error
         notCreateFSBR     bool
         fsBRInitErr       error
@@ -359,25 +369,29 @@ func TestReconcile(t *testing.T) {
             name:            "Dataupload is not initialized",
             du:              builder.ForDataUpload("unknown-ns", "unknown-name").Result(),
             expectedRequeue: ctrl.Result{},
-        }, {
+        },
+        {
             name:            "Error get Dataupload",
             du:              builder.ForDataUpload(velerov1api.DefaultNamespace, "unknown-name").Result(),
             expectedRequeue: ctrl.Result{},
             expectedErrMsg:  "getting DataUpload: Get error",
             needErrs:        []bool{true, false, false, false},
-        }, {
+        },
+        {
             name:            "Unsupported data mover type",
             du:              dataUploadBuilder().DataMover("unknown type").Result(),
             expected:        dataUploadBuilder().Phase("").Result(),
             expectedRequeue: ctrl.Result{},
-        }, {
+        },
+        {
             name:              "Unknown type of snapshot exposer is not initialized",
             du:                dataUploadBuilder().SnapshotType("unknown type").Result(),
             expectedProcessed: true,
             expected:          dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
             expectedRequeue:   ctrl.Result{},
             expectedErrMsg:    "unknown type type of snapshot exposer is not exist",
-        }, {
+        },
+        {
             name: "Dataupload should be accepted",
             du:   dataUploadBuilder().Result(),
             pod:  builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
@@ -394,6 +408,27 @@ func TestReconcile(t *testing.T) {
             expectedRequeue:   ctrl.Result{},
             expectedErrMsg:    "failed to get PVC",
         },
+        {
+            name:              "Dataupload should fail to get PVC attaching node",
+            du:                dataUploadBuilder().Result(),
+            pod:               builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
+            pvc:               builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").StorageClass("fake-sc").Result(),
+            expectedProcessed: true,
+            expected:          dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
+            expectedRequeue:   ctrl.Result{},
+            expectedErrMsg:    "error to get storage class",
+        },
+        {
+            name:              "Dataupload should fail because expected node doesn't exist",
+            du:                dataUploadBuilder().Result(),
+            pod:               builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
+            pvc:               builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").Result(),
+            removeNode:        true,
+            expectedProcessed: true,
+            expected:          dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
+            expectedRequeue:   ctrl.Result{},
+            expectedErrMsg:    "no appropriate node to run data upload",
+        },
         {
             name: "Dataupload should be prepared",
             du:   dataUploadBuilder().SnapshotType(fakeSnapshotType).Result(),
@@ -407,6 +442,15 @@ func TestReconcile(t *testing.T) {
             expected:        dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Result(),
             expectedRequeue: ctrl.Result{},
         },
+        {
+            name:              "Dataupload should fail if expose returns ambiguous nodeOS",
+            pod:               builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+            du:                dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
+            ambiguousNodeOS:   true,
+            expectedProcessed: true,
+            expected:          dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
+            expectedErrMsg:    "unsupported ambiguous node OS",
+        },
         {
             name: "Dataupload with not enabled cancel",
             pod:  builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
@@ -557,6 +601,11 @@ func TestReconcile(t *testing.T) {
                 require.NoError(t, err)
             }
 
+            if test.removeNode {
+                err = r.kubeClient.CoreV1().Nodes().Delete(ctx, "fake-node", metav1.DeleteOptions{})
+                require.NoError(t, err)
+            }
+
             if test.dataMgr != nil {
                 r.dataPathMgr = test.dataMgr
             } else {
@@ -564,7 +613,7 @@ func TestReconcile(t *testing.T) {
             }
 
             if test.du.Spec.SnapshotType == fakeSnapshotType {
-                r.snapshotExposerList = map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{fakeSnapshotType: &fakeSnapshotExposer{r.client, r.Clock, test.peekErr}}
+                r.snapshotExposerList = map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{fakeSnapshotType: &fakeSnapshotExposer{r.client, r.Clock, test.ambiguousNodeOS, test.peekErr}}
             } else if test.du.Spec.SnapshotType == velerov2alpha1api.SnapshotTypeCSI {
                 r.snapshotExposerList = map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(r.kubeClient, r.csiSnapshotClient, velerotest.NewLogger())}
             }
@@ -281,10 +281,16 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
 
     curLog.WithField("pod", pod.Name).Infof("Backup volume is found in pod at index %v", i)
 
+    var nodeOS *string
+    if os, found := pod.Spec.NodeSelector[kube.NodeOSLabel]; found {
+        nodeOS = &os
+    }
+
     return &ExposeResult{ByPod: ExposeByPod{
         HostingPod:       pod,
         HostingContainer: containerName,
         VolumeName:       volumeName,
+        NodeOS:           nodeOS,
     }}, nil
 }
 
@@ -40,8 +40,8 @@ type GenericRestoreExposeParam struct {
     // TargetPVCName is the target volume name to be restored
     TargetPVCName string
 
-    // SourceNamespace is the original namespace of the volume that the snapshot is taken for
-    SourceNamespace string
+    // TargetNamespace is the namespace of the volume to be restored
+    TargetNamespace string
 
     // HostingPodLabels is the labels that are going to apply to the hosting pod
     HostingPodLabels map[string]string
@@ -52,6 +52,12 @@ type GenericRestoreExposeParam struct {
     // ExposeTimeout specifies the timeout for the entire expose process
     ExposeTimeout time.Duration
 
+    // OperationTimeout specifies the time wait for resources operations in Expose
+    OperationTimeout time.Duration
+
+    // NodeOS specifies the OS of node that the volume should be attached
+    NodeOS string
+
     // RestorePVCConfig is the config for restorePVC (intermediate PVC) of generic restore
     RestorePVCConfig nodeagent.RestorePVC
 }
@@ -99,21 +105,21 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.O
     curLog := e.log.WithFields(logrus.Fields{
         "owner":            ownerObject.Name,
         "target PVC":       param.TargetPVCName,
-        "source namespace": param.SourceNamespace,
+        "target namespace": param.TargetNamespace,
     })
 
-    selectedNode, targetPVC, err := kube.WaitPVCConsumed(ctx, e.kubeClient.CoreV1(), param.TargetPVCName, param.SourceNamespace, e.kubeClient.StorageV1(), param.ExposeTimeout, param.RestorePVCConfig.IgnoreDelayBinding)
+    selectedNode, targetPVC, err := kube.WaitPVCConsumed(ctx, e.kubeClient.CoreV1(), param.TargetPVCName, param.TargetNamespace, e.kubeClient.StorageV1(), param.ExposeTimeout, param.RestorePVCConfig.IgnoreDelayBinding)
     if err != nil {
-        return errors.Wrapf(err, "error to wait target PVC consumed, %s/%s", param.SourceNamespace, param.TargetPVCName)
+        return errors.Wrapf(err, "error to wait target PVC consumed, %s/%s", param.TargetNamespace, param.TargetPVCName)
     }
 
     curLog.WithField("target PVC", param.TargetPVCName).WithField("selected node", selectedNode).Info("Target PVC is consumed")
 
     if kube.IsPVCBound(targetPVC) {
-        return errors.Errorf("Target PVC %s/%s has already been bound, abort", param.SourceNamespace, param.TargetPVCName)
+        return errors.Errorf("Target PVC %s/%s has already been bound, abort", param.TargetNamespace, param.TargetPVCName)
     }
 
-    restorePod, err := e.createRestorePod(ctx, ownerObject, targetPVC, param.ExposeTimeout, param.HostingPodLabels, selectedNode, param.Resources)
+    restorePod, err := e.createRestorePod(ctx, ownerObject, targetPVC, param.OperationTimeout, param.HostingPodLabels, selectedNode, param.Resources, param.NodeOS)
     if err != nil {
         return errors.Wrapf(err, "error to create restore pod")
     }
@@ -274,19 +280,19 @@ func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1.
     kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), restorePVCName, ownerObject.Namespace, 0, e.log)
 }
 
-func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1.ObjectReference, targetPVCName string, sourceNamespace string, timeout time.Duration) error {
+func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1.ObjectReference, targetPVCName string, targetNamespace string, timeout time.Duration) error {
     restorePodName := ownerObject.Name
     restorePVCName := ownerObject.Name
 
     curLog := e.log.WithFields(logrus.Fields{
         "owner":            ownerObject.Name,
         "target PVC":       targetPVCName,
-        "source namespace": sourceNamespace,
+        "target namespace": targetNamespace,
     })
 
-    targetPVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(sourceNamespace).Get(ctx, targetPVCName, metav1.GetOptions{})
+    targetPVC, err := e.kubeClient.CoreV1().PersistentVolumeClaims(targetNamespace).Get(ctx, targetPVCName, metav1.GetOptions{})
     if err != nil {
-        return errors.Wrapf(err, "error to get target PVC %s/%s", sourceNamespace, targetPVCName)
+        return errors.Wrapf(err, "error to get target PVC %s/%s", targetNamespace, targetPVCName)
     }
 
     restorePV, err := kube.WaitPVCBound(ctx, e.kubeClient.CoreV1(), e.kubeClient.CoreV1(), restorePVCName, ownerObject.Namespace, timeout)
@@ -368,7 +374,7 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
 }
 
 func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim,
-    operationTimeout time.Duration, label map[string]string, selectedNode string, resources corev1.ResourceRequirements) (*corev1.Pod, error) {
+    operationTimeout time.Duration, label map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeType string) (*corev1.Pod, error) {
     restorePodName := ownerObject.Name
     restorePVCName := ownerObject.Name
 
@@ -409,7 +415,28 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
     args = append(args, podInfo.logFormatArgs...)
     args = append(args, podInfo.logLevelArgs...)
 
-    userID := int64(0)
+    var securityCtx *corev1.PodSecurityContext
+    nodeSelector := map[string]string{}
+    podOS := corev1.PodOS{}
+    if nodeType == kube.NodeOSWindows {
+        userID := "ContainerAdministrator"
+        securityCtx = &corev1.PodSecurityContext{
+            WindowsOptions: &corev1.WindowsSecurityContextOptions{
+                RunAsUserName: &userID,
+            },
+        }
+
+        nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
+        podOS.Name = kube.NodeOSWindows
+    } else {
+        userID := int64(0)
+        securityCtx = &corev1.PodSecurityContext{
+            RunAsUser: &userID,
+        }
+
+        nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
+        podOS.Name = kube.NodeOSLinux
+    }
 
     pod := &corev1.Pod{
         ObjectMeta: metav1.ObjectMeta{
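The branch above selects pod-level security, node selector, and OS settings per target OS: Windows pods run as ContainerAdministrator via WindowsOptions, Linux pods run as root via RunAsUser. A self-contained sketch of just those two outcomes, with the kube package constants inlined as plain strings (an illustration, not the commit's code):

// Sketch: the two pod-spec fragments produced for Windows vs. Linux hosts.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func securityAndPlacement(nodeType string) (*corev1.PodSecurityContext, map[string]string, corev1.PodOS) {
	if nodeType == "windows" {
		runAs := "ContainerAdministrator"
		return &corev1.PodSecurityContext{
				WindowsOptions: &corev1.WindowsSecurityContextOptions{RunAsUserName: &runAs},
			},
			map[string]string{"kubernetes.io/os": "windows"},
			corev1.PodOS{Name: corev1.Windows}
	}

	// Default: schedule on Linux and run as root (UID 0).
	root := int64(0)
	return &corev1.PodSecurityContext{RunAsUser: &root},
		map[string]string{"kubernetes.io/os": "linux"},
		corev1.PodOS{Name: corev1.Linux}
}

func main() {
	sc, sel, os := securityAndPlacement("windows")
	fmt.Println(sc.WindowsOptions != nil, sel, os.Name) // true map[kubernetes.io/os:windows] windows
}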
@@ -427,6 +454,8 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
             Labels: label,
         },
         Spec: corev1.PodSpec{
+            NodeSelector: nodeSelector,
+            OS:           &podOS,
             Containers: []corev1.Container{
                 {
                     Name: containerName,
@@ -450,9 +479,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
             Volumes:       volumes,
             NodeName:      selectedNode,
             RestartPolicy: corev1.RestartPolicyNever,
-            SecurityContext: &corev1.PodSecurityContext{
-                RunAsUser: &userID,
-            },
+            SecurityContext: securityCtx,
         },
     }
 
@@ -31,7 +31,6 @@ import (
     velerotest "github.com/vmware-tanzu/velero/pkg/test"
 
     appsv1 "k8s.io/api/apps/v1"
-    corev1 "k8s.io/api/core/v1"
     corev1api "k8s.io/api/core/v1"
     clientTesting "k8s.io/client-go/testing"
 )
@@ -76,9 +75,9 @@ func TestRestoreExpose(t *testing.T) {
             APIVersion: appsv1.SchemeGroupVersion.String(),
         },
         Spec: appsv1.DaemonSetSpec{
-            Template: corev1.PodTemplateSpec{
-                Spec: corev1.PodSpec{
-                    Containers: []corev1.Container{
+            Template: corev1api.PodTemplateSpec{
+                Spec: corev1api.PodSpec{
+                    Containers: []corev1api.Container{
                         {
                             Image: "fake-image",
                         },
@@ -93,21 +92,21 @@ func TestRestoreExpose(t *testing.T) {
         kubeClientObj   []runtime.Object
         ownerRestore    *velerov1.Restore
         targetPVCName   string
-        sourceNamespace string
+        targetNamespace string
         kubeReactors    []reactor
         err             string
     }{
         {
             name:            "wait target pvc consumed fail",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             err:             "error to wait target PVC consumed, fake-ns/fake-target-pvc: error to wait for PVC: error to get pvc fake-ns/fake-target-pvc: persistentvolumeclaims \"fake-target-pvc\" not found",
         },
         {
             name:            "target pvc is already bound",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             kubeClientObj: []runtime.Object{
                 targetPVCObjBound,
@@ -117,7 +116,7 @@ func TestRestoreExpose(t *testing.T) {
         {
             name:            "create restore pod fail",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             kubeClientObj: []runtime.Object{
                 targetPVCObj,
@@ -137,7 +136,7 @@ func TestRestoreExpose(t *testing.T) {
         {
             name:            "create restore pvc fail",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             kubeClientObj: []runtime.Object{
                 targetPVCObj,
@@ -182,9 +181,9 @@ func TestRestoreExpose(t *testing.T) {
 
             err := exposer.Expose(context.Background(), ownerObject, GenericRestoreExposeParam{
                 TargetPVCName:    test.targetPVCName,
-                SourceNamespace:  test.sourceNamespace,
+                TargetNamespace:  test.targetNamespace,
                 HostingPodLabels: map[string]string{},
-                Resources:        corev1.ResourceRequirements{},
+                Resources:        corev1api.ResourceRequirements{},
                 ExposeTimeout:    time.Millisecond})
             assert.EqualError(t, err, test.err)
         })
@@ -244,21 +243,21 @@ func TestRebindVolume(t *testing.T) {
         kubeClientObj   []runtime.Object
         ownerRestore    *velerov1.Restore
         targetPVCName   string
-        sourceNamespace string
+        targetNamespace string
         kubeReactors    []reactor
         err             string
     }{
         {
             name:            "get target pvc fail",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             err:             "error to get target PVC fake-ns/fake-target-pvc: persistentvolumeclaims \"fake-target-pvc\" not found",
         },
         {
             name:            "wait restore pvc bound fail",
             targetPVCName:   "fake-target-pvc",
-            sourceNamespace: "fake-ns",
+            targetNamespace: "fake-ns",
             ownerRestore:    restore,
             kubeClientObj: []runtime.Object{
                 targetPVCObj,
|
||||
{
|
||||
name: "retain target pv fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -289,7 +288,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
{
|
||||
name: "delete restore pod fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -311,7 +310,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
{
|
||||
name: "delete restore pvc fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -333,7 +332,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
{
|
||||
name: "rebind target pvc fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -355,7 +354,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
{
|
||||
name: "reset pv binding fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -382,7 +381,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
{
|
||||
name: "wait restore PV bound fail",
|
||||
targetPVCName: "fake-target-pvc",
|
||||
sourceNamespace: "fake-ns",
|
||||
targetNamespace: "fake-ns",
|
||||
ownerRestore: restore,
|
||||
kubeClientObj: []runtime.Object{
|
||||
targetPVCObj,
|
||||
@@ -420,7 +419,7 @@ func TestRebindVolume(t *testing.T) {
|
||||
|
||||
hookCount = 0
|
||||
|
||||
err := exposer.RebindVolume(context.Background(), ownerObject, test.targetPVCName, test.sourceNamespace, time.Millisecond)
|
||||
err := exposer.RebindVolume(context.Background(), ownerObject, test.targetPVCName, test.targetNamespace, time.Millisecond)
|
||||
assert.EqualError(t, err, test.err)
|
||||
})
|
||||
}
|
||||
@@ -526,7 +525,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
         },
     }
 
-    restorePodWithoutNodeName := corev1.Pod{
+    restorePodWithoutNodeName := corev1api.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: velerov1.DefaultNamespace,
             Name:      "fake-restore",
@@ -539,19 +538,19 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
                 },
             },
         },
-        Status: corev1.PodStatus{
-            Phase: corev1.PodPending,
-            Conditions: []corev1.PodCondition{
+        Status: corev1api.PodStatus{
+            Phase: corev1api.PodPending,
+            Conditions: []corev1api.PodCondition{
                 {
-                    Type:    corev1.PodInitialized,
-                    Status:  corev1.ConditionTrue,
+                    Type:    corev1api.PodInitialized,
+                    Status:  corev1api.ConditionTrue,
                     Message: "fake-pod-message",
                 },
             },
         },
     }
 
-    restorePodWithNodeName := corev1.Pod{
+    restorePodWithNodeName := corev1api.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: velerov1.DefaultNamespace,
             Name:      "fake-restore",
@@ -564,22 +563,22 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
                 },
             },
         },
-        Spec: corev1.PodSpec{
+        Spec: corev1api.PodSpec{
             NodeName: "fake-node",
         },
-        Status: corev1.PodStatus{
-            Phase: corev1.PodPending,
-            Conditions: []corev1.PodCondition{
+        Status: corev1api.PodStatus{
+            Phase: corev1api.PodPending,
+            Conditions: []corev1api.PodCondition{
                 {
-                    Type:    corev1.PodInitialized,
-                    Status:  corev1.ConditionTrue,
+                    Type:    corev1api.PodInitialized,
+                    Status:  corev1api.ConditionTrue,
                     Message: "fake-pod-message",
                 },
             },
         },
     }
 
-    restorePVCWithoutVolumeName := corev1.PersistentVolumeClaim{
+    restorePVCWithoutVolumeName := corev1api.PersistentVolumeClaim{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: velerov1.DefaultNamespace,
             Name:      "fake-restore",
@@ -592,12 +591,12 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
                 },
             },
         },
-        Status: corev1.PersistentVolumeClaimStatus{
-            Phase: corev1.ClaimPending,
+        Status: corev1api.PersistentVolumeClaimStatus{
+            Phase: corev1api.ClaimPending,
         },
     }
 
-    restorePVCWithVolumeName := corev1.PersistentVolumeClaim{
+    restorePVCWithVolumeName := corev1api.PersistentVolumeClaim{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: velerov1.DefaultNamespace,
             Name:      "fake-restore",
@@ -610,35 +609,35 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
                 },
             },
         },
-        Spec: corev1.PersistentVolumeClaimSpec{
+        Spec: corev1api.PersistentVolumeClaimSpec{
             VolumeName: "fake-pv",
         },
-        Status: corev1.PersistentVolumeClaimStatus{
-            Phase: corev1.ClaimPending,
+        Status: corev1api.PersistentVolumeClaimStatus{
+            Phase: corev1api.ClaimPending,
         },
     }
 
-    restorePV := corev1.PersistentVolume{
+    restorePV := corev1api.PersistentVolume{
         ObjectMeta: metav1.ObjectMeta{
             Name: "fake-pv",
        },
-        Status: corev1.PersistentVolumeStatus{
-            Phase: corev1.VolumePending,
+        Status: corev1api.PersistentVolumeStatus{
+            Phase: corev1api.VolumePending,
             Message: "fake-pv-message",
         },
     }
 
-    nodeAgentPod := corev1.Pod{
+    nodeAgentPod := corev1api.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: velerov1.DefaultNamespace,
            Name:      "node-agent-pod-1",
            Labels:    map[string]string{"role": "node-agent"},
        },
-        Spec: corev1.PodSpec{
+        Spec: corev1api.PodSpec{
            NodeName: "fake-node",
        },
-        Status: corev1.PodStatus{
-            Phase: corev1.PodRunning,
+        Status: corev1api.PodStatus{
+            Phase: corev1api.PodRunning,
        },
    }
@@ -38,4 +38,5 @@ type ExposeByPod struct {
     HostingPod       *corev1.Pod
     HostingContainer string
     VolumeName       string
+    NodeOS           *string
 }
@@ -478,6 +478,7 @@ func newDataDownload(
             SnapshotID:       dataUploadResult.SnapshotID,
             SourceNamespace:  dataUploadResult.SourceNamespace,
             OperationTimeout: backup.Spec.CSISnapshotTimeout,
+            NodeOS:           dataUploadResult.NodeOS,
         },
     }
     if restore.Spec.UploaderConfig != nil {
@@ -78,6 +78,7 @@ func (d *DataUploadRetrieveAction) Execute(input *velero.RestoreItemActionExecut
         SnapshotID:      dataUpload.Status.SnapshotID,
         SourceNamespace: dataUpload.Spec.SourceNamespace,
         DataMoverResult: dataUpload.Status.DataMoverResult,
+        NodeOS:          dataUpload.Status.NodeOS,
     }
 
     jsonBytes, err := json.Marshal(dataUploadResult)