Merge branch 'main' into restore-pvc-ignore-wait-for-first-consumer

This commit is contained in:
Lyndon-Li
2025-01-03 14:14:37 +08:00
37 changed files with 1181 additions and 134 deletions


@@ -0,0 +1 @@
Make fs-backup work on linux nodes with the new Velero deployment and disable fs-backup if the source/target pod is running in non-linux node (#8424)


@@ -0,0 +1 @@
Fix issue #7810, add maintenance history for backupRepository CRs


@@ -0,0 +1 @@
Fix issue #8418, support data mover backup for Windows nodes


@@ -0,0 +1 @@
fs uploader and block uploader support Windows nodes


@@ -88,8 +88,8 @@ spec:
description: BackupRepositoryStatus is the current status of a BackupRepository.
properties:
lastMaintenanceTime:
-description: LastMaintenanceTime is the last time maintenance was
-run.
+description: LastMaintenanceTime is the last time repo maintenance
+succeeded.
format: date-time
nullable: true
type: string
@@ -104,6 +104,33 @@ spec:
- Ready
- NotReady
type: string
recentMaintenance:
description: RecentMaintenance is status of the recent repo maintenance.
items:
properties:
completeTimestamp:
description: CompleteTimestamp is the completion time of the
repo maintenance.
format: date-time
nullable: true
type: string
message:
description: Message is a message about the current status of
the repo maintenance.
type: string
result:
description: Result is the result of the repo maintenance.
enum:
- Succeeded
- Failed
type: string
startTimestamp:
description: StartTimestamp is the start time of the repo maintenance.
format: date-time
nullable: true
type: string
type: object
type: array
type: object
type: object
served: true

File diff suppressed because one or more lines are too long


@@ -1,5 +1,5 @@
diff --git a/go.mod b/go.mod
-index 5f939c481..1caa51275 100644
+index 5f939c481..95d29c82b 100644
--- a/go.mod
+++ b/go.mod
@@ -24,32 +24,32 @@ require (
@@ -9,17 +9,18 @@ index 5f939c481..1caa51275 100644
- golang.org/x/crypto v0.5.0
- golang.org/x/net v0.5.0
- golang.org/x/oauth2 v0.4.0
+ golang.org/x/crypto v0.21.0
+ golang.org/x/net v0.23.0
+ golang.org/x/oauth2 v0.7.0
golang.org/x/sync v0.1.0
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.4.0
- golang.org/x/term v0.4.0
- golang.org/x/text v0.6.0
- google.golang.org/api v0.106.0
+ golang.org/x/sys v0.18.0
+ golang.org/x/term v0.18.0
+ golang.org/x/text v0.14.0
+ golang.org/x/crypto v0.31.0
+ golang.org/x/net v0.33.0
+ golang.org/x/oauth2 v0.7.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/sys v0.28.0
+ golang.org/x/term v0.27.0
+ golang.org/x/text v0.21.0
+ google.golang.org/api v0.114.0
)
@@ -62,7 +63,7 @@ index 5f939c481..1caa51275 100644
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
-index 026e1d2fa..27d4207f4 100644
+index 026e1d2fa..d164b17e6 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,13 @@
@@ -126,19 +127,19 @@ index 026e1d2fa..27d4207f4 100644
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-@@ -189,11 +189,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
+@@ -189,17 +189,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
@@ -147,27 +148,35 @@ index 026e1d2fa..27d4207f4 100644
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -214,17 +214,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=


@@ -71,10 +71,43 @@ type BackupRepositoryStatus struct {
// +optional
Message string `json:"message,omitempty"`
-// LastMaintenanceTime is the last time maintenance was run.
+// LastMaintenanceTime is the last time repo maintenance succeeded.
// +optional
// +nullable
LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"`
// RecentMaintenance is status of the recent repo maintenance.
// +optional
RecentMaintenance []BackupRepositoryMaintenanceStatus `json:"recentMaintenance,omitempty"`
}
// BackupRepositoryMaintenanceResult represents the result of a repo maintenance.
// +kubebuilder:validation:Enum=Succeeded;Failed
type BackupRepositoryMaintenanceResult string
const (
BackupRepositoryMaintenanceSucceeded BackupRepositoryMaintenanceResult = "Succeeded"
BackupRepositoryMaintenanceFailed BackupRepositoryMaintenanceResult = "Failed"
)
type BackupRepositoryMaintenanceStatus struct {
// Result is the result of the repo maintenance.
// +optional
Result BackupRepositoryMaintenanceResult `json:"result,omitempty"`
// StartTimestamp is the start time of the repo maintenance.
// +optional
// +nullable
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
// CompleteTimestamp is the completion time of the repo maintenance.
// +optional
// +nullable
CompleteTimestamp *metav1.Time `json:"completeTimestamp,omitempty"`
// Message is a message about the current status of the repo maintenance.
// +optional
Message string `json:"message,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,


@@ -165,6 +165,29 @@ func (in *BackupRepositoryList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepositoryMaintenanceStatus) DeepCopyInto(out *BackupRepositoryMaintenanceStatus) {
*out = *in
if in.StartTimestamp != nil {
in, out := &in.StartTimestamp, &out.StartTimestamp
*out = (*in).DeepCopy()
}
if in.CompleteTimestamp != nil {
in, out := &in.CompleteTimestamp, &out.CompleteTimestamp
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryMaintenanceStatus.
func (in *BackupRepositoryMaintenanceStatus) DeepCopy() *BackupRepositoryMaintenanceStatus {
if in == nil {
return nil
}
out := new(BackupRepositoryMaintenanceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) {
*out = *in
@@ -195,6 +218,13 @@ func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) {
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
*out = (*in).DeepCopy()
}
if in.RecentMaintenance != nil {
in, out := &in.RecentMaintenance, &out.RecentMaintenance
*out = make([]BackupRepositoryMaintenanceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus.


@@ -697,6 +697,7 @@ func (kb *kubernetesBackupper) backupItemBlock(ctx context.Context, itemBlock Ba
if len(postHookPods) > 0 {
itemBlock.Log.Debug("Executing post hooks")
+itemBlock.itemBackupper.hookTracker.AsyncItemBlocks.Add(1)
go kb.handleItemBlockPostHooks(ctx, itemBlock, postHookPods)
}
@@ -735,7 +736,6 @@ func (kb *kubernetesBackupper) handleItemBlockPreHooks(itemBlock BackupItemBlock
// The hooks cannot execute until the PVBs to be processed
func (kb *kubernetesBackupper) handleItemBlockPostHooks(ctx context.Context, itemBlock BackupItemBlock, hookPods []itemblock.ItemBlockItem) {
log := itemBlock.Log
-itemBlock.itemBackupper.hookTracker.AsyncItemBlocks.Add(1)
defer itemBlock.itemBackupper.hookTracker.AsyncItemBlocks.Done()
if err := kb.waitUntilPVBsProcessed(ctx, log, itemBlock, hookPods); err != nil {
@@ -806,7 +806,7 @@ func (kb *kubernetesBackupper) waitUntilPVBsProcessed(ctx context.Context, log l
return allProcessed, nil
}
-return wait.PollUntilContextCancel(ctx, 5*time.Second, false, checkFunc)
+return wait.PollUntilContextCancel(ctx, 5*time.Second, true, checkFunc)
}
func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource, itemBlock *BackupItemBlock) bool {


@@ -3464,59 +3464,57 @@ func TestBackupWithHooks(t *testing.T) {
wantBackedUp []string
wantHookExecutionLog []test.HookExecutionEntry
}{
-/*
{
name: "pre hook with no resource filters runs for all pods",
backup: defaultBackup().
Hooks(velerov1.BackupHooks{
Resources: []velerov1.BackupResourceHookSpec{
{
Name: "hook-1",
PreHooks: []velerov1.BackupResourceHook{
{
Exec: &velerov1.ExecHook{
Command: []string{"ls", "/tmp"},
},
},
},
},
},
}).
Result(),
apiResources: []*test.APIResource{
test.Pods(
builder.ForPod("ns-1", "pod-1").Result(),
builder.ForPod("ns-2", "pod-2").Result(),
),
},
wantExecutePodCommandCalls: []*expectedCall{
{
podNamespace: "ns-1",
podName: "pod-1",
hookName: "hook-1",
hook: &velerov1.ExecHook{
Command: []string{"ls", "/tmp"},
},
err: nil,
},
{
podNamespace: "ns-2",
podName: "pod-2",
hookName: "hook-1",
hook: &velerov1.ExecHook{
Command: []string{"ls", "/tmp"},
},
err: nil,
},
},
wantBackedUp: []string{
"resources/pods/namespaces/ns-1/pod-1.json",
"resources/pods/namespaces/ns-2/pod-2.json",
"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
},
},
-*/
{
name: "post hook with no resource filters runs for all pods",
backup: defaultBackup().


@@ -168,7 +168,24 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
return nil, errors.Wrap(err, "error to create client")
}
-cache, err := ctlcache.New(clientConfig, cacheOption)
+var cache ctlcache.Cache
+retry := 10
+for {
+cache, err = ctlcache.New(clientConfig, cacheOption)
+if err == nil {
+break
+}
+retry--
+if retry == 0 {
+break
+}
+logger.WithError(err).Warn("Failed to create client cache, need retry")
+time.Sleep(time.Second)
+}
if err != nil {
cancelFunc()
return nil, errors.Wrap(err, "error to create client cache")
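The new startup path retries client cache creation up to 10 times, one second apart, and surfaces the last error if every attempt fails. A minimal standalone sketch of the same bounded-retry shape (helper name and timings are hypothetical, not from the commit):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryN retries fn up to attempts times, sleeping delay between tries,
// and returns the last error if every attempt fails.
func retryN(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	calls := 0
	err := retryN(10, time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // prints: 3 <nil>
}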


@@ -398,7 +398,9 @@ func (o *Options) Run(c *cobra.Command, f client.Factory) error {
if _, err = install.NodeAgentIsReady(dynamicFactory, o.Namespace); err != nil {
return errors.Wrap(err, errorMsg)
}
}
if o.UseNodeAgentWindows {
fmt.Println("Waiting for node-agent-windows daemonset to be ready.")
if _, err = install.NodeAgentWindowsIsReady(dynamicFactory, o.Namespace); err != nil {
return errors.Wrap(err, errorMsg)


@@ -82,6 +82,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/restore"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
@@ -471,10 +472,20 @@ func (s *server) veleroResourcesExist() error {
func (s *server) checkNodeAgent() {
// warn if node agent does not exist
-if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
-s.logger.Warn("Velero node agent not found; pod volume backups/restores will not work until it's created")
-} else if err != nil {
-s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent")
+if kube.WithLinuxNode(s.ctx, s.crClient, s.logger) {
+if err := nodeagent.IsRunningOnLinux(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
+s.logger.Warn("Velero node agent not found for linux nodes; pod volume backups/restores and data mover backups/restores will not work until it's created")
+} else if err != nil {
+s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent for linux nodes")
+}
+}
+if kube.WithWindowsNode(s.ctx, s.crClient, s.logger) {
+if err := nodeagent.IsRunningOnWindows(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
+s.logger.Warn("Velero node agent not found for Windows nodes; pod volume backups/restores and data mover backups/restores will not work until it's created")
+} else if err != nil {
+s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent for Windows nodes")
+}
+}
}


@@ -46,8 +46,9 @@ import (
)
const (
repoSyncPeriod = 5 * time.Minute
defaultMaintainFrequency = 7 * 24 * time.Hour
+defaultMaintenanceStatusQueueLength = 3
)
type BackupRepoReconciler struct {
@@ -299,9 +300,9 @@ func ensureRepo(repo *velerov1api.BackupRepository, repoManager repomanager.Mana
}
func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
-now := r.clock.Now()
+startTime := r.clock.Now()
-if !dueForMaintenance(req, now) {
+if !dueForMaintenance(req, startTime) {
log.Debug("not due for maintenance")
return nil
}
@@ -315,16 +316,33 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
if err := r.repositoryManager.PruneRepo(req); err != nil {
log.WithError(err).Warn("error pruning repository")
return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
-rr.Status.Message = err.Error()
+updateRepoMaintenanceHistory(rr, velerov1api.BackupRepositoryMaintenanceFailed, startTime, r.clock.Now(), err.Error())
})
}
return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Message = ""
rr.Status.LastMaintenanceTime = &metav1.Time{Time: now}
completionTime := r.clock.Now()
rr.Status.LastMaintenanceTime = &metav1.Time{Time: completionTime}
updateRepoMaintenanceHistory(rr, velerov1api.BackupRepositoryMaintenanceSucceeded, startTime, completionTime, "")
})
}
func updateRepoMaintenanceHistory(repo *velerov1api.BackupRepository, result velerov1api.BackupRepositoryMaintenanceResult, startTime time.Time, completionTime time.Time, message string) {
latest := velerov1api.BackupRepositoryMaintenanceStatus{
Result: result,
StartTimestamp: &metav1.Time{Time: startTime},
CompleteTimestamp: &metav1.Time{Time: completionTime},
Message: message,
}
startingPos := 0
if len(repo.Status.RecentMaintenance) >= defaultMaintenanceStatusQueueLength {
startingPos = len(repo.Status.RecentMaintenance) - defaultMaintenanceStatusQueueLength + 1
}
repo.Status.RecentMaintenance = append(repo.Status.RecentMaintenance[startingPos:], latest)
}
func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool {
return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now)
}
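The RecentMaintenance history behaves as a bounded FIFO: once defaultMaintenanceStatusQueueLength (3) entries exist, updateRepoMaintenanceHistory drops the oldest entries so that appending the latest result never grows the slice past the limit. A minimal standalone sketch of that truncation logic (names hypothetical):

package main

import "fmt"

// appendBounded mirrors updateRepoMaintenanceHistory's truncation: keep at
// most limit entries, dropping the oldest before appending the newest.
func appendBounded(history []string, latest string, limit int) []string {
	start := 0
	if len(history) >= limit {
		start = len(history) - limit + 1
	}
	return append(history[start:], latest)
}

func main() {
	h := []string{"m1", "m2", "m3"} // already at the limit of 3
	fmt.Println(appendBounded(h, "m4", 3)) // [m2 m3 m4]: m1 is dropped
}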


@@ -486,3 +486,180 @@ func TestGetBackupRepositoryConfig(t *testing.T) {
})
}
}
func TestUpdateRepoMaintenanceHistory(t *testing.T) {
standardTime := time.Now()
backupRepoWithoutHistory := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
}
backupRepoWithHistory := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Status: velerov1api.BackupRepositoryStatus{
RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 24)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 23)},
Message: "fake-history-message-1",
},
},
},
}
backupRepoWithFullHistory := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Status: velerov1api.BackupRepositoryStatus{
RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 24)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 23)},
Message: "fake-history-message-2",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 22)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 21)},
Message: "fake-history-message-3",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 20)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 19)},
Message: "fake-history-message-4",
},
},
},
}
backupRepoWithOverFullHistory := &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Status: velerov1api.BackupRepositoryStatus{
RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 24)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 23)},
Message: "fake-history-message-5",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 22)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 21)},
Message: "fake-history-message-6",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 20)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 19)},
Message: "fake-history-message-7",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 18)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 17)},
Message: "fake-history-message-8",
},
},
},
}
tests := []struct {
name string
backupRepo *velerov1api.BackupRepository
result velerov1api.BackupRepositoryMaintenanceResult
expectedHistory []velerov1api.BackupRepositoryMaintenanceStatus
}{
{
name: "empty history",
backupRepo: backupRepoWithoutHistory,
result: velerov1api.BackupRepositoryMaintenanceSucceeded,
expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(time.Hour)},
Message: "fake-message-0",
},
},
},
{
name: "less than history queue length",
backupRepo: backupRepoWithHistory,
result: velerov1api.BackupRepositoryMaintenanceSucceeded,
expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 24)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 23)},
Message: "fake-history-message-1",
},
{
StartTimestamp: &metav1.Time{Time: standardTime},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(time.Hour)},
Message: "fake-message-0",
},
},
},
{
name: "full history",
backupRepo: backupRepoWithFullHistory,
result: velerov1api.BackupRepositoryMaintenanceFailed,
expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 22)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 21)},
Message: "fake-history-message-3",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 20)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 19)},
Message: "fake-history-message-4",
},
{
StartTimestamp: &metav1.Time{Time: standardTime},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(time.Hour)},
Message: "fake-message-0",
},
},
},
{
name: "over full history",
backupRepo: backupRepoWithOverFullHistory,
result: velerov1api.BackupRepositoryMaintenanceFailed,
expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 20)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 19)},
Message: "fake-history-message-7",
},
{
StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 18)},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 17)},
Message: "fake-history-message-8",
},
{
StartTimestamp: &metav1.Time{Time: standardTime},
CompleteTimestamp: &metav1.Time{Time: standardTime.Add(time.Hour)},
Message: "fake-message-0",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
updateRepoMaintenanceHistory(test.backupRepo, test.result, standardTime, standardTime.Add(time.Hour), "fake-message-0")
for at := range test.backupRepo.Status.RecentMaintenance {
assert.Equal(t, test.expectedHistory[at].StartTimestamp.Time, test.backupRepo.Status.RecentMaintenance[at].StartTimestamp.Time)
assert.Equal(t, test.expectedHistory[at].CompleteTimestamp.Time, test.backupRepo.Status.RecentMaintenance[at].CompleteTimestamp.Time)
assert.Equal(t, test.expectedHistory[at].Message, test.backupRepo.Status.RecentMaintenance[at].Message)
}
})
}
}


@@ -185,7 +185,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
for _, k := range util.ThirdPartyLabels {
-if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k); err != nil {
+if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k, kube.NodeOSLinux); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}


@@ -803,6 +803,15 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
}
nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), r.logger)
if err != nil {
return nil, errors.Wrapf(err, "failed to get attaching node OS for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
}
if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
return nil, errors.Wrapf(err, "no appropriate node to run data upload for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
}
accessMode := exposer.AccessModeFileSystem
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
accessMode = exposer.AccessModeBlock
@@ -810,7 +819,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
for _, k := range util.ThirdPartyLabels {
-if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k); err != nil {
+if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
if err != nodeagent.ErrNodeAgentLabelNotFound {
r.logger.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
}
@@ -831,6 +840,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
Affinity: r.loadAffinity,
BackupPVCConfig: r.backupPVCConfig,
Resources: r.podResources,
NodeOS: nodeOS,
}, nil
}
return nil, nil


@@ -59,6 +59,7 @@ import (
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
const dataUploadName = "dataupload-1"
@@ -187,6 +188,8 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
},
}
node := builder.ForNode("fake-node").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
dataPathMgr := datapath.NewManager(1)
now, err := time.Parse(time.RFC1123, time.RFC1123)
@@ -229,7 +232,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
}
fakeSnapshotClient := snapshotFake.NewSimpleClientset(vsObject, vscObj)
-fakeKubeClient := clientgofake.NewSimpleClientset(daemonSet)
+fakeKubeClient := clientgofake.NewSimpleClientset(daemonSet, node)
return NewDataUploadReconciler(
fakeClient,


@@ -73,6 +73,9 @@ type CSISnapshotExposeParam struct {
// Resources defines the resource requirements of the hosting pod
Resources corev1.ResourceRequirements
// NodeOS specifies the OS of node that the source volume is attaching
NodeOS string
}
// CSISnapshotExposeWaitParam define the input param for WaitExposed of CSI snapshots
@@ -212,6 +215,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
csiExposeParam.Resources,
backupPVCReadOnly,
spcNoRelabeling,
csiExposeParam.NodeOS,
)
if err != nil {
return errors.Wrap(err, "error to create backup pod")
@@ -517,13 +521,14 @@ func (e *csiSnapshotExposer) createBackupPod(
resources corev1.ResourceRequirements,
backupPVCReadOnly bool,
spcNoRelabeling bool,
nodeOS string,
) (*corev1.Pod, error) {
podName := ownerObject.Name
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
-podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
+podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, nodeOS)
if err != nil {
return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
}
@@ -567,13 +572,40 @@ func (e *csiSnapshotExposer) createBackupPod(
args = append(args, podInfo.logFormatArgs...)
args = append(args, podInfo.logLevelArgs...)
-userID := int64(0)
affinityList := make([]*kube.LoadAffinity, 0)
if affinity != nil {
affinityList = append(affinityList, affinity)
}
var securityCtx *corev1.PodSecurityContext
nodeSelector := map[string]string{}
podOS := corev1.PodOS{}
if nodeOS == kube.NodeOSWindows {
userID := "ContainerAdministrator"
securityCtx = &corev1.PodSecurityContext{
WindowsOptions: &corev1.WindowsSecurityContextOptions{
RunAsUserName: &userID,
},
}
nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
podOS.Name = kube.NodeOSWindows
} else {
userID := int64(0)
securityCtx = &corev1.PodSecurityContext{
RunAsUser: &userID,
}
if spcNoRelabeling {
securityCtx.SELinuxOptions = &corev1.SELinuxOptions{
Type: "spc_t",
}
}
nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
podOS.Name = kube.NodeOSLinux
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@@ -602,7 +634,9 @@ func (e *csiSnapshotExposer) createBackupPod(
},
},
},
-Affinity: kube.ToSystemAffinity(affinityList),
+NodeSelector: nodeSelector,
+OS: &podOS,
+Affinity: kube.ToSystemAffinity(affinityList),
Containers: []corev1.Container{
{
Name: containerName,
@@ -625,17 +659,9 @@ func (e *csiSnapshotExposer) createBackupPod(
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: volumes,
RestartPolicy: corev1.RestartPolicyNever,
-SecurityContext: &corev1.PodSecurityContext{
-RunAsUser: &userID,
-},
+SecurityContext: securityCtx,
},
}
-if spcNoRelabeling {
-pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{
-Type: "spc_t",
-}
-}
return e.kubeClient.CoreV1().Pods(ownerObject.Namespace).Create(ctx, pod, metav1.CreateOptions{})
}


@@ -375,7 +375,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
-podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
+podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, kube.NodeOSLinux)
if err != nil {
return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
}


@@ -38,10 +38,10 @@ type inheritedPodInfo struct {
logFormatArgs []string
}
-func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string) (inheritedPodInfo, error) {
+func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string, osType string) (inheritedPodInfo, error) {
podInfo := inheritedPodInfo{}
-podSpec, err := nodeagent.GetPodSpec(ctx, client, veleroNamespace)
+podSpec, err := nodeagent.GetPodSpec(ctx, client, veleroNamespace, osType)
if err != nil {
return podInfo, errors.Wrap(err, "error to get node-agent pod template")
}


@@ -26,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/util/kube"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
@@ -322,7 +324,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
-info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace)
+info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace, kube.NodeOSLinux)
if test.expectErr == "" {
assert.NoError(t, err)


@@ -33,9 +33,12 @@ import (
)
const (
-// daemonSet is the name of the Velero node agent daemonset.
+// daemonSet is the name of the Velero node agent daemonset on linux nodes.
daemonSet = "node-agent"
// daemonsetWindows is the name of the Velero node agent daemonset on Windows nodes.
daemonsetWindows = "node-agent-windows"
// nodeAgentRole marks pods with node-agent role on all nodes.
nodeAgentRole = "node-agent"
)
@@ -100,9 +103,16 @@ type Configs struct {
PodResources *kube.PodResources `json:"podResources,omitempty"`
}
// IsRunning checks if the node agent daemonset is running properly. If not, return the error found
-func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error {
-if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) {
+func IsRunningOnLinux(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error {
+return isRunning(ctx, kubeClient, namespace, daemonSet)
+}
+func IsRunningOnWindows(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error {
+return isRunning(ctx, kubeClient, namespace, daemonsetWindows)
+}
+func isRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string, daemonset string) error {
+if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonset, metav1.GetOptions{}); apierrors.IsNotFound(err) {
return ErrDaemonSetNotFound
} else if err != nil {
return err
@@ -155,10 +165,15 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
return errors.Errorf("daemonset pod not found in running state in node %s", nodeName)
}
-func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (*v1.PodSpec, error) {
-ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{})
+func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string, osType string) (*v1.PodSpec, error) {
+dsName := daemonSet
+if osType == kube.NodeOSWindows {
+dsName = daemonsetWindows
+}
+ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{})
if err != nil {
-return nil, errors.Wrap(err, "error to get node-agent daemonset")
+return nil, errors.Wrapf(err, "error to get %s daemonset", dsName)
}
return &ds.Spec.Template.Spec, nil
@@ -188,10 +203,15 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int
return configs, nil
}
-func GetLabelValue(ctx context.Context, kubeClient kubernetes.Interface, namespace string, key string) (string, error) {
-ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{})
+func GetLabelValue(ctx context.Context, kubeClient kubernetes.Interface, namespace string, key string, osType string) (string, error) {
+dsName := daemonSet
+if osType == kube.NodeOSWindows {
+dsName = daemonsetWindows
+}
+ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{})
if err != nil {
-return "", errors.Wrap(err, "error getting node-agent daemonset")
+return "", errors.Wrapf(err, "error getting %s daemonset", dsName)
}
if ds.Spec.Template.Labels == nil {


@@ -31,6 +31,7 @@ import (
clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/vmware-tanzu/velero/pkg/builder"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
type reactor struct {
@@ -40,7 +41,7 @@ type reactor struct {
}
func TestIsRunning(t *testing.T) {
-daemonSet := &appsv1.DaemonSet{
+ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
@@ -80,7 +81,7 @@ func TestIsRunning(t *testing.T) {
name: "succeed",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
-daemonSet,
+ds,
},
},
}
@@ -93,7 +94,7 @@ func TestIsRunning(t *testing.T) {
fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
}
-err := IsRunning(context.TODO(), fakeKubeClient, test.namespace)
+err := isRunning(context.TODO(), fakeKubeClient, test.namespace, daemonSet)
if test.expectErr == "" {
assert.NoError(t, err)
} else {
@@ -229,7 +230,7 @@ func TestGetPodSpec(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
-spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace)
+spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace, kube.NodeOSLinux)
if test.expectErr == "" {
assert.NoError(t, err)
assert.Equal(t, *spec, test.expectSpec)
@@ -450,7 +451,7 @@ func TestGetLabelValue(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label")
value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label", kube.NodeOSLinux)
if test.expectErr == "" {
assert.NoError(t, err)
assert.Equal(t, test.expectedValue, value)


@@ -206,6 +206,12 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
return nil, pvcSummary, nil
}
if err := kube.IsLinuxNode(b.ctx, pod.Spec.NodeName, b.crClient); err != nil {
err := errors.Wrapf(err, "Pod %s/%s is not running in linux node(%s), skip", pod.Namespace, pod.Name, pod.Spec.NodeName)
skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log)
return nil, pvcSummary, []error{err}
}
err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.crClient)
if err != nil {
skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log)


@@ -303,6 +303,14 @@ func createPVBObj(fail bool, withSnapshot bool, index int, uploaderType string)
return pvbObj
}
func createNodeObj() *corev1api.Node {
return builder.ForNode("fake-node-name").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
}
func createWindowsNodeObj() *corev1api.Node {
return builder.ForNode("fake-node-name").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
}
func TestBackupPodVolumes(t *testing.T) {
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
@@ -358,13 +366,32 @@ func TestBackupPodVolumes(t *testing.T) {
uploaderType: "kopia",
bsl: "fake-bsl",
},
{
name: "pod is not running on Linux node",
volumes: []string{
"fake-volume-1",
"fake-volume-2",
},
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createWindowsNodeObj(),
},
sourcePod: createPodObj(false, false, false, 2),
uploaderType: "kopia",
errs: []string{
"Pod fake-ns/fake-pod is not running in linux node(fake-node-name), skip",
},
},
{
name: "node-agent pod is not running in node",
volumes: []string{
"fake-volume-1",
"fake-volume-2",
},
sourcePod: createPodObj(true, false, false, 2),
kubeClientObj: []runtime.Object{
createNodeObj(),
},
uploaderType: "kopia",
errs: []string{
"daemonset pod not found in running state in node fake-node-name",
@@ -379,6 +406,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, false, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
},
uploaderType: "kopia",
mockGetRepositoryType: true,
@@ -395,6 +423,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, false, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
},
uploaderType: "kopia",
errs: []string{
@@ -410,6 +439,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, false, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
},
ctlClientObj: []runtime.Object{
createBackupRepoObj(),
@@ -427,6 +457,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, true, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
},
ctlClientObj: []runtime.Object{
createBackupRepoObj(),
@@ -448,6 +479,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, true, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
createPVCObj(1),
createPVCObj(2),
},
@@ -471,6 +503,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, true, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
createPVCObj(1),
createPVCObj(2),
createPVObj(1, true),
@@ -482,6 +515,7 @@ func TestBackupPodVolumes(t *testing.T) {
runtimeScheme: scheme,
uploaderType: "kopia",
bsl: "fake-bsl",
errs: []string{},
},
{
name: "volume not mounted by pod should be skipped",
@@ -492,6 +526,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, true, false, 2),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
createPVCObj(1),
createPVCObj(2),
createPVObj(1, false),
@@ -503,6 +538,7 @@ func TestBackupPodVolumes(t *testing.T) {
runtimeScheme: scheme,
uploaderType: "kopia",
bsl: "fake-bsl",
errs: []string{},
},
{
name: "return completed pvbs",
@@ -512,6 +548,7 @@ func TestBackupPodVolumes(t *testing.T) {
sourcePod: createPodObj(true, true, true, 1),
kubeClientObj: []runtime.Object{
createNodeAgentPodObj(true),
createNodeObj(),
createPVCObj(1),
createPVObj(1, false),
},
@@ -522,6 +559,7 @@ func TestBackupPodVolumes(t *testing.T) {
uploaderType: "kopia",
bsl: "fake-bsl",
pvbs: 1,
errs: []string{},
},
}
// TODO add more verification around PVCBackupSummary returned by "BackupPodVolumes"
@@ -568,8 +606,8 @@ func TestBackupPodVolumes(t *testing.T) {
pvbs, _, errs := bp.BackupPodVolumes(backupObj, test.sourcePod, test.volumes, nil, velerotest.NewLogger())
-if errs == nil {
-assert.Nil(t, test.errs)
+if test.errs == nil {
+assert.NoError(t, err)
} else {
for i := 0; i < len(errs); i++ {
assert.EqualError(t, errs[i], test.errs[i])


@@ -122,7 +122,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData, tracker *volume.RestoreVo
return nil
}
-if err := nodeagent.IsRunning(r.ctx, r.kubeClient, data.Restore.Namespace); err != nil {
+if err := nodeagent.IsRunningOnLinux(r.ctx, r.kubeClient, data.Restore.Namespace); err != nil {
return []error{errors.Wrapf(err, "error to check node agent status")}
}
@@ -213,6 +213,12 @@ func (r *restorer) RestorePodVolumes(data RestoreData, tracker *volume.RestoreVo
} else if err != nil {
r.log.WithError(err).Error("Failed to check node-agent pod status, disengage")
} else {
if err := kube.IsLinuxNode(checkCtx, nodeName, r.crClient); err != nil {
r.log.WithField("node", nodeName).WithError(err).Error("Restored pod is not running in linux node")
r.nodeAgentCheck <- errors.Wrapf(err, "restored pod %s/%s is not running in linux node(%s)", data.Pod.Namespace, data.Pod.Name, nodeName)
return
}
err = nodeagent.IsRunningInNode(checkCtx, data.Restore.Namespace, nodeName, r.crClient)
if err != nil {
r.log.WithField("node", nodeName).WithError(err).Error("node-agent pod is not running in node, abort the restore")


@@ -33,7 +33,6 @@ import (
"k8s.io/client-go/kubernetes"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/vmware-tanzu/velero/internal/volume"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -314,6 +313,30 @@ func TestRestorePodVolumes(t *testing.T) {
},
},
},
{
name: "pod is not running on linux nodes",
pvbs: []*velerov1api.PodVolumeBackup{
createPVBObj(true, true, 1, "kopia"),
},
kubeClientObj: []runtime.Object{
createNodeAgentDaemonset(),
createWindowsNodeObj(),
createPVCObj(1),
createPodObj(true, true, true, 1),
},
ctlClientObj: []runtime.Object{
createBackupRepoObj(),
},
restoredPod: createPodObj(true, true, true, 1),
sourceNamespace: "fake-ns",
bsl: "fake-bsl",
runtimeScheme: scheme,
errs: []expectError{
{
err: "restored pod fake-ns/fake-pod is not running in linux node(fake-node-name): os type windows for node fake-node-name is not linux",
},
},
},
{
name: "node-agent pod is not running",
pvbs: []*velerov1api.PodVolumeBackup{
@@ -321,6 +344,7 @@ func TestRestorePodVolumes(t *testing.T) {
},
kubeClientObj: []runtime.Object{
createNodeAgentDaemonset(),
createNodeObj(),
createPVCObj(1),
createPodObj(true, true, true, 1),
},
@@ -344,6 +368,7 @@ func TestRestorePodVolumes(t *testing.T) {
},
kubeClientObj: []runtime.Object{
createNodeAgentDaemonset(),
createNodeObj(),
createPVCObj(1),
createPodObj(true, true, true, 1),
createNodeAgentPodObj(true),
@@ -368,11 +393,6 @@ func TestRestorePodVolumes(t *testing.T) {
ctx = test.ctx
}
-fakeClientBuilder := ctrlfake.NewClientBuilder()
-if test.runtimeScheme != nil {
-fakeClientBuilder = fakeClientBuilder.WithScheme(test.runtimeScheme)
-}
objClient := append(test.ctlClientObj, test.kubeClientObj...)
objClient = append(objClient, test.veleroClientObj...)
@@ -438,7 +458,8 @@ func TestRestorePodVolumes(t *testing.T) {
for i := 0; i < len(errs); i++ {
j := 0
for ; j < len(test.errs); j++ {
-if errs[i].Error() == test.errs[j].err {
+err := errs[i].Error()
+if err == test.errs[j].err {
break
}
}


@@ -138,7 +138,9 @@ func (p *Progress) HashingFile(fname string) {}
func (p *Progress) ExcludedFile(fname string, numBytes int64) {}
// ExcludedDir statistic the dir been excluded currently
-func (p *Progress) ExcludedDir(dirname string) {}
+func (p *Progress) ExcludedDir(dirname string) {
+p.log.Infof("Excluded dir %s", dirname)
+}
// FinishedHashingFile which will called when specific file finished hash
func (p *Progress) FinishedHashingFile(fname string, numBytes int64) {


@@ -127,6 +127,10 @@ func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snap
curPolicy.UploadPolicy.ParallelUploadAboveSize = newOptionalInt64(2 << 30)
}
if runtime.GOOS == "windows" {
curPolicy.FilesPolicy.IgnoreRules = []string{"/System Volume Information/", "/$Recycle.Bin/"}
}
err := setPolicyFunc(ctx, rep, sourceInfo, curPolicy)
if err != nil {
return nil, errors.Wrap(err, "error to set policy")

pkg/util/kube/node.go

@@ -0,0 +1,119 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
NodeOSLinux = "linux"
NodeOSWindows = "windows"
NodeOSLabel = "kubernetes.io/os"
)
func IsLinuxNode(ctx context.Context, nodeName string, client client.Client) error {
node := &corev1api.Node{}
if err := client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil {
return errors.Wrapf(err, "error getting node %s", nodeName)
}
os, found := node.Labels[NodeOSLabel]
if !found {
return errors.Errorf("no os type label for node %s", nodeName)
}
if os != NodeOSLinux {
return errors.Errorf("os type %s for node %s is not linux", os, nodeName)
}
return nil
}
func WithLinuxNode(ctx context.Context, client client.Client, log logrus.FieldLogger) bool {
return withOSNode(ctx, client, NodeOSLinux, log)
}
func WithWindowsNode(ctx context.Context, client client.Client, log logrus.FieldLogger) bool {
return withOSNode(ctx, client, NodeOSWindows, log)
}
func withOSNode(ctx context.Context, client client.Client, osType string, log logrus.FieldLogger) bool {
nodeList := new(corev1api.NodeList)
if err := client.List(ctx, nodeList); err != nil {
log.Warnf("Failed to list nodes, cannot decide existence of nodes of OS %s", osType)
return false
}
allNodeLabeled := true
for _, node := range nodeList.Items {
os, found := node.Labels[NodeOSLabel]
if os == osType {
return true
}
if !found {
allNodeLabeled = false
}
}
if !allNodeLabeled {
log.Warnf("Not all nodes have os type label, cannot decide existence of nodes of OS %s", osType)
}
return false
}
func GetNodeOS(ctx context.Context, nodeName string, nodeClient corev1client.CoreV1Interface) (string, error) {
node, err := nodeClient.Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
if err != nil {
return "", errors.Wrapf(err, "error getting node %s", nodeName)
}
if node.Labels == nil {
return "", nil
}
return node.Labels[NodeOSLabel], nil
}
func HasNodeWithOS(ctx context.Context, os string, nodeClient corev1client.CoreV1Interface) error {
if os == "" {
return errors.New("invalid node OS")
}
nodes, err := nodeClient.Nodes().List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", NodeOSLabel, os)})
if err != nil {
return errors.Wrapf(err, "error listing nodes with OS %s", os)
}
if len(nodes.Items) == 0 {
return errors.Errorf("node with OS %s doesn't exist", os)
}
return nil
}
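These helpers split OS detection into a per-node check (IsLinuxNode), cluster-wide existence checks (WithLinuxNode, WithWindowsNode, HasNodeWithOS), and a label lookup (GetNodeOS). A hedged usage sketch against a fake clientset; the wiring below is illustrative, not part of the commit:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func main() {
	// A cluster with a single Windows node.
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node-1",
			Labels: map[string]string{kube.NodeOSLabel: kube.NodeOSWindows},
		},
	}
	client := fake.NewSimpleClientset(node)

	// Succeeds for windows, errors for linux in this cluster.
	fmt.Println(kube.HasNodeWithOS(context.TODO(), kube.NodeOSWindows, client.CoreV1()))
	fmt.Println(kube.HasNodeWithOS(context.TODO(), kube.NodeOSLinux, client.CoreV1()))
}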

pkg/util/kube/node_test.go

@@ -0,0 +1,261 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube
import (
"context"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/vmware-tanzu/velero/pkg/builder"
kubeClientFake "k8s.io/client-go/kubernetes/fake"
clientTesting "k8s.io/client-go/testing"
clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
func TestIsLinuxNode(t *testing.T) {
nodeNoOSLabel := builder.ForNode("fake-node").Result()
nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
nodeLinux := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
tests := []struct {
name string
kubeClientObj []runtime.Object
err string
}{
{
name: "error getting node",
err: "error getting node fake-node: nodes \"fake-node\" not found",
},
{
name: "no os label",
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
},
err: "no os type label for node fake-node",
},
{
name: "os label does not match",
kubeClientObj: []runtime.Object{
nodeWindows,
},
err: "os type windows for node fake-node is not linux",
},
{
name: "succeed",
kubeClientObj: []runtime.Object{
nodeLinux,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClientBuilder := clientFake.NewClientBuilder()
fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)
fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()
err := IsLinuxNode(context.TODO(), "fake-node", fakeClient)
if err != nil {
assert.EqualError(t, err, test.err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestWithLinuxNode(t *testing.T) {
nodeWindows := builder.ForNode("fake-node-1").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
nodeLinux := builder.ForNode("fake-node-2").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
tests := []struct {
name string
kubeClientObj []runtime.Object
result bool
}{
{
name: "error listing node",
},
{
name: "with node of other type",
kubeClientObj: []runtime.Object{
nodeWindows,
},
},
{
name: "with node of the same type",
kubeClientObj: []runtime.Object{
nodeWindows,
nodeLinux,
},
result: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClientBuilder := clientFake.NewClientBuilder()
fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)
fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()
result := withOSNode(context.TODO(), fakeClient, "linux", velerotest.NewLogger())
assert.Equal(t, test.result, result)
})
}
}
func TestGetNodeOSType(t *testing.T) {
nodeNoOSLabel := builder.ForNode("fake-node").Result()
nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
nodeLinux := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
tests := []struct {
name string
kubeClientObj []runtime.Object
err string
expectedOSType string
}{
{
name: "error getting node",
err: "error getting node fake-node: nodes \"fake-node\" not found",
},
{
name: "no os label",
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
},
},
{
name: "windows node",
kubeClientObj: []runtime.Object{
nodeWindows,
},
expectedOSType: "windows",
},
{
name: "linux node",
kubeClientObj: []runtime.Object{
nodeLinux,
},
expectedOSType: "linux",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := kubeClientFake.NewSimpleClientset(test.kubeClientObj...)
osType, err := GetNodeOS(context.TODO(), "fake-node", fakeKubeClient.CoreV1())
if err != nil {
assert.EqualError(t, err, test.err)
} else {
assert.Equal(t, test.expectedOSType, osType)
}
})
}
}
func TestHasNodeWithOS(t *testing.T) {
nodeNoOSLabel := builder.ForNode("fake-node-1").Result()
nodeWindows := builder.ForNode("fake-node-2").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
nodeLinux := builder.ForNode("fake-node-3").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
tests := []struct {
name string
kubeClientObj []runtime.Object
kubeReactors []reactor
os string
err string
}{
{
name: "os is empty",
err: "invalid node OS",
},
{
name: "error to list node",
kubeReactors: []reactor{
{
verb: "list",
resource: "nodes",
reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("fake-list-error")
},
},
},
os: "linux",
err: "error listing nodes with OS linux: fake-list-error",
},
{
name: "no expected node - no node",
os: "linux",
err: "node with OS linux doesn't exist",
},
{
name: "no expected node - no node with label",
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
nodeWindows,
},
os: "linux",
err: "node with OS linux doesn't exist",
},
{
name: "succeed",
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
nodeWindows,
nodeLinux,
},
os: "windows",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := kubeClientFake.NewSimpleClientset(test.kubeClientObj...)
for _, reactor := range test.kubeReactors {
fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
}
err := HasNodeWithOS(context.TODO(), test.os, fakeKubeClient.CoreV1())
if test.err != "" {
assert.EqualError(t, err, test.err)
} else {
assert.NoError(t, err)
}
})
}
}
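
The three expected error strings in this table ("invalid node OS", "error listing nodes with OS ...", "node with OS ... doesn't exist") suggest an implementation along these lines; this is a reconstruction from the tests, not a copy of the source:

package kube

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// HasNodeWithOS returns nil only when at least one node carries the
// kubernetes.io/os=<os> label.
func HasNodeWithOS(ctx context.Context, os string, nodeClient corev1client.CoreV1Interface) error {
	if os == "" {
		return errors.New("invalid node OS")
	}

	nodes, err := nodeClient.Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("kubernetes.io/os=%s", os),
	})
	if err != nil {
		return errors.Wrapf(err, "error listing nodes with OS %s", os)
	}

	if len(nodes.Items) == 0 {
		return errors.Errorf("node with OS %s doesn't exist", os)
	}

	return nil
}
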

View File

@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 	"time"
 
 	jsonpatch "github.com/evanphx/json-patch/v5"
@@ -429,3 +430,47 @@ func DiagnosePV(pv *corev1api.PersistentVolume) string {
diag := fmt.Sprintf("PV %s, phase %s, reason %s, message %s\n", pv.Name, pv.Status.Phase, pv.Status.Reason, pv.Status.Message)
return diag
}

func GetPVCAttachingNodeOS(pvc *corev1api.PersistentVolumeClaim, nodeClient corev1client.CoreV1Interface,
storageClient storagev1.StorageV1Interface, log logrus.FieldLogger) (string, error) {
var nodeOS string
var scFsType string
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1api.PersistentVolumeBlock {
log.Infof("Use linux node for block mode PVC %s/%s", pvc.Namespace, pvc.Name)
return NodeOSLinux, nil
}
if value := pvc.Annotations[KubeAnnSelectedNode]; value != "" {
os, err := GetNodeOS(context.Background(), value, nodeClient)
if err != nil {
return "", errors.Wrapf(err, "error to get os from node %s for PVC %s/%s", value, pvc.Namespace, pvc.Name)
}
nodeOS = os
}
if pvc.Spec.StorageClassName != nil {
sc, err := storageClient.StorageClasses().Get(context.Background(), *pvc.Spec.StorageClassName, metav1.GetOptions{})
if err != nil {
return "", errors.Wrapf(err, "error to get storage class %s", *pvc.Spec.StorageClassName)
}
if sc.Parameters != nil {
scFsType = strings.ToLower(sc.Parameters["csi.storage.k8s.io/fstype"])
}
}
if nodeOS != "" {
log.Infof("Deduced node os %s from selected node for PVC %s/%s (fsType %s)", nodeOS, pvc.Namespace, pvc.Name, scFsType)
return nodeOS, nil
}
if scFsType == "ntfs" {
log.Infof("Deduced Windows node os from fsType for PVC %s/%s", pvc.Namespace, pvc.Name)
return NodeOSWindows, nil
}
log.Warnf("Cannot deduce node os for PVC %s/%s, default to linux", pvc.Namespace, pvc.Name)
return NodeOSLinux, nil
}
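
A hypothetical call site for the new helper: fs-backup only runs on linux nodes, so a backup path could gate on the deduced OS as below. canUseFSBackup and its wiring are invented for illustration; GetPVCAttachingNodeOS and the NodeOS constants come from this diff, assuming the helper lives in pkg/util/kube as elsewhere in Velero:

package example

import (
	"context"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func canUseFSBackup(ctx context.Context, client kubernetes.Interface, ns, pvcName string, log logrus.FieldLogger) (bool, error) {
	pvc, err := client.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
	if err != nil {
		return false, errors.Wrapf(err, "error getting PVC %s/%s", ns, pvcName)
	}

	nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, client.CoreV1(), client.StorageV1(), log)
	if err != nil {
		return false, err
	}

	// Windows-attached volumes must go through the data mover instead.
	return nodeOS == kube.NodeOSLinux, nil
}
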

View File

@@ -33,6 +33,7 @@ import (
 	clientTesting "k8s.io/client-go/testing"
 
 	"github.com/vmware-tanzu/velero/pkg/builder"
+	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 )
@@ -1561,3 +1562,149 @@ func TestDiagnosePV(t *testing.T) {
})
}
}

func TestGetPVCAttachingNodeOS(t *testing.T) {
storageClass := "fake-storage-class"
nodeNoOSLabel := builder.ForNode("fake-node").Result()
nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
pvcObj := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-namespace",
Name: "fake-pvc",
},
}
blockMode := corev1api.PersistentVolumeBlock
pvcObjBlockMode := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-namespace",
Name: "fake-pvc",
},
Spec: corev1api.PersistentVolumeClaimSpec{
VolumeMode: &blockMode,
},
}
pvcObjWithNode := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-namespace",
Name: "fake-pvc",
Annotations: map[string]string{KubeAnnSelectedNode: "fake-node"},
},
}
pvcObjWithStorageClass := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-namespace",
Name: "fake-pvc",
},
Spec: corev1api.PersistentVolumeClaimSpec{
StorageClassName: &storageClass,
},
}
pvcObjWithBoth := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-namespace",
Name: "fake-pvc",
Annotations: map[string]string{KubeAnnSelectedNode: "fake-node"},
},
Spec: corev1api.PersistentVolumeClaimSpec{
StorageClassName: &storageClass,
},
}
scObjWithoutFSType := &storagev1api.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-storage-class",
},
}
scObjWithFSType := &storagev1api.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-storage-class",
},
Parameters: map[string]string{"csi.storage.k8s.io/fstype": "ntfs"},
}
tests := []struct {
name string
pvc *corev1api.PersistentVolumeClaim
kubeClientObj []runtime.Object
expectedNodeOS string
err string
}{
{
name: "no selected node and storage class",
pvc: pvcObj,
expectedNodeOS: NodeOSLinux,
},
{
name: "node doesn't exist",
pvc: pvcObjWithNode,
err: "error to get os from node fake-node for PVC fake-namespace/fake-pvc: error getting node fake-node: nodes \"fake-node\" not found",
},
{
name: "node without os label",
pvc: pvcObjWithNode,
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
},
expectedNodeOS: NodeOSLinux,
},
{
name: "sc doesn't exist",
pvc: pvcObjWithStorageClass,
err: "error to get storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
},
{
name: "sc without fsType",
pvc: pvcObjWithStorageClass,
kubeClientObj: []runtime.Object{
scObjWithoutFSType,
},
expectedNodeOS: NodeOSLinux,
},
{
name: "deduce from node os",
pvc: pvcObjWithBoth,
kubeClientObj: []runtime.Object{
nodeWindows,
scObjWithFSType,
},
expectedNodeOS: NodeOSWindows,
},
{
name: "deduce from sc",
pvc: pvcObjWithBoth,
kubeClientObj: []runtime.Object{
nodeNoOSLabel,
scObjWithFSType,
},
expectedNodeOS: NodeOSWindows,
},
{
name: "block access",
pvc: pvcObjBlockMode,
expectedNodeOS: NodeOSLinux,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
var kubeClient kubernetes.Interface = fakeKubeClient
nodeOS, err := GetPVCAttachingNodeOS(test.pvc, kubeClient.CoreV1(), kubeClient.StorageV1(), velerotest.NewLogger())
			if test.err != "" {
assert.EqualError(t, err, test.err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, test.expectedNodeOS, nodeOS)
})
}
}

View File

@@ -67,8 +67,12 @@ func MigrationWithFS() {
 }
 
 func (m *migrationE2E) Init() error {
+	By("Call the base E2E init", func() {
+		Expect(m.TestCase.Init()).To(Succeed())
+	})
+
 	By("Skip check", func() {
-		if m.VeleroCfg.DefaultClusterContext == "" && m.VeleroCfg.StandbyClusterContext == "" {
+		if m.VeleroCfg.DefaultClusterContext == "" || m.VeleroCfg.StandbyClusterContext == "" {
 			Skip("Migration test needs 2 clusters")
 		}
@@ -81,10 +85,6 @@ func (m *migrationE2E) Init() error {
 		}
 	})
 
-	By("Call the base E2E init", func() {
-		Expect(m.TestCase.Init()).To(Succeed())
-	})
-
 	m.kibishiiData = *kibishii.DefaultKibishiiData
 	m.kibishiiData.ExpectedNodes = 3
 	m.CaseBaseName = "migration-" + m.UUIDgen

View File

@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"strings"
 	"time"
 
 	"github.com/pkg/errors"
@@ -413,6 +414,15 @@ func createVeleroResources(ctx context.Context, cli, namespace string, args []st
 		return errors.Wrapf(err, "failed to run velero install dry run command, stdout=%s, stderr=%s", stdout, stderr)
 	}
 
+	// From v1.15, the Restic uploader is deprecated, and the install CLI
+	// prints a deprecation warning before the generated JSON.
+	// Skip past the warning by advancing stdout to the first
+	// curly bracket so that only the JSON remains.
+	if stdout[0] != '{' {
+		newIndex := strings.Index(stdout, "{")
+		stdout = stdout[newIndex:]
+	}
+
 	resources := &unstructured.UnstructuredList{}
 	if err := json.Unmarshal([]byte(stdout), resources); err != nil {
 		return errors.Wrapf(err, "failed to unmarshal the resources: %s", stdout)