Merge pull request #8504 from Lyndon-Li/linux-windows-hybrid-deploy
Linux windows hybrid deploy
changelogs/unreleased/8504-Lyndon-Li (new file)
@@ -0,0 +1 @@
+Fix issue #8416, #8417, deploy Velero server and node-agent in linux/Windows hybrid env
@@ -66,6 +66,7 @@ type Options struct {
BackupStorageConfig flag.Map
VolumeSnapshotConfig flag.Map
UseNodeAgent bool
+UseNodeAgentWindows bool
PrivilegedNodeAgent bool
Wait bool
UseVolumeSnapshots bool
@@ -117,7 +118,8 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
flags.BoolVar(&o.UseVolumeSnapshots, "use-volume-snapshots", o.UseVolumeSnapshots, "Whether or not to create snapshot location automatically. Set to false if you do not plan to create volume snapshots via a storage provider.")
flags.BoolVar(&o.RestoreOnly, "restore-only", o.RestoreOnly, "Run the server in restore-only mode. Optional.")
flags.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Generate resources, but don't send them to the cluster. Use with -o. Optional.")
-flags.BoolVar(&o.UseNodeAgent, "use-node-agent", o.UseNodeAgent, "Create Velero node-agent daemonset. Optional. Velero node-agent hosts Velero modules that need to run in one or more nodes(i.e. Restic, Kopia).")
+flags.BoolVar(&o.UseNodeAgent, "use-node-agent", o.UseNodeAgent, "Create Velero node-agent daemonset. Optional. Velero node-agent hosts and associates Velero modules that need to run in one or more Linux nodes.")
+flags.BoolVar(&o.UseNodeAgentWindows, "use-node-agent-windows", o.UseNodeAgentWindows, "Create Velero node-agent-windows daemonset. Optional. Velero node-agent-windows hosts and associates Velero modules that need to run in one or more Windows nodes.")
flags.BoolVar(&o.PrivilegedNodeAgent, "privileged-node-agent", o.PrivilegedNodeAgent, "Use privileged mode for the node agent. Optional. Required to backup block devices.")
flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.")
flags.DurationVar(&o.DefaultRepoMaintenanceFrequency, "default-repo-maintain-frequency", o.DefaultRepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default. Optional.")
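For context, a minimal sketch of how the new flag is meant to be used next to the existing one when installing into a mixed Linux/Windows cluster (the provider, bucket, and credentials values are placeholders, not taken from this change):

    velero install --provider <provider> --bucket <bucket> --secret-file <credentials-file> --use-node-agent --use-node-agent-windows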
@@ -267,6 +269,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) {
SecretData: secretData,
RestoreOnly: o.RestoreOnly,
UseNodeAgent: o.UseNodeAgent,
+UseNodeAgentWindows: o.UseNodeAgentWindows,
PrivilegedNodeAgent: o.PrivilegedNodeAgent,
UseVolumeSnapshots: o.UseVolumeSnapshots,
BSLConfig: o.BackupStorageConfig.Data(),
@@ -392,7 +395,12 @@ func (o *Options) Run(c *cobra.Command, f client.Factory) error {
if o.UseNodeAgent {
fmt.Println("Waiting for node-agent daemonset to be ready.")
-if _, err = install.DaemonSetIsReady(dynamicFactory, o.Namespace); err != nil {
+if _, err = install.NodeAgentIsReady(dynamicFactory, o.Namespace); err != nil {
return errors.Wrap(err, errorMsg)
}
+
+fmt.Println("Waiting for node-agent-windows daemonset to be ready.")
+if _, err = install.NodeAgentWindowsIsReady(dynamicFactory, o.Namespace); err != nil {
+return errors.Wrap(err, errorMsg)
+}
}
@@ -200,13 +200,31 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
},
},
}
-mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
-Scheme: scheme,
-Cache: cacheOption,
-})

+var mgr manager.Manager
+retry := 10
+for {
+mgr, err = ctrl.NewManager(clientConfig, ctrl.Options{
+Scheme: scheme,
+Cache: cacheOption,
+})
+if err == nil {
+break
+}
+
+retry--
+if retry == 0 {
+break
+}
+
+logger.WithError(err).Warn("Failed to create controller manager, need retry")
+
+time.Sleep(time.Second)
+}

if err != nil {
cancelFunc()
-return nil, err
+return nil, errors.Wrap(err, "error creating controller manager")
}

s := &nodeAgentServer{
@@ -239,17 +239,34 @@ func newServer(f client.Factory, config *config.Config, logger *logrus.Logger) (
ctrl.SetLogger(logrusr.New(logger))

-mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
-Scheme: scheme,
-Cache: cache.Options{
-DefaultNamespaces: map[string]cache.Config{
-f.Namespace(): {},
-},
-},
-})
+var mgr manager.Manager
+retry := 10
+for {
+mgr, err = ctrl.NewManager(clientConfig, ctrl.Options{
+Scheme: scheme,
+Cache: cache.Options{
+DefaultNamespaces: map[string]cache.Config{
+f.Namespace(): {},
+},
+},
+})
+if err == nil {
+break
+}
+
+retry--
+if retry == 0 {
+break
+}
+
+logger.WithError(err).Warn("Failed to create controller manager, need retry")
+
+time.Sleep(time.Second)
+}

if err != nil {
cancelFunc()
-return nil, err
+return nil, errors.Wrap(err, "error creating controller manager")
}

credentialFileStore, err := credentials.NewNamespacedFileStore(
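The same create-with-retry pattern now appears in both the node-agent server and the main Velero server above. As a hedged sketch only (not part of this PR; the helper name and signature are hypothetical), the loop could be factored into a shared function:

    import (
        "time"

        "github.com/pkg/errors"
        "github.com/sirupsen/logrus"
        "k8s.io/client-go/rest"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/manager"
    )

    // newManagerWithRetry attempts ctrl.NewManager up to `attempts` times,
    // sleeping one second between failures, and wraps the last error for context.
    func newManagerWithRetry(cfg *rest.Config, opts ctrl.Options, attempts int, log logrus.FieldLogger) (manager.Manager, error) {
        var mgr manager.Manager
        var err error
        for i := 0; i < attempts; i++ {
            mgr, err = ctrl.NewManager(cfg, opts)
            if err == nil {
                return mgr, nil
            }
            log.WithError(err).Warn("Failed to create controller manager, need retry")
            time.Sleep(time.Second)
        }
        return nil, errors.Wrap(err, "error creating controller manager")
    }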
@@ -1146,7 +1146,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "node-agent-pod-1",
-Labels: map[string]string{"name": "node-agent"},
+Labels: map[string]string{"role": "node-agent"},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
@@ -627,7 +627,7 @@ func Test_ReastoreDiagnoseExpose(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "node-agent-pod-1",
-Labels: map[string]string{"name": "node-agent"},
+Labels: map[string]string{"role": "node-agent"},
},
Spec: corev1.PodSpec{
NodeName: "fake-node",
@@ -57,8 +57,13 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
userID := int64(0)
mountPropagationMode := corev1.MountPropagationHostToContainer

+dsName := "node-agent"
+if c.forWindows {
+dsName = "node-agent-windows"
+}
+
daemonSet := &appsv1.DaemonSet{
-ObjectMeta: objectMeta(namespace, "node-agent"),
+ObjectMeta: objectMeta(namespace, dsName),
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
@@ -66,13 +71,14 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
-"name": "node-agent",
+"name": dsName,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels(c.labels, map[string]string{
-"name": "node-agent",
+"name": dsName,
+"role": "node-agent",
}),
Annotations: c.annotations,
},
@@ -107,7 +113,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
},
Containers: []corev1.Container{
{
-Name: "node-agent",
+Name: dsName,
Image: c.image,
Ports: containerPorts(),
ImagePullPolicy: pullPolicy,
@@ -205,6 +211,24 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
}...)
}

+if c.forWindows {
+daemonSet.Spec.Template.Spec.SecurityContext = nil
+daemonSet.Spec.Template.Spec.Containers[0].SecurityContext = nil
+daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
+"kubernetes.io/os": "windows",
+}
+daemonSet.Spec.Template.Spec.OS = &corev1.PodOS{
+Name: "windows",
+}
+} else {
+daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
+"kubernetes.io/os": "linux",
+}
+daemonSet.Spec.Template.Spec.OS = &corev1.PodOS{
+Name: "linux",
+}
+}
+
daemonSet.Spec.Template.Spec.Containers[0].Env = append(daemonSet.Spec.Template.Spec.Containers[0].Env, c.envVars...)

return daemonSet
@@ -24,10 +24,23 @@ import (
)

func TestDaemonSet(t *testing.T) {
userID := int64(0)
boolFalse := false
boolTrue := true

ds := DaemonSet("velero")

assert.Equal(t, "node-agent", ds.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "velero", ds.ObjectMeta.Namespace)
assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["name"])
assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
assert.Equal(t, "linux", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
assert.Equal(t, "linux", string(ds.Spec.Template.Spec.OS.Name))
assert.Equal(t, corev1.PodSecurityContext{RunAsUser: &userID}, *ds.Spec.Template.Spec.SecurityContext)
assert.Equal(t, corev1.SecurityContext{Privileged: &boolFalse}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)

ds = DaemonSet("velero", WithPrivilegedNodeAgent(true))
assert.Equal(t, corev1.SecurityContext{Privileged: &boolTrue}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)

ds = DaemonSet("velero", WithImage("velero/velero:v0.11"))
assert.Equal(t, "velero/velero:v0.11", ds.Spec.Template.Spec.Containers[0].Image)
@@ -47,4 +60,14 @@ func TestDaemonSet(t *testing.T) {
ds = DaemonSet("velero", WithServiceAccountName("test-sa"))
assert.Equal(t, "test-sa", ds.Spec.Template.Spec.ServiceAccountName)

+ds = DaemonSet("velero", WithForWindows())
+assert.Equal(t, "node-agent-windows", ds.Spec.Template.Spec.Containers[0].Name)
+assert.Equal(t, "velero", ds.ObjectMeta.Namespace)
+assert.Equal(t, "node-agent-windows", ds.Spec.Template.ObjectMeta.Labels["name"])
+assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
+assert.Equal(t, "windows", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
+assert.Equal(t, "windows", string(ds.Spec.Template.Spec.OS.Name))
+assert.Equal(t, (*corev1.PodSecurityContext)(nil), ds.Spec.Template.Spec.SecurityContext)
+assert.Equal(t, (*corev1.SecurityContext)(nil), ds.Spec.Template.Spec.Containers[0].SecurityContext)
}
@@ -58,6 +58,7 @@ type podTemplateConfig struct {
repoMaintenanceJobConfigMap string
nodeAgentConfigMap string
itemBlockWorkerCount int
+forWindows bool
}

func WithImage(image string) podTemplateOption {
@@ -219,6 +220,12 @@ func WithItemBlockWorkerCount(itemBlockWorkerCount int) podTemplateOption {
}
}

+func WithForWindows() podTemplateOption {
+return func(c *podTemplateConfig) {
+c.forWindows = true
+}
+}
+
func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment {
// TODO: Add support for server args
c := &podTemplateConfig{
@@ -324,6 +331,12 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyAlways,
ServiceAccountName: c.serviceAccountName,
+NodeSelector: map[string]string{
+"kubernetes.io/os": "linux",
+},
+OS: &corev1.PodOS{
+Name: "linux",
+},
Containers: []corev1.Container{
{
Name: "velero",
@@ -99,4 +99,7 @@ func TestDeployment(t *testing.T) {
deploy = Deployment("velero", WithRepoMaintenanceJobConfigMap("test-repo-maintenance-config"))
assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2)
assert.Equal(t, "--repo-maintenance-job-configmap=test-repo-maintenance-config", deploy.Spec.Template.Spec.Containers[0].Args[1])
+
+assert.Equal(t, "linux", deploy.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
+assert.Equal(t, "linux", string(deploy.Spec.Template.Spec.OS.Name))
}
@@ -206,9 +206,19 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e
return isReady, err
}

-// DaemonSetIsReady will poll the Kubernetes API server to ensure the node-agent daemonset is ready, i.e. that
+// NodeAgentIsReady will poll the Kubernetes API server to ensure the node-agent daemonset is ready, i.e. that
// pods are scheduled and available on all of the desired nodes.
-func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
+func NodeAgentIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
+return daemonSetIsReady(factory, namespace, "node-agent")
+}
+
+// NodeAgentWindowsIsReady will poll the Kubernetes API server to ensure the node-agent-windows daemonset is ready, i.e. that
+// pods are scheduled and available on all of the desired nodes.
+func NodeAgentWindowsIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
+return daemonSetIsReady(factory, namespace, "node-agent-windows")
+}
+
+func daemonSetIsReady(factory client.DynamicFactory, namespace string, name string) (bool, error) {
gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "DaemonSet")
apiResource := metav1.APIResource{
Name: "daemonsets",
@@ -225,7 +235,7 @@ func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, er
var readyObservations int32

err = wait.PollUntilContextTimeout(context.Background(), time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
-unstructuredDaemonSet, err := c.Get("node-agent", metav1.GetOptions{})
+unstructuredDaemonSet, err := c.Get(name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
} else if err != nil {
@@ -127,7 +127,7 @@ func TestDeploymentIsReady(t *testing.T) {
assert.True(t, ready)
}

-func TestDaemonSetIsReady(t *testing.T) {
+func TestNodeAgentIsReady(t *testing.T) {
daemonset := &appsv1.DaemonSet{
Status: appsv1.DaemonSetStatus{
NumberAvailable: 1,
@@ -143,7 +143,28 @@ func TestDaemonSetIsReady(t *testing.T) {
factory := &test.FakeDynamicFactory{}
factory.On("ClientForGroupVersionResource", mock.Anything, mock.Anything, mock.Anything).Return(dc, nil)

-ready, err := DaemonSetIsReady(factory, "velero")
+ready, err := NodeAgentIsReady(factory, "velero")
require.NoError(t, err)
assert.True(t, ready)
}
+
+func TestNodeAgentWindowsIsReady(t *testing.T) {
+daemonset := &appsv1.DaemonSet{
+Status: appsv1.DaemonSetStatus{
+NumberAvailable: 0,
+DesiredNumberScheduled: 0,
+},
+}
+obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(daemonset)
+require.NoError(t, err)
+
+dc := &test.FakeDynamicClient{}
+dc.On("Get", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: obj}, nil)
+
+factory := &test.FakeDynamicFactory{}
+factory.On("ClientForGroupVersionResource", mock.Anything, mock.Anything, mock.Anything).Return(dc, nil)
+
+ready, err := NodeAgentWindowsIsReady(factory, "velero")
+require.NoError(t, err)
+assert.True(t, ready)
+}
@@ -246,6 +246,7 @@ type VeleroOptions struct {
SecretData []byte
RestoreOnly bool
UseNodeAgent bool
+UseNodeAgentWindows bool
PrivilegedNodeAgent bool
UseVolumeSnapshots bool
BSLConfig map[string]string
@@ -395,7 +396,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
fmt.Printf("error appending Deployment %s: %s\n", deploy.GetName(), err.Error())
}

-if o.UseNodeAgent {
+if o.UseNodeAgent || o.UseNodeAgentWindows {
dsOpts := []podTemplateOption{
WithAnnotations(o.PodAnnotations),
WithLabels(o.PodLabels),
@@ -414,9 +415,20 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
dsOpts = append(dsOpts, WithNodeAgentConfigMap(o.NodeAgentConfigMap))
}

-ds := DaemonSet(o.Namespace, dsOpts...)
-if err := appendUnstructured(resources, ds); err != nil {
-fmt.Printf("error appending DaemonSet %s: %s\n", ds.GetName(), err.Error())
+if o.UseNodeAgent {
+ds := DaemonSet(o.Namespace, dsOpts...)
+if err := appendUnstructured(resources, ds); err != nil {
+fmt.Printf("error appending DaemonSet %s: %s\n", ds.GetName(), err.Error())
+}
+}
+
+if o.UseNodeAgentWindows {
+dsOpts = append(dsOpts, WithForWindows())
+
+dsWin := DaemonSet(o.Namespace, dsOpts...)
+if err := appendUnstructured(resources, dsWin); err != nil {
+fmt.Printf("error appending DaemonSet %s: %s\n", dsWin.GetName(), err.Error())
+}
+}
}
@@ -77,21 +77,22 @@ func TestAllCRDs(t *testing.T) {
func TestAllResources(t *testing.T) {
option := &VeleroOptions{
-Namespace: "velero",
-SecretData: []byte{'a'},
-UseVolumeSnapshots: true,
-UseNodeAgent: true,
+Namespace: "velero",
+SecretData: []byte{'a'},
+UseVolumeSnapshots: true,
+UseNodeAgent: true,
+UseNodeAgentWindows: true,
}
list := AllResources(option)

-objects := map[string]unstructured.Unstructured{}
+objects := map[string][]unstructured.Unstructured{}
for _, item := range list.Items {
-objects[item.GetKind()] = item
+objects[item.GetKind()] = append(objects[item.GetKind()], item)
}

ns, exist := objects["Namespace"]
require.True(t, exist)
-assert.Equal(t, "velero", ns.GetName())
+assert.Equal(t, "velero", ns[0].GetName())

_, exist = objects["ClusterRoleBinding"]
assert.True(t, exist)
@@ -111,6 +112,8 @@ func TestAllResources(t *testing.T) {
_, exist = objects["Deployment"]
assert.True(t, exist)

-_, exist = objects["DaemonSet"]
+ds, exist := objects["DaemonSet"]
assert.True(t, exist)
+
+assert.Len(t, ds, 2)
}
@@ -35,6 +35,9 @@ import (
const (
// daemonSet is the name of the Velero node agent daemonset.
daemonSet = "node-agent"
+
+// nodeAgentRole marks pods with node-agent role on all nodes.
+nodeAgentRole = "node-agent"
)

var (
@@ -116,7 +119,7 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
}

pods := new(v1.PodList)
-parsedSelector, err := labels.Parse(fmt.Sprintf("name=%s", daemonSet))
+parsedSelector, err := labels.Parse(fmt.Sprintf("role=%s", nodeAgentRole))
if err != nil {
return errors.Wrap(err, "fail to parse selector")
}
@@ -128,7 +131,7 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
}

if err != nil {
-return errors.Wrap(err, "failed to list daemonset pods")
+return errors.Wrap(err, "failed to list node-agent pods")
}

for i := range pods.Items {
@@ -108,11 +108,11 @@ func TestIsRunningInNode(t *testing.T) {
corev1.AddToScheme(scheme)

nonNodeAgentPod := builder.ForPod("fake-ns", "fake-pod").Result()
-nodeAgentPodNotRunning := builder.ForPod("fake-ns", "fake-pod").Labels(map[string]string{"name": "node-agent"}).Result()
-nodeAgentPodRunning1 := builder.ForPod("fake-ns", "fake-pod-1").Labels(map[string]string{"name": "node-agent"}).Phase(corev1.PodRunning).Result()
-nodeAgentPodRunning2 := builder.ForPod("fake-ns", "fake-pod-2").Labels(map[string]string{"name": "node-agent"}).Phase(corev1.PodRunning).Result()
+nodeAgentPodNotRunning := builder.ForPod("fake-ns", "fake-pod").Labels(map[string]string{"role": "node-agent"}).Result()
+nodeAgentPodRunning1 := builder.ForPod("fake-ns", "fake-pod-1").Labels(map[string]string{"role": "node-agent"}).Phase(corev1.PodRunning).Result()
+nodeAgentPodRunning2 := builder.ForPod("fake-ns", "fake-pod-2").Labels(map[string]string{"role": "node-agent"}).Phase(corev1.PodRunning).Result()
nodeAgentPodRunning3 := builder.ForPod("fake-ns", "fake-pod-3").
-Labels(map[string]string{"name": "node-agent"}).
+Labels(map[string]string{"role": "node-agent"}).
Phase(corev1.PodRunning).
NodeName("fake-node").
Result()
@@ -20,6 +20,7 @@ import (
"fmt"
"os"
"path/filepath"
+"strings"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -153,6 +154,7 @@ func (r *registry) readPluginsDir(dir string) ([]string, error) {
}

if !executable(file) {
+r.logger.Warnf("Searching plugin skip file %s, not executable, mode %v, ext %s", file.Name(), file.Mode(), strings.ToLower(filepath.Ext(file.Name())))
continue
}
@@ -163,6 +165,15 @@ func (r *registry) readPluginsDir(dir string) ([]string, error) {
// executable determines if a file is executable.
func executable(info os.FileInfo) bool {
+return executableLinux(info) || executableWindows(info)
+}
+
+func executableWindows(info os.FileInfo) bool {
+ext := strings.ToLower(filepath.Ext(info.Name()))
+return (ext == ".exe")
+}
+
+func executableLinux(info os.FileInfo) bool {
/*
When we AND the mode with 0111:
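As a hedged, standalone illustration of the rule introduced above (the helper and the file name are hypothetical, not part of the Velero code): a plugin candidate is now considered executable either when a Unix execute bit is set or when it carries a .exe extension.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // looksExecutable mirrors the check above: any Unix execute bit, or a ".exe" suffix.
    func looksExecutable(info os.FileInfo) bool {
        if info.Mode()&0111 != 0 {
            return true
        }
        return strings.ToLower(filepath.Ext(info.Name())) == ".exe"
    }

    func main() {
        info, err := os.Stat("velero-plugin-example.exe") // placeholder file name
        if err != nil {
            fmt.Println("stat failed:", err)
            return
        }
        fmt.Println("plugin candidate:", looksExecutable(info))
    }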
@@ -45,6 +45,7 @@ func TestNewRegistry(t *testing.T) {
type fakeFileInfo struct {
fs.FileInfo
+name string
mode os.FileMode
}
@@ -52,9 +53,14 @@ func (f *fakeFileInfo) Mode() os.FileMode {
return f.mode
}

+func (f *fakeFileInfo) Name() string {
+return f.name
+}
+
func TestExecutable(t *testing.T) {
tests := []struct {
name string
+fileName string
mode uint32
expectExecutable bool
}{
@@ -90,11 +96,29 @@ func TestExecutable(t *testing.T) {
mode: 0777,
expectExecutable: true,
},
+{
+name: "windows lower case",
+fileName: "test.exe",
+mode: 0,
+expectExecutable: true,
+},
+{
+name: "windows upper case",
+fileName: "test.EXE",
+mode: 0,
+expectExecutable: true,
+},
+{
+name: "windows wrong ext",
+fileName: "test.EXE1",
+mode: 0,
+},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
info := &fakeFileInfo{
+name: test.fileName,
mode: os.FileMode(test.mode),
}
@@ -114,7 +138,9 @@ func TestReadPluginsDir(t *testing.T) {
WithFileAndMode("/plugins/nonexecutable2", []byte("plugin2"), 0644).
WithFileAndMode("/plugins/executable3", []byte("plugin3"), 0755).
WithFileAndMode("/plugins/nested/executable4", []byte("plugin4"), 0755).
-WithFileAndMode("/plugins/nested/nonexecutable5", []byte("plugin4"), 0644)
+WithFileAndMode("/plugins/nested/nonexecutable5", []byte("plugin4"), 0644).
+WithFileAndMode("/plugins/nested/win-exe1.exe", []byte("plugin4"), 0600).
+WithFileAndMode("/plugins/nested/WIN-EXE2.EXE", []byte("plugin4"), 0600)

plugins, err := r.readPluginsDir(dir)
require.NoError(t, err)
@@ -123,6 +149,8 @@ func TestReadPluginsDir(t *testing.T) {
"/plugins/executable1",
"/plugins/executable3",
"/plugins/nested/executable4",
+"/plugins/nested/win-exe1.exe",
+"/plugins/nested/WIN-EXE2.EXE",
}

sort.Strings(plugins)
@@ -260,7 +260,7 @@ func createPodObj(running bool, withVolume bool, withVolumeMounted bool, volumeN
func createNodeAgentPodObj(running bool) *corev1api.Pod {
podObj := builder.ForPod(velerov1api.DefaultNamespace, "fake-node-agent").Result()
-podObj.Labels = map[string]string{"name": "node-agent"}
+podObj.Labels = map[string]string{"role": "node-agent"}

if running {
podObj.Status.Phase = corev1api.PodRunning