fix node-agent node detection logic

Add the namespace to ListOptions so that node-agent node detection
only matches pods in its deployed namespace.

Signed-off-by: Adam Zhang <adam.zhang@broadcom.com>
This commit is contained in:
Adam Zhang
2026-04-03 13:15:00 +08:00
parent 856f1296fc
commit ea057e42fa
3 changed files with 42 additions and 10 deletions

View File

@@ -0,0 +1 @@
Fix issue #9666: fix node-agent node detection in the multiple-instances scenario

View File

@@ -97,7 +97,10 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
}
if crClient != nil {
err = crClient.List(ctx, pods, &ctrlclient.ListOptions{LabelSelector: parsedSelector})
err = crClient.List(ctx, pods, &ctrlclient.ListOptions{
LabelSelector: parsedSelector,
Namespace: namespace,
})
} else {
pods, err = kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: parsedSelector.String()})
}

View File

@@ -118,28 +118,37 @@ func TestIsRunningInNode(t *testing.T) {
Phase(corev1api.PodRunning).
NodeName("fake-node").
Result()
nodeAgentPodOtherNs := builder.ForPod("other-ns", "fake-pod-other").
Labels(map[string]string{"role": "node-agent"}).
Phase(corev1api.PodRunning).
NodeName("fake-node").
Result()
tests := []struct {
name string
kubeClientObj []runtime.Object
namespace string
nodeName string
expectErr string
}{
{
name: "node name is empty",
namespace: "fake-ns",
expectErr: "node name is empty",
},
{
name: "ds pod not found",
nodeName: "fake-node",
name: "ds pod not found",
namespace: "fake-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nonNodeAgentPod,
},
expectErr: "daemonset pod not found in running state in node fake-node",
},
{
name: "ds po are not all running",
nodeName: "fake-node",
name: "ds po are not all running",
namespace: "fake-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nodeAgentPodNotRunning,
nodeAgentPodRunning1,
@@ -147,8 +156,9 @@ func TestIsRunningInNode(t *testing.T) {
expectErr: "daemonset pod not found in running state in node fake-node",
},
{
name: "ds pods wrong node name",
nodeName: "fake-node",
name: "ds pods wrong node name",
namespace: "fake-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nodeAgentPodNotRunning,
nodeAgentPodRunning1,
@@ -157,8 +167,9 @@ func TestIsRunningInNode(t *testing.T) {
expectErr: "daemonset pod not found in running state in node fake-node",
},
{
name: "succeed",
nodeName: "fake-node",
name: "succeed",
namespace: "fake-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nodeAgentPodNotRunning,
nodeAgentPodRunning1,
@@ -166,6 +177,23 @@ func TestIsRunningInNode(t *testing.T) {
nodeAgentPodRunning3,
},
},
{
name: "cross-namespace isolation - pod in wrong namespace on same node",
namespace: "fake-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nodeAgentPodOtherNs,
},
expectErr: "daemonset pod not found in running state in node fake-node",
},
{
name: "cross-namespace isolation - pod in correct namespace on same node",
namespace: "other-ns",
nodeName: "fake-node",
kubeClientObj: []runtime.Object{
nodeAgentPodOtherNs,
},
},
}
for _, test := range tests {
@@ -175,7 +203,7 @@ func TestIsRunningInNode(t *testing.T) {
fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()
err := IsRunningInNode(t.Context(), "", test.nodeName, fakeClient)
err := IsRunningInNode(t.Context(), test.namespace, test.nodeName, fakeClient)
if test.expectErr == "" {
assert.NoError(t, err)
} else {