Mirror of https://github.com/vmware-tanzu/pinniped.git

Allow users to specify the RunAsUser and RunAsGroup for the kube-cert-agent container
@@ -104,6 +104,12 @@ type AgentConfig struct {
 
     // PriorityClassName optionally sets the PriorityClassName for the agent's pod.
     PriorityClassName string
+
+    // RunAsUser is the UID to run the entrypoint of the container process
+    RunAsUser *int64
+
+    // RunAsGroup is the GID to run the entrypoint of the container process
+    RunAsGroup *int64
 }
 
 // Only select using the unique label which will not match the pods of any other Deployment.
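
For orientation, here is a minimal sketch of how a caller might populate the two new fields. Leaving both fields nil preserves the previous behavior of running the agent as root. The AgentConfig type below is a local stand-in for Pinniped's internal struct (only the relevant fields are mirrored), and the UID/GID value of 1001 is hypothetical.

package main

import (
    "fmt"

    "k8s.io/utils/ptr"
)

// AgentConfig mirrors only the fields touched by this change; the real struct
// lives in Pinniped's internal kubecertagent package.
type AgentConfig struct {
    PriorityClassName string
    RunAsUser         *int64 // nil means keep the default (root, UID 0)
    RunAsGroup        *int64 // nil means keep the default (root, GID 0)
}

func main() {
    // Hypothetical override: run the agent as UID/GID 1001 instead of root.
    cfg := AgentConfig{
        RunAsUser:  ptr.To[int64](1001),
        RunAsGroup: ptr.To[int64](1001),
    }
    fmt.Println(*cfg.RunAsUser, *cfg.RunAsGroup)
}
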
@@ -585,6 +591,29 @@ func (c *agentController) newestRunningPodOnSchedulableNode(ctx context.Context,
     return pods[0], nil
 }
 
+func (c *agentController) getPodSecurityContext() *corev1.PodSecurityContext {
+    root := int64(0)
+
+    podSecurityContext := &corev1.PodSecurityContext{
+        RunAsUser:  &root,
+        RunAsGroup: &root,
+    }
+
+    if c.cfg.RunAsUser != nil {
+        podSecurityContext.RunAsUser = c.cfg.RunAsUser
+    }
+
+    if *podSecurityContext.RunAsUser != root {
+        podSecurityContext.RunAsNonRoot = ptr.To(true)
+    }
+
+    if c.cfg.RunAsGroup != nil {
+        podSecurityContext.RunAsGroup = c.cfg.RunAsGroup
+    }
+
+    return podSecurityContext
+}
+
 func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
     var volumeMounts []corev1.VolumeMount
     if len(controllerManagerPod.Spec.Containers) > 0 {
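
The new getPodSecurityContext method defaults both IDs to root and sets RunAsNonRoot only when the effective UID ends up non-zero (the GID does not influence that flag). The sketch below exercises that decision logic in isolation; buildPodSecurityContext and describe are local stand-ins invented for this example, while corev1 and k8s.io/utils/ptr are the same packages the diff itself uses.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/utils/ptr"
)

// buildPodSecurityContext is a local stand-in for the getPodSecurityContext
// method above, copied here only so this sketch compiles on its own.
func buildPodSecurityContext(runAsUser, runAsGroup *int64) *corev1.PodSecurityContext {
    root := int64(0)

    psc := &corev1.PodSecurityContext{
        RunAsUser:  &root,
        RunAsGroup: &root,
    }

    if runAsUser != nil {
        psc.RunAsUser = runAsUser
    }

    // RunAsNonRoot is only asserted when the effective UID is not root.
    if *psc.RunAsUser != root {
        psc.RunAsNonRoot = ptr.To(true)
    }

    if runAsGroup != nil {
        psc.RunAsGroup = runAsGroup
    }

    return psc
}

func describe(label string, psc *corev1.PodSecurityContext) {
    nonRoot := "unset"
    if psc.RunAsNonRoot != nil {
        nonRoot = fmt.Sprintf("%t", *psc.RunAsNonRoot)
    }
    fmt.Printf("%s: uid=%d gid=%d runAsNonRoot=%s\n", label, *psc.RunAsUser, *psc.RunAsGroup, nonRoot)
}

func main() {
    // No overrides: the historical default of running as root, RunAsNonRoot left unset.
    describe("defaults", buildPodSecurityContext(nil, nil))

    // Hypothetical non-root UID/GID: honored, and RunAsNonRoot is set to true.
    describe("non-root", buildPodSecurityContext(ptr.To[int64](1001), ptr.To[int64](1001)))

    // Explicit root: honored, and RunAsNonRoot stays unset.
    describe("explicit root", buildPodSecurityContext(ptr.To[int64](0), ptr.To[int64](0)))
}
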
@@ -662,10 +691,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
             Tolerations:       controllerManagerPod.Spec.Tolerations,
             // We need to run the agent pod as root since the file permissions
             // on the cluster keypair usually restricts access to only root.
-            SecurityContext: &corev1.PodSecurityContext{
-                RunAsUser:  ptr.To[int64](0),
-                RunAsGroup: ptr.To[int64](0),
-            },
+            SecurityContext:   c.getPodSecurityContext(),
             HostNetwork:       controllerManagerPod.Spec.HostNetwork,
             PriorityClassName: c.cfg.PriorityClassName,
         },

@@ -230,6 +230,8 @@ func TestAgentController(t *testing.T) {
         name                   string
         discoveryURLOverride   *string
         agentPriorityClassName string
+        runAsUser              *int64
+        runAsGroup             *int64
         pinnipedObjects        []runtime.Object
         kubeObjects            []runtime.Object
         addKubeReactions       func(*kubefake.Clientset)
@@ -450,6 +452,64 @@ func TestAgentController(t *testing.T) {
                 LastUpdateTime: metav1.NewTime(now),
             },
         },
+        {
+            name:       "created new deployment with overridden runAs, no agent pods running yet",
+            runAsUser:  ptr.To[int64](1),
+            runAsGroup: ptr.To[int64](2),
+            pinnipedObjects: []runtime.Object{
+                initialCredentialIssuer,
+            },
+            kubeObjects: []runtime.Object{
+                &corev1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "kube-system",
+                        Name:              "kube-controller-manager-3",
+                        Labels:            map[string]string{"component": "kube-controller-manager"},
+                        CreationTimestamp: metav1.NewTime(now.Add(-1 * time.Hour)),
+                    },
+                    Spec:   corev1.PodSpec{NodeName: schedulableControllerManagerNode.Name},
+                    Status: corev1.PodStatus{Phase: corev1.PodRunning},
+                },
+                healthyKubeControllerManagerPod,
+                &corev1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "kube-system",
+                        Name:              "kube-controller-manager-2",
+                        Labels:            map[string]string{"component": "kube-controller-manager"},
+                        CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
+                    },
+                    Spec:   corev1.PodSpec{NodeName: schedulableControllerManagerNode.Name},
+                    Status: corev1.PodStatus{Phase: corev1.PodRunning},
+                },
+                pendingAgentPod,
+                schedulableControllerManagerNode,
+            },
+            wantDistinctErrors: []string{
+                "could not find a healthy agent pod (1 candidate)",
+            },
+            alsoAllowUndesiredDistinctErrors: []string{
+                // due to the high amount of nondeterminism in this test, this error will sometimes also happen, but is not required to happen
+                `could not ensure agent deployment: deployments.apps "pinniped-concierge-kube-cert-agent" already exists`,
+            },
+            wantDistinctLogs: []string{
+                `{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
+            },
+            wantAgentDeployment: modifiedHealthyHealthyAgentDeployment(func(deployment *appsv1.Deployment) {
+                deployment.Spec.Template.Spec.SecurityContext = &corev1.PodSecurityContext{
+                    RunAsUser:    ptr.To[int64](1),
+                    RunAsGroup:   ptr.To[int64](2),
+                    RunAsNonRoot: ptr.To(true),
+                }
+            }),
+            wantDeploymentActionVerbs: []string{"list", "watch", "create"},
+            wantStrategy: &conciergeconfigv1alpha1.CredentialIssuerStrategy{
+                Type:           conciergeconfigv1alpha1.KubeClusterSigningCertificateStrategyType,
+                Status:         conciergeconfigv1alpha1.ErrorStrategyStatus,
+                Reason:         conciergeconfigv1alpha1.CouldNotFetchKeyStrategyReason,
+                Message:        "could not find a healthy agent pod (1 candidate)",
+                LastUpdateTime: metav1.NewTime(now),
+            },
+        },
         {
             name: "created new deployment based on alternate supported controller-manager CLI flags, no agent pods running yet",
             pinnipedObjects: []runtime.Object{
@@ -1623,6 +1683,8 @@ func TestAgentController(t *testing.T) {
             },
             DiscoveryURLOverride: tt.discoveryURLOverride,
             PriorityClassName:    tt.agentPriorityClassName,
+            RunAsUser:            tt.runAsUser,
+            RunAsGroup:           tt.runAsGroup,
         },
         &kubeclient.Client{Kubernetes: kubeClientset, PinnipedConcierge: conciergeClientset},
         kubeInformers.Core().V1().Pods(),
@@ -1751,6 +1813,87 @@ func TestMergeLabelsAndAnnotations(t *testing.T) {
     }
 }
 
+func TestGetPodSecurityContext(t *testing.T) {
+    t.Parallel()
+
+    tests := []struct {
+        name                   string
+        configRunAsUser        *int64
+        configRunAsGroup       *int64
+        wantPodSecurityContext *corev1.PodSecurityContext
+    }{
+        {
+            name:             "when the user provides no configuration, run as root",
+            configRunAsUser:  nil,
+            configRunAsGroup: nil,
+            wantPodSecurityContext: &corev1.PodSecurityContext{
+                RunAsUser:    ptr.To[int64](0),
+                RunAsGroup:   ptr.To[int64](0),
+                RunAsNonRoot: nil,
+            },
+        },
+        {
+            name:             "when the user provides values, use them",
+            configRunAsUser:  ptr.To[int64](-9223372036854775808),
+            configRunAsGroup: ptr.To[int64](9223372036854775807),
+            wantPodSecurityContext: &corev1.PodSecurityContext{
+                RunAsUser:    ptr.To[int64](-9223372036854775808),
+                RunAsGroup:   ptr.To[int64](9223372036854775807),
+                RunAsNonRoot: ptr.To(true),
+            },
+        },
+        {
+            name:             "when the user provides root values, use them",
+            configRunAsUser:  ptr.To[int64](0),
+            configRunAsGroup: ptr.To[int64](0),
+            wantPodSecurityContext: &corev1.PodSecurityContext{
+                RunAsUser:    ptr.To[int64](0),
+                RunAsGroup:   ptr.To[int64](0),
+                RunAsNonRoot: nil,
+            },
+        },
+        {
+            name:             "when the user provides root UID with non-root GID, use them",
+            configRunAsUser:  ptr.To[int64](0),
+            configRunAsGroup: ptr.To[int64](1),
+            wantPodSecurityContext: &corev1.PodSecurityContext{
+                RunAsUser:    ptr.To[int64](0),
+                RunAsGroup:   ptr.To[int64](1),
+                RunAsNonRoot: nil,
+            },
+        },
+        {
+            name:             "when the user provides non-root UID with root GID, use them",
+            configRunAsUser:  ptr.To[int64](1),
+            configRunAsGroup: ptr.To[int64](0),
+            wantPodSecurityContext: &corev1.PodSecurityContext{
+                RunAsUser:    ptr.To[int64](1),
+                RunAsGroup:   ptr.To[int64](0),
+                RunAsNonRoot: ptr.To(true),
+            },
+        },
+    }
+
+    for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+            t.Parallel()
+
+            controller := &agentController{
+                cfg: AgentConfig{
+                    RunAsUser:  test.configRunAsUser,
+                    RunAsGroup: test.configRunAsGroup,
+                },
+            }
+
+            podSecurityContext := controller.getPodSecurityContext()
+            require.NotNil(t, podSecurityContext)
+            require.NotNil(t, podSecurityContext.RunAsUser)
+            require.NotNil(t, podSecurityContext.RunAsGroup)
+            require.Equal(t, test.wantPodSecurityContext, podSecurityContext)
+        })
+    }
+}
+
 func deduplicate(strings []string) []string {
     if strings == nil {
         return nil