diff --git a/internal/config/concierge/config.go b/internal/config/concierge/config.go index ad0f43dfc..633d481a3 100644 --- a/internal/config/concierge/config.go +++ b/internal/config/concierge/config.go @@ -192,6 +192,14 @@ func validateNames(names *NamesConfigSpec) error { } func validateKubeCertAgent(agentConfig *KubeCertAgentSpec) error { + if agentConfig.RunAsUser != nil && *agentConfig.RunAsUser < 0 { + return constable.Error(fmt.Sprintf("runAsUser must be 0 or greater (instead of %d)", *agentConfig.RunAsUser)) + } + + if agentConfig.RunAsGroup != nil && *agentConfig.RunAsGroup < 0 { + return constable.Error(fmt.Sprintf("runAsGroup must be 0 or greater (instead of %d)", *agentConfig.RunAsGroup)) + } + if len(agentConfig.PriorityClassName) == 0 { // Optional, so empty is valid. return nil diff --git a/internal/config/concierge/config_test.go b/internal/config/concierge/config_test.go index 0ff1d512b..f90cd66a0 100644 --- a/internal/config/concierge/config_test.go +++ b/internal/config/concierge/config_test.go @@ -69,6 +69,8 @@ func TestFromPath(t *testing.T) { image: kube-cert-agent-image imagePullSecrets: [kube-cert-agent-image-pull-secret] priorityClassName: %s + runAsUser: 1 + runAsGroup: 2 log: level: debug tls: @@ -121,6 +123,8 @@ func TestFromPath(t *testing.T) { Image: ptr.To("kube-cert-agent-image"), ImagePullSecrets: []string{"kube-cert-agent-image-pull-secret"}, PriorityClassName: stringOfLength253, + RunAsUser: ptr.To(int64(1)), + RunAsGroup: ptr.To(int64(2)), }, Log: plog.LogSpec{ Level: plog.LevelDebug, @@ -755,6 +759,48 @@ func TestFromPath(t *testing.T) { `), wantError: `validate kubeCertAgent: invalid priorityClassName: a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`, }, + { + name: "negative runAsUser", + yaml: here.Doc(` + --- + names: + servingCertificateSecret: pinniped-concierge-api-tls-serving-certificate + credentialIssuer: pinniped-config + apiService: pinniped-api + impersonationLoadBalancerService: impersonationLoadBalancerService-value + impersonationClusterIPService: impersonationClusterIPService-value + impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value + impersonationCACertificateSecret: impersonationCACertificateSecret-value + impersonationSignerSecret: impersonationSignerSecret-value + agentServiceAccount: agentServiceAccount-value + impersonationProxyServiceAccount: impersonationProxyServiceAccount-value + impersonationProxyLegacySecret: impersonationProxyLegacySecret-value + kubeCertAgent: + runAsUser: -1 + `), + wantError: `validate kubeCertAgent: runAsUser must be 0 or greater (instead of -1)`, + }, + { + name: "negative runAsGroup", + yaml: here.Doc(` + --- + names: + servingCertificateSecret: pinniped-concierge-api-tls-serving-certificate + credentialIssuer: pinniped-config + apiService: pinniped-api + impersonationLoadBalancerService: impersonationLoadBalancerService-value + impersonationClusterIPService: impersonationClusterIPService-value + impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value + impersonationCACertificateSecret: impersonationCACertificateSecret-value + impersonationSignerSecret: impersonationSignerSecret-value + agentServiceAccount: agentServiceAccount-value + impersonationProxyServiceAccount: impersonationProxyServiceAccount-value + impersonationProxyLegacySecret: impersonationProxyLegacySecret-value + kubeCertAgent: + runAsGroup: -1 + `), + wantError: `validate kubeCertAgent: runAsGroup must be 0 or greater (instead of -1)`, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git 
a/internal/config/concierge/types.go b/internal/config/concierge/types.go index d86dc8d1d..22ccfaa99 100644 --- a/internal/config/concierge/types.go +++ b/internal/config/concierge/types.go @@ -111,4 +111,13 @@ type KubeCertAgentSpec struct { // PriorityClassName optionally sets the PriorityClassName for the agent's pod. PriorityClassName string `json:"priorityClassName"` + + // The UID to run the entrypoint of the kube-cert-agent container. + // Defaults to 0 (root). Negative values are rejected during config validation. + // If set to any value other than 0, RunAsNonRoot will be set to true. + RunAsUser *int64 `json:"runAsUser"` + + // The GID to run the entrypoint of the kube-cert-agent container. + // Defaults to 0 (root). Negative values are rejected during config validation. + RunAsGroup *int64 `json:"runAsGroup"` } diff --git a/internal/controller/kubecertagent/kubecertagent.go b/internal/controller/kubecertagent/kubecertagent.go index 0557f7cae..9310d4534 100644 --- a/internal/controller/kubecertagent/kubecertagent.go +++ b/internal/controller/kubecertagent/kubecertagent.go @@ -104,6 +104,12 @@ type AgentConfig struct { // PriorityClassName optionally sets the PriorityClassName for the agent's pod. PriorityClassName string + + // RunAsUser is the UID to run the entrypoint of the container process + RunAsUser *int64 + + // RunAsGroup is the GID to run the entrypoint of the container process + RunAsGroup *int64 } // Only select using the unique label which will not match the pods of any other Deployment. 
@@ -585,6 +591,29 @@ func (c *agentController) newestRunningPodOnSchedulableNode(ctx context.Context, return pods[0], nil } +func (c *agentController) getPodSecurityContext() *corev1.PodSecurityContext { + root := int64(0) + + podSecurityContext := &corev1.PodSecurityContext{ + RunAsUser: &root, + RunAsGroup: &root, + } + + if c.cfg.RunAsUser != nil { + podSecurityContext.RunAsUser = c.cfg.RunAsUser + } + + if *podSecurityContext.RunAsUser != root { + podSecurityContext.RunAsNonRoot = ptr.To(true) + } + + if c.cfg.RunAsGroup != nil { + podSecurityContext.RunAsGroup = c.cfg.RunAsGroup + } + + return podSecurityContext +} + func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment { var volumeMounts []corev1.VolumeMount if len(controllerManagerPod.Spec.Containers) > 0 { @@ -662,10 +691,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) * Tolerations: controllerManagerPod.Spec.Tolerations, // We need to run the agent pod as root since the file permissions // on the cluster keypair usually restricts access to only root. 
- SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: ptr.To[int64](0), - RunAsGroup: ptr.To[int64](0), - }, + SecurityContext: c.getPodSecurityContext(), HostNetwork: controllerManagerPod.Spec.HostNetwork, PriorityClassName: c.cfg.PriorityClassName, }, diff --git a/internal/controller/kubecertagent/kubecertagent_test.go b/internal/controller/kubecertagent/kubecertagent_test.go index aa1c6f336..05103eaaa 100644 --- a/internal/controller/kubecertagent/kubecertagent_test.go +++ b/internal/controller/kubecertagent/kubecertagent_test.go @@ -230,6 +230,8 @@ func TestAgentController(t *testing.T) { name string discoveryURLOverride *string agentPriorityClassName string + runAsUser *int64 + runAsGroup *int64 pinnipedObjects []runtime.Object kubeObjects []runtime.Object addKubeReactions func(*kubefake.Clientset) @@ -450,6 +452,64 @@ func TestAgentController(t *testing.T) { LastUpdateTime: metav1.NewTime(now), }, }, + { + name: "created new deployment with overridden runAs, no agent pods running yet", + runAsUser: ptr.To[int64](1), + runAsGroup: ptr.To[int64](2), + pinnipedObjects: []runtime.Object{ + initialCredentialIssuer, + }, + kubeObjects: []runtime.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-system", + Name: "kube-controller-manager-3", + Labels: map[string]string{"component": "kube-controller-manager"}, + CreationTimestamp: metav1.NewTime(now.Add(-1 * time.Hour)), + }, + Spec: corev1.PodSpec{NodeName: schedulableControllerManagerNode.Name}, + Status: corev1.PodStatus{Phase: corev1.PodRunning}, + }, + healthyKubeControllerManagerPod, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-system", + Name: "kube-controller-manager-2", + Labels: map[string]string{"component": "kube-controller-manager"}, + CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)), + }, + Spec: corev1.PodSpec{NodeName: schedulableControllerManagerNode.Name}, + Status: corev1.PodStatus{Phase: corev1.PodRunning}, + }, + pendingAgentPod, + 
schedulableControllerManagerNode, + }, + wantDistinctErrors: []string{ + "could not find a healthy agent pod (1 candidate)", + }, + alsoAllowUndesiredDistinctErrors: []string{ + // due to the high amount of nondeterminism in this test, this error will sometimes also happen, but is not required to happen + `could not ensure agent deployment: deployments.apps "pinniped-concierge-kube-cert-agent" already exists`, + }, + wantDistinctLogs: []string{ + `{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`, + }, + wantAgentDeployment: modifiedHealthyHealthyAgentDeployment(func(deployment *appsv1.Deployment) { + deployment.Spec.Template.Spec.SecurityContext = &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](1), + RunAsGroup: ptr.To[int64](2), + RunAsNonRoot: ptr.To(true), + } + }), + wantDeploymentActionVerbs: []string{"list", "watch", "create"}, + wantStrategy: &conciergeconfigv1alpha1.CredentialIssuerStrategy{ + Type: conciergeconfigv1alpha1.KubeClusterSigningCertificateStrategyType, + Status: conciergeconfigv1alpha1.ErrorStrategyStatus, + Reason: conciergeconfigv1alpha1.CouldNotFetchKeyStrategyReason, + Message: "could not find a healthy agent pod (1 candidate)", + LastUpdateTime: metav1.NewTime(now), + }, + }, { name: "created new deployment based on alternate supported controller-manager CLI flags, no agent pods running yet", pinnipedObjects: []runtime.Object{ @@ -1623,6 +1683,8 @@ func TestAgentController(t *testing.T) { }, DiscoveryURLOverride: tt.discoveryURLOverride, PriorityClassName: tt.agentPriorityClassName, + RunAsUser: tt.runAsUser, + RunAsGroup: tt.runAsGroup, }, &kubeclient.Client{Kubernetes: kubeClientset, 
PinnipedConcierge: conciergeClientset}, kubeInformers.Core().V1().Pods(), @@ -1751,6 +1813,87 @@ func TestMergeLabelsAndAnnotations(t *testing.T) { } } +func TestGetPodSecurityContext(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + configRunAsUser *int64 + configRunAsGroup *int64 + wantPodSecurityContext *corev1.PodSecurityContext + }{ + { + name: "when the user provides no configuration, run as root", + configRunAsUser: nil, + configRunAsGroup: nil, + wantPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](0), + RunAsGroup: ptr.To[int64](0), + RunAsNonRoot: nil, + }, + }, + { + name: "when the user provides values, use them", + configRunAsUser: ptr.To[int64](-9223372036854775808), + configRunAsGroup: ptr.To[int64](9223372036854775807), + wantPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](-9223372036854775808), + RunAsGroup: ptr.To[int64](9223372036854775807), + RunAsNonRoot: ptr.To(true), + }, + }, + { + name: "when the user provides root values, use them", + configRunAsUser: ptr.To[int64](0), + configRunAsGroup: ptr.To[int64](0), + wantPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](0), + RunAsGroup: ptr.To[int64](0), + RunAsNonRoot: nil, + }, + }, + { + name: "when the user provides root UID with non-root GID, use them", + configRunAsUser: ptr.To[int64](0), + configRunAsGroup: ptr.To[int64](1), + wantPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](0), + RunAsGroup: ptr.To[int64](1), + RunAsNonRoot: nil, + }, + }, + { + name: "when the user provides non-root UID with root GID, use them", + configRunAsUser: ptr.To[int64](1), + configRunAsGroup: ptr.To[int64](0), + wantPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](1), + RunAsGroup: ptr.To[int64](0), + RunAsNonRoot: ptr.To(true), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + controller := 
&agentController{ + cfg: AgentConfig{ + RunAsUser: test.configRunAsUser, + RunAsGroup: test.configRunAsGroup, + }, + } + + podSecurityContext := controller.getPodSecurityContext() + require.NotNil(t, podSecurityContext) + require.NotNil(t, podSecurityContext.RunAsUser) + require.NotNil(t, podSecurityContext.RunAsGroup) + require.Equal(t, test.wantPodSecurityContext, podSecurityContext) + }) + } +} + func deduplicate(strings []string) []string { if strings == nil { return nil diff --git a/internal/controllermanager/prepare_controllers.go b/internal/controllermanager/prepare_controllers.go index ff77157e5..20c78be8a 100644 --- a/internal/controllermanager/prepare_controllers.go +++ b/internal/controllermanager/prepare_controllers.go @@ -141,6 +141,8 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) { //nol CredentialIssuerName: c.NamesConfig.CredentialIssuer, DiscoveryURLOverride: c.DiscoveryURLOverride, PriorityClassName: c.KubeCertAgentConfig.PriorityClassName, + RunAsUser: c.KubeCertAgentConfig.RunAsUser, + RunAsGroup: c.KubeCertAgentConfig.RunAsGroup, } // Create controller manager.