Merge branch 'main' into customize_ports

This commit is contained in:
Ryan Richard
2021-11-18 09:31:18 -08:00
48 changed files with 2431 additions and 317 deletions

View File

@@ -9,6 +9,8 @@ import (
"encoding/base64"
"fmt"
"k8s.io/client-go/util/cert"
auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
)
@@ -23,19 +25,20 @@ type Closer interface {
// CABundle returns a PEM-encoded CA bundle from the provided spec. If the provided spec is nil, a
// nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly
// encoded, an error will be returned.
- func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) {
+ func CABundle(spec *auth1alpha1.TLSSpec) (*x509.CertPool, []byte, error) {
if spec == nil || len(spec.CertificateAuthorityData) == 0 {
- return nil, nil
+ return nil, nil, nil
}
pem, err := base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
- return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
+ rootCAs, err := cert.NewPoolFromBytes(pem)
+ if err != nil {
+ return nil, nil, fmt.Errorf("certificateAuthorityData is not valid PEM: %w", err)
}
- return pem, nil
+ return rootCAs, pem, nil
}
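The two return values come from the same decoded PEM: the pool is ready for TLS dialing while the raw bytes remain available for callers that serialize configuration (the webhook kubeconfig change below is one such caller). A minimal sketch of a hypothetical consumer; the helper name and tls.Config wiring are illustrative, not part of this commit:

package example

import (
	"crypto/tls"
	"fmt"

	auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
	pinnipedauthenticator "go.pinniped.dev/internal/controller/authenticator"
)

// tlsConfigFromSpec (hypothetical) consumes both return values of the new
// CABundle signature: the parsed pool for live dials, the raw PEM for reuse.
func tlsConfigFromSpec(spec *auth1alpha1.TLSSpec) (*tls.Config, []byte, error) {
	rootCAs, caBundle, err := pinnipedauthenticator.CABundle(spec)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid TLS configuration: %w", err)
	}
	c := &tls.Config{MinVersion: tls.VersionTLS12}
	if rootCAs != nil {
		c.RootCAs = rootCAs // a nil pool would mean system roots
	}
	return c, caBundle, nil
}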

View File

@@ -6,9 +6,13 @@
package jwtcachefiller
import (
"context"
"fmt"
"net/url"
"reflect"
"time"
coreosoidc "github.com/coreos/go-oidc/v3/oidc"
"github.com/go-logr/logr"
"gopkg.in/square/go-jose.v2"
"k8s.io/apimachinery/pkg/api/errors"
@@ -23,6 +27,7 @@ import (
pinnipedauthenticator "go.pinniped.dev/internal/controller/authenticator"
"go.pinniped.dev/internal/controller/authenticator/authncache"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/net/phttp"
)
// These default values come from the way that the Supervisor issues and signs tokens. We make these
@@ -145,7 +150,7 @@ func (c *controller) extractValueAsJWTAuthenticator(value authncache.Value) *jwt
// newJWTAuthenticator creates a jwt authenticator from the provided spec.
func newJWTAuthenticator(spec *auth1alpha1.JWTAuthenticatorSpec) (*jwtAuthenticator, error) {
- caBundle, err := pinnipedauthenticator.CABundle(spec.TLS)
+ rootCAs, caBundle, err := pinnipedauthenticator.CABundle(spec.TLS)
if err != nil {
return nil, fmt.Errorf("invalid TLS configuration: %w", err)
}
@@ -167,20 +172,51 @@ func newJWTAuthenticator(spec *auth1alpha1.JWTAuthenticatorSpec) (*jwtAuthentica
groupsClaim = defaultGroupsClaim
}
- authenticator, err := oidc.New(oidc.Options{
+ // copied from Kube OIDC code
+ issuerURL, err := url.Parse(spec.Issuer)
+ if err != nil {
+ return nil, err
+ }
+ if issuerURL.Scheme != "https" {
+ return nil, fmt.Errorf("issuer (%q) has invalid scheme (%q), require 'https'", spec.Issuer, issuerURL.Scheme)
+ }
+ client := phttp.Default(rootCAs)
+ client.Timeout = 30 * time.Second // copied from Kube OIDC code
+ ctx := coreosoidc.ClientContext(context.Background(), client)
+ provider, err := coreosoidc.NewProvider(ctx, spec.Issuer)
+ if err != nil {
+ return nil, fmt.Errorf("could not initialize provider: %w", err)
+ }
+ providerJSON := &struct {
+ JWKSURL string `json:"jwks_uri"`
+ }{}
+ if err := provider.Claims(providerJSON); err != nil {
+ return nil, fmt.Errorf("could not get provider jwks_uri: %w", err) // should be impossible because coreosoidc.NewProvider validates this
+ }
+ if len(providerJSON.JWKSURL) == 0 {
+ return nil, fmt.Errorf("issuer %q does not have jwks_uri set", spec.Issuer)
+ }
+ oidcAuthenticator, err := oidc.New(oidc.Options{
IssuerURL: spec.Issuer,
+ KeySet: coreosoidc.NewRemoteKeySet(ctx, providerJSON.JWKSURL),
ClientID: spec.Audience,
UsernameClaim: usernameClaim,
GroupsClaim: groupsClaim,
SupportedSigningAlgs: defaultSupportedSigningAlgos(),
- CAContentProvider: caContentProvider,
+ // this is still needed for distributed claim resolution, meaning this uses an http client that does not honor our TLS config
+ // TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106141
+ CAContentProvider: caContentProvider,
})
if err != nil {
return nil, fmt.Errorf("could not initialize authenticator: %w", err)
}
return &jwtAuthenticator{
- tokenAuthenticatorCloser: authenticator,
+ tokenAuthenticatorCloser: oidcAuthenticator,
spec: spec,
}, nil
}
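The block above relies on go-oidc reading the *http.Client out of the request context. A sketch of that pattern using only the public go-oidc v3 API and the standard library (phttp.Default is internal to Pinniped, so the transport below is an approximation of what it builds):

package example

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"time"

	coreosoidc "github.com/coreos/go-oidc/v3/oidc"
)

// discoverWithCustomCAs pins rootCAs on the client used for discovery, so
// both the provider lookup and the remote key set honor the CA bundle.
func discoverWithCustomCAs(issuer string, rootCAs *x509.CertPool) (*coreosoidc.Provider, error) {
	client := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			Proxy:           http.ProxyFromEnvironment,
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12, RootCAs: rootCAs},
		},
	}
	ctx := coreosoidc.ClientContext(context.Background(), client)
	return coreosoidc.NewProvider(ctx, issuer)
}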

View File

@@ -15,7 +15,6 @@ import (
"encoding/pem"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
@@ -35,8 +34,10 @@ import (
pinnipedinformers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions"
"go.pinniped.dev/internal/controller/authenticator/authncache"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/crypto/ptls"
"go.pinniped.dev/internal/mocks/mocktokenauthenticatorcloser"
"go.pinniped.dev/internal/testutil/testlogger"
"go.pinniped.dev/internal/testutil/tlsserver"
)
func TestController(t *testing.T) {
@@ -57,8 +58,10 @@ func TestController(t *testing.T) {
goodRSASigningAlgo := jose.RS256
mux := http.NewServeMux()
- server := httptest.NewTLSServer(mux)
- t.Cleanup(server.Close)
+ server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tlsserver.AssertTLS(t, r, ptls.Default)
+ mux.ServeHTTP(w, r)
+ }), tlsserver.RecordTLSHello)
mux.Handle("/.well-known/openid-configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@@ -290,11 +293,7 @@ func TestController(t *testing.T) {
Spec: *missingTLSJWTAuthenticatorSpec,
},
},
- wantLogs: []string{
- `jwtcachefiller-controller "level"=0 "msg"="added new jwt authenticator" "issuer"="` + goodIssuer + `" "jwtAuthenticator"={"name":"test-name"}`,
- },
- wantCacheEntries: 1,
- runTestsOnResultingAuthenticator: false, // skip the tests because the authenticator left in the cache doesn't have the CA for our test discovery server
+ wantErr: `failed to build jwt authenticator: could not initialize provider: Get "` + goodIssuer + `/.well-known/openid-configuration": x509: certificate signed by unknown authority`,
},
{
name: "invalid jwt authenticator CA",

View File

@@ -91,7 +91,7 @@ func newWebhookAuthenticator(
defer func() { _ = os.Remove(temp.Name()) }()
cluster := &clientcmdapi.Cluster{Server: spec.Endpoint}
- cluster.CertificateAuthorityData, err = pinnipedauthenticator.CABundle(spec.TLS)
+ _, cluster.CertificateAuthorityData, err = pinnipedauthenticator.CABundle(spec.TLS)
if err != nil {
return nil, fmt.Errorf("invalid TLS configuration: %w", err)
}
@@ -118,5 +118,7 @@ func newWebhookAuthenticator(
// custom proxy stuff used by the API server.
var customDial net.DialFunc
+ // this uses an http client that does not honor our TLS config
+ // TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106155
return webhook.New(temp.Name(), version, implicitAuds, *webhook.DefaultRetryBackoff(), customDial)
}

View File

@@ -141,7 +141,7 @@ func TestNewWebhookAuthenticator(t *testing.T) {
TLS: &auth1alpha1.TLSSpec{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte("bad data"))},
}, ioutil.TempFile, clientcmd.WriteToFile)
require.Nil(t, res)
- require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM")
+ require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM: data does not contain any valid RSA or ECDSA certificates")
})
t.Run("valid config with no TLS spec", func(t *testing.T) {

View File

@@ -26,6 +26,8 @@ type kubeClientPodCommandExecutor struct {
// NewPodCommandExecutor returns a PodCommandExecutor that will interact with a pod via the provided
// kubeConfig and corresponding kubeClient.
func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) PodCommandExecutor {
+ kubeConfig = restclient.CopyConfig(kubeConfig)
+ kubeConfig.NextProtos = []string{"http/1.1"} // we explicitly need to upgrade from http1 to spdy; exec cannot use http2
return &kubeClientPodCommandExecutor{kubeConfig: kubeConfig, kubeClient: kubeClient}
}
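The NextProtos override works because ALPN picks the HTTP version during the TLS handshake: offering only "http/1.1" keeps h2 off the wire, and the SPDY protocol upgrade that exec relies on cannot be negotiated over HTTP/2. A standalone sketch of the same trick on a plain client (names are illustrative):

package example

import (
	"crypto/tls"
	"net/http"
)

// newHTTP1OnlyClient never offers "h2" during ALPN, so connection upgrades
// (SPDY, WebSockets) remain possible on the resulting HTTP/1.1 connections.
func newHTTP1OnlyClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			ForceAttemptHTTP2: false, // keep the transport from re-adding h2
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				NextProtos: []string{"http/1.1"},
			},
		},
	}
}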

View File

@@ -0,0 +1,44 @@
+ // Copyright 2021 the Pinniped contributors. All Rights Reserved.
+ // SPDX-License-Identifier: Apache-2.0
+ package kubecertagent
+ import (
+ "net/http"
+ "testing"
+ "github.com/stretchr/testify/require"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/client-go/rest"
+ "go.pinniped.dev/internal/crypto/ptls"
+ "go.pinniped.dev/internal/kubeclient"
+ "go.pinniped.dev/internal/testutil/tlsserver"
+ )
+ func TestSecureTLS(t *testing.T) {
+ var sawRequest bool
+ server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
+ tlsserver.AssertTLS(t, r, ptls.Secure)
+ sawRequest = true
+ }), tlsserver.RecordTLSHello)
+ config := &rest.Config{
+ Host: server.URL,
+ TLSClientConfig: rest.TLSClientConfig{
+ CAData: tlsserver.TLSTestServerCA(server),
+ },
+ }
+ client, err := kubeclient.New(kubeclient.WithConfig(config))
+ require.NoError(t, err)
+ // build this exactly like our production code does
+ podCommandExecutor := NewPodCommandExecutor(client.JSONConfig, client.Kubernetes)
+ got, err := podCommandExecutor.Exec("podNamespace", "podName", "command", "arg1", "arg2")
+ require.Equal(t, &errors.StatusError{}, err)
+ require.Empty(t, got)
+ require.True(t, sawRequest)
+ }

View File

@@ -6,7 +6,6 @@ package oidcupstreamwatcher
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
@@ -35,6 +34,7 @@ import (
"go.pinniped.dev/internal/controller/conditionsutil"
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/net/phttp"
"go.pinniped.dev/internal/oidc/provider"
"go.pinniped.dev/internal/upstreamoidc"
)
@@ -313,7 +313,8 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
// If the provider does not exist in the cache, do a fresh discovery lookup and save to the cache.
if discoveredProvider == nil {
- tlsConfig, err := getTLSConfig(upstream)
+ var err error
+ httpClient, err = getClient(upstream)
if err != nil {
return &v1alpha1.Condition{
Type: typeOIDCDiscoverySucceeded,
@@ -323,14 +324,6 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
}
}
- httpClient = &http.Client{
- Timeout: time.Minute,
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- TLSClientConfig: tlsConfig,
- },
- }
discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
if err != nil {
const klogLevelTrace = 6
@@ -406,13 +399,9 @@ func (c *oidcWatcherController) updateStatus(ctx context.Context, upstream *v1al
}
}
- func getTLSConfig(upstream *v1alpha1.OIDCIdentityProvider) (*tls.Config, error) {
- result := tls.Config{
- MinVersion: tls.VersionTLS12,
- }
+ func getClient(upstream *v1alpha1.OIDCIdentityProvider) (*http.Client, error) {
if upstream.Spec.TLS == nil || upstream.Spec.TLS.CertificateAuthorityData == "" {
- return &result, nil
+ return defaultClientShortTimeout(nil), nil
}
bundle, err := base64.StdEncoding.DecodeString(upstream.Spec.TLS.CertificateAuthorityData)
@@ -420,12 +409,18 @@ func getTLSConfig(upstream *v1alpha1.OIDCIdentityProvider) (*tls.Config, error)
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", err)
}
- result.RootCAs = x509.NewCertPool()
- if !result.RootCAs.AppendCertsFromPEM(bundle) {
+ rootCAs := x509.NewCertPool()
+ if !rootCAs.AppendCertsFromPEM(bundle) {
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", upstreamwatchers.ErrNoCertificates)
}
- return &result, nil
+ return defaultClientShortTimeout(rootCAs), nil
}
+ func defaultClientShortTimeout(rootCAs *x509.CertPool) *http.Client {
+ c := phttp.Default(rootCAs)
+ c.Timeout = time.Minute
+ return c
+ }
func computeScopes(additionalScopes []string) []string {
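phttp.Default is internal to Pinniped, so as an approximation: the new helper amounts to a proxy-aware client with a hardened TLS config and an overall one-minute timeout, e.g. (the exact hardening phttp.Default applies is an assumption here):

package example

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"time"
)

// defaultClientShortTimeout sketches what the diff builds via phttp.Default:
// proxy from env (the tests below assert this), custom roots, 1m timeout.
func defaultClientShortTimeout(rootCAs *x509.CertPool) *http.Client {
	return &http.Client{
		Timeout: time.Minute,
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				RootCAs:    rootCAs, // nil means system roots
			},
		},
	}
}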

View File

@@ -19,6 +19,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
@@ -1014,8 +1015,7 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
// We always want to use the proxy from env on these clients, so although the following assertions
// are a little hacky, this is a cheap way to test that we are using it.
- actualTransport, ok := actualIDP.Client.Transport.(*http.Transport)
- require.True(t, ok, "expected cached provider to have client with Transport of type *http.Transport")
+ actualTransport := unwrapTransport(t, actualIDP.Client.Transport)
httpProxyFromEnvFunction := reflect.ValueOf(http.ProxyFromEnvironment).Pointer()
actualTransportProxyFunction := reflect.ValueOf(actualTransport.Proxy).Pointer()
require.Equal(t, httpProxyFromEnvFunction, actualTransportProxyFunction,
@@ -1041,6 +1041,22 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
}
}
+ func unwrapTransport(t *testing.T, rt http.RoundTripper) *http.Transport {
+ t.Helper()
+ switch baseRT := rt.(type) {
+ case *http.Transport:
+ return baseRT
+ case net.RoundTripperWrapper:
+ return unwrapTransport(t, baseRT.WrappedRoundTripper())
+ default:
+ t.Fatalf("expected cached provider to have client with Transport of type *http.Transport, got: %T", baseRT)
+ return nil // unreachable
+ }
+ }
func normalizeOIDCUpstreams(upstreams []v1alpha1.OIDCIdentityProvider, now metav1.Time) []v1alpha1.OIDCIdentityProvider {
result := make([]v1alpha1.OIDCIdentityProvider, 0, len(upstreams))
for _, u := range upstreams {