Force the use of secure TLS config
This change updates the TLS config used by all pinniped components. There are no configuration knobs associated with this change; it simply tightens our static defaults.

There are four TLS config levels:

1. Secure (TLS 1.3 only)
2. Default (TLS 1.2+ with the best ciphers that are well supported)
3. Default LDAP (TLS 1.2+ with less good ciphers)
4. Legacy (currently unused, TLS 1.2+ with all non-broken ciphers)

Highlights per component:

1. pinniped CLI
   - uses "secure" config against KAS
   - uses "default" config for all other connections
2. concierge
   - uses "secure" config as an aggregated API server
   - uses "default" config as an impersonation proxy API server
   - uses "secure" config against KAS
   - uses "default" config for the JWT authenticator (mostly, see code)
   - no changes to the webhook authenticator (see code)
3. supervisor
   - uses "default" config as a server
   - uses "secure" config against KAS
   - uses "default" config against OIDC IDPs
   - uses "default LDAP" config against LDAP IDPs

Signed-off-by: Monis Khan <mok@vmware.com>
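For orientation, the four levels described above roughly translate into Go crypto/tls client configs along the following lines. This is only an illustrative sketch: the function names mirror the ptls package used in the diff below, but the exact cipher suite lists and options chosen by the real ptls package are not reproduced here and should be treated as assumptions.

package ptlsexample // illustrative only; the real profiles live in go.pinniped.dev/internal/crypto/ptls

import "crypto/tls"

// Secure sketches the "TLS 1.3 only" level. TLS 1.3 cipher suites are not
// configurable in Go, so pinning MinVersion is essentially the whole story.
func Secure() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS13,
		NextProtos: []string{"h2", "http/1.1"},
	}
}

// Default sketches the "TLS 1.2+ with well supported, strong ciphers" level.
// The suite list below is an assumption for illustration, not the real list.
func Default() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		},
		NextProtos: []string{"h2", "http/1.1"},
	}
}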
@@ -4,9 +4,17 @@
package kubeclient

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"reflect"
	"unsafe"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/kubernetes"
	kubescheme "k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
@@ -17,6 +25,7 @@ import (
	pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/scheme"
	pinnipedsupervisorclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
	pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned/scheme"
	"go.pinniped.dev/internal/crypto/ptls"
)

type Client struct {
@@ -44,11 +53,16 @@ func New(opts ...Option) (*Client, error) {
		WithConfig(inClusterConfig)(c) // make sure all writes to clientConfig flow through one code path
	}

	secureKubeConfig, err := createSecureKubeConfig(c.config)
	if err != nil {
		return nil, fmt.Errorf("could not create secure client config: %w", err)
	}

	// explicitly use json when talking to CRD APIs
	jsonKubeConfig := createJSONKubeConfig(c.config)
	jsonKubeConfig := createJSONKubeConfig(secureKubeConfig)

	// explicitly use protobuf when talking to built-in kube APIs
	protoKubeConfig := createProtoKubeConfig(c.config)
	protoKubeConfig := createProtoKubeConfig(secureKubeConfig)

	// Connect to the core Kubernetes API.
	k8sClient, err := kubernetes.NewForConfig(configWithWrapper(protoKubeConfig, kubescheme.Scheme, kubescheme.Codecs, c.middlewares, c.transportWrapper))
@@ -107,3 +121,142 @@ func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config {
	protoKubeConfig.ContentType = runtime.ContentTypeProtobuf
	return protoKubeConfig
}

// createSecureKubeConfig returns a copy of the input config with the WrapTransport
// enhanced to use the secure TLS configuration of the ptls / phttp packages.
func createSecureKubeConfig(kubeConfig *restclient.Config) (*restclient.Config, error) {
	secureKubeConfig := restclient.CopyConfig(kubeConfig)

	// by setting proxy to always be non-nil, we bust the client-go global TLS config cache.
	// this is required to make our wrapper function work without data races. the unit tests
	// associated with this code run in parallel to assert that we are not using the cache.
	// see k8s.io/client-go/transport.tlsConfigKey
	if secureKubeConfig.Proxy == nil {
		secureKubeConfig.Proxy = net.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
	}

	// make sure restclient.TLSConfigFor always returns a non-nil TLS config
	if len(secureKubeConfig.NextProtos) == 0 {
		secureKubeConfig.NextProtos = ptls.Secure(nil).NextProtos
	}

	tlsConfigTest, err := restclient.TLSConfigFor(secureKubeConfig)
	if err != nil {
		return nil, err // should never happen because our input config should always be valid
	}
	if tlsConfigTest == nil {
		return nil, fmt.Errorf("unexpected empty TLS config") // should never happen because we set NextProtos above
	}

	secureKubeConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		defer func() {
			if err := AssertSecureTransport(rt); err != nil {
				panic(err) // not sure what the point of this function would be if it failed to make the config secure
			}
		}()

		tlsConfig, err := netTLSClientConfig(rt)
		if err != nil {
			// this assumes none of our production code calls Wrap or messes with WrapTransport.
			// this is a reasonable assumption because all such code should live in this package
			// and all such code should run after this function is called, not before. the kube
			// codebase uses transport wrappers that can be unwrapped to access the underlying
			// TLS config.
			panic(err)
		}
		if tlsConfig == nil {
			panic("unexpected empty TLS config") // we validate this case above via tlsConfigTest
		}

		// mutate the TLS config into our desired state before it is used
		ptls.Merge(ptls.Secure, tlsConfig)

		return rt // return the input transport since we mutated it in-place
	})

	if err := AssertSecureConfig(secureKubeConfig); err != nil {
		return nil, err // not sure what the point of this function would be if it failed to make the config secure
	}

	return secureKubeConfig, nil
}

// SecureAnonymousClientConfig has the same properties as restclient.AnonymousClientConfig
// while still enforcing the secure TLS configuration of the ptls / phttp packages.
func SecureAnonymousClientConfig(kubeConfig *restclient.Config) *restclient.Config {
	kubeConfig = restclient.AnonymousClientConfig(kubeConfig)
	secureKubeConfig, err := createSecureKubeConfig(kubeConfig)
	if err != nil {
		panic(err) // should never happen as this would only fail on invalid CA data, which would never work anyway
	}
	if err := AssertSecureConfig(secureKubeConfig); err != nil {
		panic(err) // not sure what the point of this function would be if it failed to make the config secure
	}
	return secureKubeConfig
}

func AssertSecureConfig(kubeConfig *restclient.Config) error {
	rt, err := restclient.TransportFor(kubeConfig)
	if err != nil {
		return fmt.Errorf("failed to build transport: %w", err)
	}

	return AssertSecureTransport(rt)
}

func AssertSecureTransport(rt http.RoundTripper) error {
	tlsConfig, err := netTLSClientConfig(rt)
	if err != nil {
		return fmt.Errorf("failed to get TLS config: %w", err)
	}

	tlsConfigCopy := tlsConfig.Clone()
	ptls.Merge(ptls.Secure, tlsConfigCopy) // only mutate the copy

	//nolint: gosec // the empty TLS config here is not used
	if diff := cmp.Diff(tlsConfigCopy, tlsConfig,
		cmpopts.IgnoreUnexported(tls.Config{}, x509.CertPool{}),
		cmpopts.IgnoreFields(tls.Config{}, "GetClientCertificate"),
	); len(diff) != 0 {
		return fmt.Errorf("tls config is not secure:\n%s", diff)
	}

	return nil
}

func netTLSClientConfig(rt http.RoundTripper) (*tls.Config, error) {
	tlsConfig, err := net.TLSClientConfig(rt)
	if err == nil {
		return tlsConfig, nil
	}

	// TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106014
	if err.Error() == "unknown transport type: *exec.roundTripper" {
		return net.TLSClientConfig(extractRTUnsafe(rt))
	}

	return nil, err
}

func extractRTUnsafe(rt http.RoundTripper) (out http.RoundTripper) {
	for wrapper, ok := rt.(net.RoundTripperWrapper); ok; wrapper, ok = rt.(net.RoundTripperWrapper) {
		// keep peeling the wrappers until we get to the exec.roundTripper
		rt = wrapper.WrappedRoundTripper()
	}

	// this is some dark magic to read a private field
	baseField := reflect.ValueOf(rt).Elem().FieldByName("base")
	basePointer := (*http.RoundTripper)(unsafe.Pointer(baseField.UnsafeAddr()))

	return *basePointer
}

func Secure(config *restclient.Config) (kubernetes.Interface, *restclient.Config, error) {
	// our middleware does not apply to the returned restclient.Config, therefore, this
	// client not having a leader election lock is irrelevant since it would not be enforced
	secureClient, err := New(WithConfig(config)) // handles nil config correctly
	if err != nil {
		return nil, nil, fmt.Errorf("failed to build secure client: %w", err)
	}
	return secureClient.Kubernetes, secureClient.ProtoConfig, nil
}

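As a rough usage sketch of the helpers added above (hypothetical caller code, not part of this commit), a component could obtain a secure client and then derive an anonymous variant of its config. The example assumes imports of "k8s.io/client-go/rest" and this kubeclient package, and a restConfig obtained via the usual client-go loading rules.

// Hypothetical caller sketch, not part of this commit.
func exampleSecureClient(restConfig *rest.Config) error {
	k8sClient, protoConfig, err := kubeclient.Secure(restConfig)
	if err != nil {
		return err
	}

	// Derive a credential-free config that still enforces the secure TLS profile.
	anonConfig := kubeclient.SecureAnonymousClientConfig(protoConfig)

	// Either config can be re-verified at any point.
	if err := kubeclient.AssertSecureConfig(anonConfig); err != nil {
		return err
	}

	_ = k8sClient // use the typed clientset as usual
	return nil
}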
@@ -7,22 +7,31 @@ import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
	"k8s.io/client-go/rest"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/transport"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"

	// register all client-go auth plugins.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	conciergeconfigv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	supervisorconfigv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
	"go.pinniped.dev/internal/crypto/ptls"
	"go.pinniped.dev/internal/httputil/roundtripper"
	"go.pinniped.dev/internal/testutil/fakekubeapi"
)
@@ -514,9 +523,12 @@ func TestKubeclient(t *testing.T) {
				return []*spyMiddleware{newSimpleMiddleware(t, true, false, false)}
			},
			editRestConfig: func(t *testing.T, restConfig *rest.Config) {
				restConfig.Dial = func(_ context.Context, _, _ string) (net.Conn, error) {
					return nil, fmt.Errorf("some fake connection error")
				}
				// avoid messing with restConfig.Dial since it breaks client-go TLS cache logic
				restConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper {
					return roundtripper.WrapFunc(rt, func(_ *http.Request) (*http.Response, error) {
						return nil, fmt.Errorf("some fake connection error")
					})
				})
			},
			reallyRunTest: func(t *testing.T, c *Client) {
				_, err := c.PinnipedSupervisor.
@@ -591,8 +603,7 @@ func TestKubeclient(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			server, restConfig := fakekubeapi.Start(t, nil)
			defer server.Close()
			_, restConfig := fakekubeapi.Start(t, nil)

			if test.editRestConfig != nil {
				test.editRestConfig(t, restConfig)
@@ -754,21 +765,45 @@ func newFailingMiddleware(t *testing.T, name string, mutateReqFails, mutateRespF
}

type wantCloser struct {
	io.ReadCloser
	closeCount                      int
	closeCalls                      []string
	couldReadBytesJustBeforeClosing bool
	m sync.Mutex

	_rc                              io.ReadCloser
	_closeCalls                      []string
	_couldReadBytesJustBeforeClosing bool
}

func (wc *wantCloser) Close() error {
	wc.closeCount++
	wc.closeCalls = append(wc.closeCalls, getCaller())
	n, _ := wc.ReadCloser.Read([]byte{0})
func (w *wantCloser) Close() error {
	w.m.Lock()
	defer w.m.Unlock()

	w._closeCalls = append(w._closeCalls, getCaller())
	n, _ := w._rc.Read([]byte{0})
	if n > 0 {
		// there were still bytes left to be read
		wc.couldReadBytesJustBeforeClosing = true
		w._couldReadBytesJustBeforeClosing = true
	}
	return wc.ReadCloser.Close()
	return w._rc.Close()
}

func (w *wantCloser) Read(p []byte) (int, error) {
	w.m.Lock()
	defer w.m.Unlock()

	return w._rc.Read(p)
}

func (w *wantCloser) couldRead() bool {
	w.m.Lock()
	defer w.m.Unlock()

	return w._couldReadBytesJustBeforeClosing
}

func (w *wantCloser) calls() []string {
	w.m.Lock()
	defer w.m.Unlock()

	return w._closeCalls
}

func getCaller() string {
@@ -785,11 +820,14 @@ func getCaller() string {
func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
	caller := getCaller()
	return func(rt http.RoundTripper) http.RoundTripper {
		return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
		return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
			if req.Body != nil {
				wc := &wantCloser{ReadCloser: req.Body}
				wc := &wantCloser{_rc: req.Body}
				t.Cleanup(func() {
					require.Equalf(t, wc.closeCount, 1, "did not close req body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
					require.Eventuallyf(t, func() bool {
						return 1 == len(wc.calls())
					}, 5*time.Second, 100*time.Millisecond,
						"did not close req body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.calls())
				})
				req.Body = wc
			}
@@ -800,9 +838,12 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
				if originalErr != nil {
					return nil, originalErr
				}
				wc := &wantCloser{ReadCloser: originalBodyCopy}
				wc := &wantCloser{_rc: originalBodyCopy}
				t.Cleanup(func() {
					require.Equalf(t, wc.closeCount, 1, "did not close req body copy expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
					require.Eventuallyf(t, func() bool {
						return 1 == len(wc.calls())
					}, 5*time.Second, 100*time.Millisecond,
						"did not close req body copy expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.calls())
				})
				return wc, nil
			}
@@ -810,7 +851,7 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {

			resp, err := rt.RoundTrip(req)
			return false, resp, err
		})
		}).RoundTrip)
	}
}

@@ -819,20 +860,24 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
func wantCloseRespWrapper(t *testing.T) transport.WrapperFunc {
	caller := getCaller()
	return func(rt http.RoundTripper) http.RoundTripper {
		return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
		return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
			resp, err := rt.RoundTrip(req)
			if err != nil {
				// request failed, so there is no response body to watch for Close() calls on
				return false, resp, err
			}
			wc := &wantCloser{ReadCloser: resp.Body}
			wc := &wantCloser{_rc: resp.Body}
			t.Cleanup(func() {
				require.False(t, wc.couldReadBytesJustBeforeClosing, "did not consume all response body bytes before closing %s", caller)
				require.Equalf(t, wc.closeCount, 1, "did not close resp body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
				require.Eventuallyf(t, func() bool {
					return wc.couldRead() == false &&
						1 == len(wc.calls())
				}, 5*time.Second, 10*time.Millisecond,
					`did not close resp body expected number of times at %s for req %#v; actual calls = %s
did not consume all response body bytes before closing %s, couldRead=%v`, caller, req, wc.calls(), caller, wc.couldRead())
			})
			resp.Body = wc
			return false, resp, err
		})
		}).RoundTrip)
	}
}

@@ -895,3 +940,221 @@ func createGetFederationDomainTest(t *testing.T, client *Client) {
	require.NoError(t, err)
	require.Equal(t, goodFederationDomain, federationDomain)
}

// TestUnwrap ensures that the Client struct returned by this package only contains
// transports that can be fully unwrapped to get access to the underlying TLS config.
func TestUnwrap(t *testing.T) {
	t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)

	server, restConfig := fakekubeapi.Start(t, nil)

	serverSubjects := server.Client().Transport.(*http.Transport).TLSClientConfig.RootCAs.Subjects()

t.Run("regular client", func(t *testing.T) {
|
||||
t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
|
||||
|
||||
regularClient := makeClient(t, restConfig, func(_ *rest.Config) {})
|
||||
|
||||
testUnwrap(t, regularClient, serverSubjects)
|
||||
})
|
||||
|
||||
t.Run("exec client", func(t *testing.T) {
|
||||
t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
|
||||
|
||||
execClient := makeClient(t, restConfig, func(config *rest.Config) {
|
||||
config.ExecProvider = &clientcmdapi.ExecConfig{
|
||||
Command: "echo",
|
||||
Args: []string{"pandas are awesome"},
|
||||
APIVersion: clientauthenticationv1.SchemeGroupVersion.String(),
|
||||
InteractiveMode: clientcmdapi.NeverExecInteractiveMode,
|
||||
}
|
||||
})
|
||||
|
||||
testUnwrap(t, execClient, serverSubjects)
|
||||
})
|
||||
|
||||
t.Run("gcp client", func(t *testing.T) {
|
||||
t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
|
||||
|
||||
gcpClient := makeClient(t, restConfig, func(config *rest.Config) {
|
||||
config.AuthProvider = &clientcmdapi.AuthProviderConfig{
|
||||
Name: "gcp",
|
||||
}
|
||||
})
|
||||
|
||||
testUnwrap(t, gcpClient, serverSubjects)
|
||||
})
|
||||
|
||||
t.Run("oidc client", func(t *testing.T) {
|
||||
t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
|
||||
|
||||
oidcClient := makeClient(t, restConfig, func(config *rest.Config) {
|
||||
config.AuthProvider = &clientcmdapi.AuthProviderConfig{
|
||||
Name: "oidc",
|
||||
Config: map[string]string{
|
||||
"idp-issuer-url": "https://pandas.local",
|
||||
"client-id": "walrus",
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
testUnwrap(t, oidcClient, serverSubjects)
|
||||
})
|
||||
|
||||
t.Run("azure client", func(t *testing.T) {
|
||||
t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
|
||||
|
||||
azureClient := makeClient(t, restConfig, func(config *rest.Config) {
|
||||
config.AuthProvider = &clientcmdapi.AuthProviderConfig{
|
||||
Name: "azure",
|
||||
Config: map[string]string{
|
||||
"client-id": "pinny",
|
||||
"tenant-id": "danger",
|
||||
"apiserver-id": "1234",
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
testUnwrap(t, azureClient, serverSubjects)
|
||||
})
|
||||
}
|
||||
|
||||
func testUnwrap(t *testing.T, client *Client, serverSubjects [][]byte) {
	tests := []struct {
		name string
		rt   http.RoundTripper
	}{
		{
			name: "core v1",
			rt:   extractTransport(client.Kubernetes.CoreV1()),
		},
		{
			name: "coordination v1",
			rt:   extractTransport(client.Kubernetes.CoordinationV1()),
		},
		{
			name: "api registration v1",
			rt:   extractTransport(client.Aggregation.ApiregistrationV1()),
		},
		{
			name: "concierge login",
			rt:   extractTransport(client.PinnipedConcierge.LoginV1alpha1()),
		},
		{
			name: "concierge config",
			rt:   extractTransport(client.PinnipedConcierge.ConfigV1alpha1()),
		},
		{
			name: "supervisor idp",
			rt:   extractTransport(client.PinnipedSupervisor.IDPV1alpha1()),
		},
		{
			name: "supervisor config",
			rt:   extractTransport(client.PinnipedSupervisor.ConfigV1alpha1()),
		},
		{
			name: "json config",
			rt:   configToTransport(t, client.JSONConfig),
		},
		{
			name: "proto config",
			rt:   configToTransport(t, client.ProtoConfig),
		},
		{
			name: "anonymous json config",
			rt:   configToTransport(t, SecureAnonymousClientConfig(client.JSONConfig)),
		},
		{
			name: "anonymous proto config",
			rt:   configToTransport(t, SecureAnonymousClientConfig(client.ProtoConfig)),
		},
		{
			name: "json config - no cache",
			rt:   configToTransport(t, bustTLSCache(client.JSONConfig)),
		},
		{
			name: "proto config - no cache",
			rt:   configToTransport(t, bustTLSCache(client.ProtoConfig)),
		},
		{
			name: "anonymous json config - no cache, inner bust",
			rt:   configToTransport(t, SecureAnonymousClientConfig(bustTLSCache(client.JSONConfig))),
		},
		{
			name: "anonymous proto config - no cache, inner bust",
			rt:   configToTransport(t, SecureAnonymousClientConfig(bustTLSCache(client.ProtoConfig))),
		},
		{
			name: "anonymous json config - no cache, double bust",
			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(bustTLSCache(client.JSONConfig)))),
		},
		{
			name: "anonymous proto config - no cache, double bust",
			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(bustTLSCache(client.ProtoConfig)))),
		},
		{
			name: "anonymous json config - no cache, outer bust",
			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(client.JSONConfig))),
		},
		{
			name: "anonymous proto config - no cache, outer bust",
			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(client.ProtoConfig))),
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)

			tlsConfig, err := netTLSClientConfig(tt.rt)
			require.NoError(t, err)
			require.NotNil(t, tlsConfig)

			secureTLSConfig := ptls.Secure(nil)

			require.Equal(t, secureTLSConfig.MinVersion, tlsConfig.MinVersion)
			require.Equal(t, secureTLSConfig.CipherSuites, tlsConfig.CipherSuites)
			require.Equal(t, secureTLSConfig.NextProtos, tlsConfig.NextProtos)

			// x509.CertPool has some embedded functions that make it hard to compare so just look at the subjects
			require.Equal(t, serverSubjects, tlsConfig.RootCAs.Subjects())
		})
	}
}

type restClientGetter interface {
	RESTClient() rest.Interface
}

func extractTransport(getter restClientGetter) http.RoundTripper {
	return getter.RESTClient().(*rest.RESTClient).Client.Transport
}

func configToTransport(t *testing.T, config *rest.Config) http.RoundTripper {
	t.Helper()

	rt, err := rest.TransportFor(config)
	require.NoError(t, err)
	return rt
}

func bustTLSCache(config *rest.Config) *rest.Config {
	c := rest.CopyConfig(config)
	c.Proxy = func(h *http.Request) (*url.URL, error) {
		return nil, nil // having a non-nil proxy func makes client-go not cache the TLS config
	}
	return c
}

func makeClient(t *testing.T, restConfig *rest.Config, f func(*rest.Config)) *Client {
	t.Helper()

	restConfig = rest.CopyConfig(restConfig)

	f(restConfig)

	client, err := New(WithConfig(restConfig))
	require.NoError(t, err)

	return client
}

@@ -20,6 +20,7 @@ import (
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport"

	"go.pinniped.dev/internal/httputil/roundtripper"
	"go.pinniped.dev/internal/plog"
)
@@ -78,7 +79,7 @@ func newWrapper(
	middlewares []Middleware,
) transport.WrapperFunc {
	return func(rt http.RoundTripper) http.RoundTripper {
		return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
		return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
			reqInfo, err := resolver.NewRequestInfo(reqWithoutPrefix(req, hostURL, apiPathPrefix))
			if err != nil || !reqInfo.IsResourceRequest {
				resp, err := rt.RoundTrip(req) // we only handle kube resource requests
@@ -120,7 +121,7 @@ func newWrapper(
				resp, err := rt.RoundTrip(req) // we only handle certain verbs
				return false, resp, err
			}
		})
		}).RoundTrip)
	}
}