Merge branch 'main' into username-and-subject-claims

Ryan Richard
2020-12-15 18:09:44 -08:00
38 changed files with 2644 additions and 1510 deletions


@@ -32,7 +32,7 @@ import (
"go.pinniped.dev/test/library/browsertest"
)
func TestCLIGetKubeconfig(t *testing.T) {
func TestCLIGetKubeconfigStaticToken(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
// Create a test webhook configuration to use with the CLI.
@@ -42,70 +42,81 @@ func TestCLIGetKubeconfig(t *testing.T) {
authenticator := library.CreateTestWebhookAuthenticator(ctx, t)
// Build pinniped CLI.
pinnipedExe := buildPinnipedCLI(t)
pinnipedExe := library.PinnipedCLIPath(t)
// Run pinniped CLI to get kubeconfig.
kubeConfigYAML := runPinnipedCLIGetKubeconfig(t, pinnipedExe, env.TestUser.Token, env.ConciergeNamespace, "webhook", authenticator.Name)
for _, tt := range []struct {
name string
args []string
expectStderr string
}{
{
name: "deprecated command",
args: []string{
"get-kubeconfig",
"--token", env.TestUser.Token,
"--pinniped-namespace", env.ConciergeNamespace,
"--authenticator-type", "webhook",
"--authenticator-name", authenticator.Name,
},
expectStderr: "Command \"get-kubeconfig\" is deprecated, Please use `pinniped get kubeconfig` instead.\n",
},
{
name: "newer command, but still using static parameters",
args: []string{
"get", "kubeconfig",
"--static-token", env.TestUser.Token,
"--concierge-namespace", env.ConciergeNamespace,
"--concierge-authenticator-type", "webhook",
"--concierge-authenticator-name", authenticator.Name,
},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
stdout, stderr := runPinnipedCLI(t, pinnipedExe, tt.args...)
require.Equal(t, tt.expectStderr, stderr)
// In addition to the client-go based testing below, also try the kubeconfig
// with kubectl to validate that it works.
adminClient := library.NewClientset(t)
t.Run(
"access as user with kubectl",
library.AccessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
)
for _, group := range env.TestUser.ExpectedGroups {
group := group
t.Run(
"access as group "+group+" with kubectl",
library.AccessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, env.ConciergeNamespace),
)
}
// Even the deprecated command should now generate a kubeconfig with the new "pinniped login static" command.
restConfig := library.NewRestConfigFromKubeconfig(t, stdout)
require.NotNil(t, restConfig.ExecProvider)
require.Equal(t, []string{"login", "static"}, restConfig.ExecProvider.Args[:2])
// Create Kubernetes client with kubeconfig from pinniped CLI.
kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)
// In addition to the client-go based testing below, also try the kubeconfig
// with kubectl to validate that it works.
adminClient := library.NewClientset(t)
t.Run(
"access as user with kubectl",
library.AccessAsUserWithKubectlTest(ctx, adminClient, stdout, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
)
for _, group := range env.TestUser.ExpectedGroups {
group := group
t.Run(
"access as group "+group+" with kubectl",
library.AccessAsGroupWithKubectlTest(ctx, adminClient, stdout, group, env.ConciergeNamespace),
)
}
// Validate that we can auth to the API via our user.
t.Run("access as user with client-go", library.AccessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, kubeClient))
for _, group := range env.TestUser.ExpectedGroups {
group := group
t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, adminClient, group, kubeClient))
// Create Kubernetes client with kubeconfig from pinniped CLI.
kubeClient := library.NewClientsetForKubeConfig(t, stdout)
// Validate that we can auth to the API via our user.
t.Run("access as user with client-go", library.AccessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, kubeClient))
for _, group := range env.TestUser.ExpectedGroups {
group := group
t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, adminClient, group, kubeClient))
}
})
}
}
func buildPinnipedCLI(t *testing.T) string {
func runPinnipedCLI(t *testing.T, pinnipedExe string, args ...string) (string, string) {
t.Helper()
pinnipedExeDir, err := ioutil.TempDir("", "pinniped-cli-test-*")
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, os.RemoveAll(pinnipedExeDir)) })
pinnipedExe := filepath.Join(pinnipedExeDir, "pinniped")
output, err := exec.Command(
"go",
"build",
"-o",
pinnipedExe,
"go.pinniped.dev/cmd/pinniped",
).CombinedOutput()
require.NoError(t, err, string(output))
return pinnipedExe
}
func runPinnipedCLIGetKubeconfig(t *testing.T, pinnipedExe, token, namespaceName, authenticatorType, authenticatorName string) string {
t.Helper()
output, err := exec.Command(
pinnipedExe,
"get-kubeconfig",
"--token", token,
"--pinniped-namespace", namespaceName,
"--authenticator-type", authenticatorType,
"--authenticator-name", authenticatorName,
).CombinedOutput()
require.NoError(t, err, string(output))
return string(output)
var stdout, stderr bytes.Buffer
cmd := exec.Command(pinnipedExe, args...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.NoErrorf(t, cmd.Run(), "stderr:\n%s\n\nstdout:\n%s\n\n", stderr.String(), stdout.String())
return stdout.String(), stderr.String()
}
func TestCLILoginOIDC(t *testing.T) {
@@ -115,11 +126,10 @@ func TestCLILoginOIDC(t *testing.T) {
defer cancel()
// Build pinniped CLI.
t.Logf("building CLI binary")
pinnipedExe := buildPinnipedCLI(t)
pinnipedExe := library.PinnipedCLIPath(t)
// Run "pinniped login oidc" to get an ExecCredential struct with an OIDC ID token.
credOutput, sessionCachePath := runPinniedLoginOIDC(ctx, t, pinnipedExe)
credOutput, sessionCachePath := runPinnipedLoginOIDC(ctx, t, pinnipedExe)
// Assert some properties of the ExecCredential.
t.Logf("validating ExecCredential")
@@ -185,7 +195,7 @@ func TestCLILoginOIDC(t *testing.T) {
require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token)
}
func runPinniedLoginOIDC(
func runPinnipedLoginOIDC(
ctx context.Context,
t *testing.T,
pinnipedExe string,
@@ -244,7 +254,7 @@ func runPinniedLoginOIDC(
credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential)
spawnTestGoroutine(t, func() (err error) {
defer func() {
closeErr := stderr.Close()
closeErr := stdout.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
@@ -342,12 +352,6 @@ func oidcLoginCommand(ctx context.Context, t *testing.T, pinnipedExe string, ses
}
// If there is a custom proxy, set it using standard environment variables.
if env.Proxy != "" {
cmd.Env = append(os.Environ(),
"http_proxy="+env.Proxy,
"https_proxy="+env.Proxy,
"no_proxy=127.0.0.1",
)
}
cmd.Env = append(os.Environ(), env.ProxyEnv()...)
return cmd
}
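
Taken together, the changes above replace the per-test go build with a cached binary plus a generic CLI runner that captures stdout and stderr separately. Below is a minimal sketch of how the new helpers combine, condensed from the diff above purely for illustration; the test name is hypothetical and not part of this commit.

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/test/library"
)

// Hypothetical condensed example (not part of this commit): build or reuse the
// cached pinniped CLI binary, generate a kubeconfig using the new static-token
// flags, and confirm that it points at the "pinniped login static" exec plugin.
func TestStaticTokenKubeconfigSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
	authenticator := library.CreateTestWebhookAuthenticator(ctx, t)

	// Build (or reuse) the shared CLI binary and run "pinniped get kubeconfig".
	pinnipedExe := library.PinnipedCLIPath(t)
	kubeconfigYAML, stderr := runPinnipedCLI(t, pinnipedExe,
		"get", "kubeconfig",
		"--static-token", env.TestUser.Token,
		"--concierge-namespace", env.ConciergeNamespace,
		"--concierge-authenticator-type", "webhook",
		"--concierge-authenticator-name", authenticator.Name,
	)
	require.Equal(t, "", stderr)

	// The generated kubeconfig should use the "pinniped login static" exec plugin.
	restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
	require.NotNil(t, restConfig.ExecProvider)
	require.Equal(t, []string{"login", "static"}, restConfig.ExecProvider.Args[:2])
}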


@@ -47,32 +47,29 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
tests := []struct {
name string
authenticator func(t *testing.T) corev1.TypedLocalObjectReference
authenticator func(context.Context, *testing.T) corev1.TypedLocalObjectReference
token func(t *testing.T) (token string, username string, groups []string)
}{
{
name: "webhook",
authenticator: func(t *testing.T) corev1.TypedLocalObjectReference {
return library.CreateTestWebhookAuthenticator(ctx, t)
},
name: "webhook",
authenticator: library.CreateTestWebhookAuthenticator,
token: func(t *testing.T) (string, string, []string) {
return library.IntegrationEnv(t).TestUser.Token, env.TestUser.ExpectedUsername, env.TestUser.ExpectedGroups
},
},
{
name: "jwt authenticator",
authenticator: func(t *testing.T) corev1.TypedLocalObjectReference {
return library.CreateTestJWTAuthenticator(ctx, t, "email")
},
name: "jwt authenticator",
authenticator: library.CreateTestJWTAuthenticatorForCLIUpstream,
token: func(t *testing.T) (string, string, []string) {
pinnipedExe := buildPinnipedCLI(t)
credOutput, _ := runPinniedLoginOIDC(ctx, t, pinnipedExe)
pinnipedExe := library.PinnipedCLIPath(t)
credOutput, _ := runPinnipedLoginOIDC(ctx, t, pinnipedExe)
token := credOutput.Status.Token
// By default, the JWTAuthenticator expects the username to be in the "username" claim and the
// groups to be in the "groups" claim.
// We are configuring pinniped to set the username to be the "email" claim from the token.
username, groups := getJWTEmailAndGroupsClaims(t, token)
// However, we are configuring Pinniped in the `CreateTestJWTAuthenticatorForCLIUpstream` method above
// to read the username from the "sub" claim of the token instead.
username, groups := getJWTSubAndGroupsClaims(t, token)
return token, username, groups
},
@@ -81,7 +78,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
authenticator := test.authenticator(t)
authenticator := test.authenticator(ctx, t)
token, username, groups := test.token(t)
var response *loginv1alpha1.TokenCredentialRequest
@@ -234,18 +231,18 @@ func safeDerefStringPtr(s *string) string {
return *s
}
func getJWTEmailAndGroupsClaims(t *testing.T, jwt string) (string, []string) {
func getJWTSubAndGroupsClaims(t *testing.T, jwt string) (string, []string) {
t.Helper()
token, err := jwtpkg.ParseSigned(jwt)
require.NoError(t, err)
var claims struct {
Email string `json:"email"`
Sub string `json:"sub"`
Groups []string `json:"groups"`
}
err = token.UnsafeClaimsWithoutVerification(&claims)
require.NoError(t, err)
return claims.Email, claims.Groups
return claims.Sub, claims.Groups
}


@@ -0,0 +1,258 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package integration
import (
"bufio"
"bytes"
"context"
"crypto/x509/pkix"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
rbacv1 "k8s.io/api/rbac/v1"
authv1alpha "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/config/v1alpha1"
idpv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/idp/v1alpha1"
"go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/testutil"
"go.pinniped.dev/test/library"
"go.pinniped.dev/test/library/browsertest"
)
// TestE2EFullIntegration tests a full integration scenario that combines the supervisor, concierge, and CLI.
func TestE2EFullIntegration(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancelFunc()
// Build pinniped CLI.
pinnipedExe := library.PinnipedCLIPath(t)
tempDir := testutil.TempDir(t)
// Start the browser driver.
page := browsertest.Open(t)
// Infer the downstream issuer URL from the callback associated with the upstream test client registration.
issuerURL, err := url.Parse(env.SupervisorTestUpstream.CallbackURL)
require.NoError(t, err)
require.True(t, strings.HasSuffix(issuerURL.Path, "/callback"))
issuerURL.Path = strings.TrimSuffix(issuerURL.Path, "/callback")
t.Logf("testing with downstream issuer URL %s", issuerURL.String())
// Generate a CA bundle with which to serve this provider.
t.Logf("generating test CA")
ca, err := certauthority.New(pkix.Name{CommonName: "Downstream Test CA"}, 1*time.Hour)
require.NoError(t, err)
// Save that bundle plus the one that signs the upstream issuer, for test purposes.
testCABundlePath := filepath.Join(tempDir, "test-ca.pem")
testCABundlePEM := []byte(string(ca.Bundle()) + "\n" + env.SupervisorTestUpstream.CABundle)
testCABundleBase64 := base64.StdEncoding.EncodeToString(testCABundlePEM)
require.NoError(t, ioutil.WriteFile(testCABundlePath, testCABundlePEM, 0600))
// Use the CA to issue a TLS server cert.
t.Logf("issuing test certificate")
tlsCert, err := ca.Issue(
pkix.Name{CommonName: issuerURL.Hostname()},
[]string{issuerURL.Hostname()},
nil,
1*time.Hour,
)
require.NoError(t, err)
certPEM, keyPEM, err := certauthority.ToPEM(tlsCert)
require.NoError(t, err)
// Write the serving cert to a secret.
certSecret := library.CreateTestSecret(t,
env.SupervisorNamespace,
"oidc-provider-tls",
"kubernetes.io/tls",
map[string]string{"tls.crt": string(certPEM), "tls.key": string(keyPEM)},
)
// Create the downstream OIDCProvider and expect it to go into the success status condition.
downstream := library.CreateTestOIDCProvider(ctx, t,
issuerURL.String(),
certSecret.Name,
configv1alpha1.SuccessOIDCProviderStatusCondition,
)
// Create upstream OIDC provider and wait for it to become ready.
library.CreateTestUpstreamOIDCProvider(t, idpv1alpha1.UpstreamOIDCProviderSpec{
Issuer: env.SupervisorTestUpstream.Issuer,
TLS: &idpv1alpha1.TLSSpec{
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorTestUpstream.CABundle)),
},
AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{
AdditionalScopes: []string{"email"},
},
Claims: idpv1alpha1.OIDCClaims{
Username: "email",
},
Client: idpv1alpha1.OIDCClient{
SecretName: library.CreateClientCredsSecret(t, env.SupervisorTestUpstream.ClientID, env.SupervisorTestUpstream.ClientSecret).Name,
},
}, idpv1alpha1.PhaseReady)
// Create a JWTAuthenticator that will validate the tokens from the downstream issuer.
clusterAudience := "test-cluster-" + library.RandHex(t, 8)
authenticator := library.CreateTestJWTAuthenticator(ctx, t, authv1alpha.JWTAuthenticatorSpec{
Issuer: downstream.Spec.Issuer,
Audience: clusterAudience,
TLS: &authv1alpha.TLSSpec{CertificateAuthorityData: testCABundleBase64},
})
// Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
library.CreateTestClusterRoleBinding(t,
rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: env.SupervisorTestUpstream.Username},
rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
)
// Use a specific session cache for this test.
sessionCachePath := tempDir + "/sessions.yaml"
// Run "pinniped get kubeconfig" to get a kubeconfig YAML.
kubeconfigYAML, stderr := runPinnipedCLI(t, pinnipedExe, "get", "kubeconfig",
"--concierge-namespace", env.ConciergeNamespace,
"--concierge-authenticator-type", "jwt",
"--concierge-authenticator-name", authenticator.Name,
"--oidc-skip-browser",
"--oidc-ca-bundle", testCABundlePath,
"--oidc-session-cache", sessionCachePath,
)
require.Equal(t, "", stderr)
restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
require.NotNil(t, restConfig.ExecProvider)
require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2])
kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))
// Wait 10 seconds for the JWTAuthenticator to become initialized.
// TODO: remove this sleep once we have fixed the initialization problem.
t.Log("sleeping 10s to wait for JWTAuthenticator to become initialized")
time.Sleep(10 * time.Second)
// Run "kubectl get namespaces" which should trigger a browser login via the plugin.
start := time.Now()
kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
stderrPipe, err := kubectlCmd.StderrPipe()
require.NoError(t, err)
stdoutPipe, err := kubectlCmd.StdoutPipe()
require.NoError(t, err)
t.Logf("starting kubectl subprocess")
require.NoError(t, kubectlCmd.Start())
t.Cleanup(func() {
err := kubectlCmd.Wait()
t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode())
require.NoErrorf(t, err, "kubectl process did not exit cleanly")
})
// Start a background goroutine to read stderr from the CLI and parse out the login URL.
loginURLChan := make(chan string)
spawnTestGoroutine(t, func() (err error) {
defer func() {
closeErr := stderrPipe.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
}
}()
reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe))
line, err := reader.ReadString('\n')
if err != nil {
return fmt.Errorf("could not read login URL line from stderr: %w", err)
}
const prompt = "Please log in: "
if !strings.HasPrefix(line, prompt) {
return fmt.Errorf("expected %q to have prefix %q", line, prompt)
}
loginURLChan <- strings.TrimPrefix(line, prompt)
return readAndExpectEmpty(reader)
})
// Start a background goroutine to read stdout from kubectl and return the result as a string.
kubectlOutputChan := make(chan string)
spawnTestGoroutine(t, func() (err error) {
defer func() {
closeErr := stdoutPipe.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
}
}()
output, err := ioutil.ReadAll(stdoutPipe)
if err != nil {
return err
}
t.Logf("kubectl output:\n%s\n", output)
kubectlOutputChan <- string(output)
return nil
})
// Wait for the CLI to print out the login URL and open the browser to it.
t.Logf("waiting for CLI to output login URL")
var loginURL string
select {
case <-time.After(1 * time.Minute):
require.Fail(t, "timed out waiting for login URL")
case loginURL = <-loginURLChan:
}
t.Logf("navigating to login page")
require.NoError(t, page.Navigate(loginURL))
// Expect to be redirected to the upstream provider and log in.
browsertest.LoginToUpstream(t, page, env.SupervisorTestUpstream)
// Expect to be redirected to the localhost callback.
t.Logf("waiting for redirect to callback")
browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`))
// Wait for the "pre" element that gets rendered for a `text/plain` page, and
// assert that it contains the success message.
t.Logf("verifying success page")
browsertest.WaitForVisibleElements(t, page, "pre")
msg, err := page.First("pre").Text()
require.NoError(t, err)
require.Equal(t, "you have been logged in and may now close this tab", msg)
// Expect the CLI to output a list of namespaces in JSON format.
t.Logf("waiting for kubectl to output namespace list JSON")
var kubectlOutput string
select {
case <-time.After(10 * time.Second):
require.Fail(t, "timed out waiting for kubectl output")
case kubectlOutput = <-kubectlOutputChan:
}
require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned")
t.Logf("first kubectl command took %s", time.Since(start).String())
// Run kubectl again, which should work with no browser interaction.
kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
start = time.Now()
kubectlOutput2, err := kubectlCmd2.CombinedOutput()
require.NoError(t, err)
require.Greaterf(t, len(bytes.Split(kubectlOutput2, []byte("\n"))), 2, "expected some namespaces to be returned again")
t.Logf("second kubectl command took %s", time.Since(start).String())
}


@@ -165,8 +165,12 @@ func TestSupervisorLogin(t *testing.T) {
authcode := callback.URL.Query().Get("code")
require.NotEmpty(t, authcode)
// Call the token endpoint to get tokens.
tokenResponse, err := downstreamOAuth2Config.Exchange(oidcHTTPClientContext, authcode, pkceParam.Verifier())
// Call the token endpoint to get tokens. Give the Supervisor a couple of seconds to wire up its signing key.
var tokenResponse *oauth2.Token
assert.Eventually(t, func() bool {
tokenResponse, err = downstreamOAuth2Config.Exchange(oidcHTTPClientContext, authcode, pkceParam.Verifier())
return err == nil
}, time.Second*5, time.Second*1)
require.NoError(t, err)
expectedIDTokenClaims := []string{"iss", "exp", "sub", "aud", "auth_time", "iat", "jti", "nonce", "rat", "username"}

test/library/cli.go (new file)

@@ -0,0 +1,45 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package library
import (
"io/ioutil"
"os/exec"
"path/filepath"
"sync"
"testing"
"github.com/stretchr/testify/require"
"go.pinniped.dev/internal/testutil"
)
//nolint: gochecknoglobals
var pinnipedCLIBinaryCache struct {
buf []byte
mutex sync.Mutex
}
// PinnipedCLIPath returns the path to the Pinniped CLI binary, built on demand and cached between tests.
func PinnipedCLIPath(t *testing.T) string {
t.Helper()
pinnipedCLIBinaryCache.mutex.Lock()
defer pinnipedCLIBinaryCache.mutex.Unlock()
path := filepath.Join(testutil.TempDir(t), "pinniped")
if pinnipedCLIBinaryCache.buf != nil {
t.Log("using previously built pinniped CLI binary")
require.NoError(t, ioutil.WriteFile(path, pinnipedCLIBinaryCache.buf, 0500))
return path
}
t.Log("building pinniped CLI binary")
output, err := exec.Command("go", "build", "-o", path, "go.pinniped.dev/cmd/pinniped").CombinedOutput()
require.NoError(t, err, string(output))
// Fill our cache so we don't have to do this again.
pinnipedCLIBinaryCache.buf, err = ioutil.ReadFile(path)
require.NoError(t, err, string(output))
return path
}
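
A note on the design above: the cache stores the built binary's bytes rather than its path, presumably because testutil.TempDir(t) is removed when the first building test finishes; each later caller therefore writes its own 0500 copy of the cached bytes into its own temp dir, while the comparatively slow go build runs only once per test process.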


@@ -15,8 +15,10 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -49,7 +51,10 @@ func NewClientset(t *testing.T) kubernetes.Interface {
func NewClientsetForKubeConfig(t *testing.T, kubeConfig string) kubernetes.Interface {
t.Helper()
return newClientsetWithConfig(t, NewRestConfigFromKubeconfig(t, kubeConfig))
}
func NewRestConfigFromKubeconfig(t *testing.T, kubeConfig string) *rest.Config {
kubeConfigFile, err := ioutil.TempFile("", "pinniped-cli-test-*")
require.NoError(t, err)
defer os.Remove(kubeConfigFile.Name())
@@ -59,8 +64,7 @@ func NewClientsetForKubeConfig(t *testing.T, kubeConfig string) kubernetes.Inter
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigFile.Name())
require.NoError(t, err)
return newClientsetWithConfig(t, restConfig)
return restConfig
}
func NewClientsetWithCertAndKey(t *testing.T, clientCertificateData, clientKeyData string) kubernetes.Interface {
@@ -164,13 +168,38 @@ func CreateTestWebhookAuthenticator(ctx context.Context, t *testing.T) corev1.Ty
}
}
// CreateTestJWTAuthenticator creates and returns a test JWTAuthenticator in
// CreateTestJWTAuthenticatorForCLIUpstream creates and returns a test JWTAuthenticator in
// $PINNIPED_TEST_CONCIERGE_NAMESPACE, which will be automatically deleted at the end of the current
// test's lifetime. It returns a corev1.TypedLocalObjectReference which describes the test JWT
// authenticator within the test namespace.
//
// CreateTestJWTAuthenticator gets the OIDC issuer info from IntegrationEnv().CLITestUpstream.
func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, usernameClaim string) corev1.TypedLocalObjectReference {
// CreateTestJWTAuthenticatorForCLIUpstream gets the OIDC issuer info from IntegrationEnv().CLITestUpstream.
func CreateTestJWTAuthenticatorForCLIUpstream(ctx context.Context, t *testing.T) corev1.TypedLocalObjectReference {
t.Helper()
testEnv := IntegrationEnv(t)
spec := auth1alpha1.JWTAuthenticatorSpec{
Issuer: testEnv.CLITestUpstream.Issuer,
Audience: testEnv.CLITestUpstream.ClientID,
// The default UsernameClaim is "username" but the upstreams that we use for
// integration tests won't necessarily have that claim, so use "sub" here.
UsernameClaim: "sub",
}
// If the test upstream does not have a CA bundle specified, then don't configure one in the
// JWTAuthenticator. Leaving TLSSpec set to nil will result in OIDC discovery using the OS's root
// CA store.
if testEnv.CLITestUpstream.CABundle != "" {
spec.TLS = &auth1alpha1.TLSSpec{
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(testEnv.CLITestUpstream.CABundle)),
}
}
return CreateTestJWTAuthenticator(ctx, t, spec)
}
// CreateTestJWTAuthenticator creates and returns a test JWTAuthenticator in
// $PINNIPED_TEST_CONCIERGE_NAMESPACE, which will be automatically deleted at the end of the current
// test's lifetime. It returns a corev1.TypedLocalObjectReference which describes the test JWT
// authenticator within the test namespace.
func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, spec auth1alpha1.JWTAuthenticatorSpec) corev1.TypedLocalObjectReference {
t.Helper()
testEnv := IntegrationEnv(t)
@@ -180,24 +209,9 @@ func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, usernameClaim
createContext, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
// If the test upstream does not have a CA bundle specified, then don't configure one in the
// JWTAuthenticator. Leaving TLSSpec set to nil will result in OIDC discovery using the OS's root
// CA store.
tlsSpec := &auth1alpha1.TLSSpec{
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(testEnv.CLITestUpstream.CABundle)),
}
if testEnv.CLITestUpstream.CABundle == "" {
tlsSpec = nil
}
jwtAuthenticator, err := jwtAuthenticators.Create(createContext, &auth1alpha1.JWTAuthenticator{
ObjectMeta: testObjectMeta(t, "jwt-authenticator"),
Spec: auth1alpha1.JWTAuthenticatorSpec{
Issuer: testEnv.CLITestUpstream.Issuer,
Audience: testEnv.CLITestUpstream.ClientID,
TLS: tlsSpec,
UsernameClaim: usernameClaim,
},
Spec: spec,
}, metav1.CreateOptions{})
require.NoError(t, err, "could not create test JWTAuthenticator")
t.Logf("created test JWTAuthenticator %s/%s", jwtAuthenticator.Namespace, jwtAuthenticator.Name)
@@ -232,7 +246,7 @@ func CreateTestOIDCProvider(ctx context.Context, t *testing.T, issuer string, ce
defer cancel()
if issuer == "" {
issuer = randomIssuer(t)
issuer = fmt.Sprintf("http://test-issuer-%s.pinniped.dev", RandHex(t, 8))
}
opcs := NewSupervisorClientset(t).ConfigV1alpha1().OIDCProviders(testEnv.SupervisorNamespace)
@@ -266,21 +280,22 @@ func CreateTestOIDCProvider(ctx context.Context, t *testing.T, issuer string, ce
// Wait for the OIDCProvider to enter the expected phase (or time out).
var result *configv1alpha1.OIDCProvider
require.Eventuallyf(t, func() bool {
assert.Eventuallyf(t, func() bool {
var err error
result, err = opcs.Get(ctx, opc.Name, metav1.GetOptions{})
require.NoError(t, err)
return result.Status.Status == expectStatus
}, 60*time.Second, 1*time.Second, "expected the UpstreamOIDCProvider to go into phase %s", expectStatus)
}, 60*time.Second, 1*time.Second, "expected the OIDCProvider to have status %q", expectStatus)
require.Equal(t, expectStatus, result.Status.Status)
return opc
}
func randomIssuer(t *testing.T) string {
var buf [8]byte
_, err := io.ReadFull(rand.Reader, buf[:])
func RandHex(t *testing.T, numBytes int) string {
buf := make([]byte, numBytes)
_, err := io.ReadFull(rand.Reader, buf)
require.NoError(t, err)
return fmt.Sprintf("http://test-issuer-%s.pinniped.dev", hex.EncodeToString(buf[:]))
return hex.EncodeToString(buf)
}
func CreateTestSecret(t *testing.T, namespace string, baseName string, secretType string, stringData map[string]string) *corev1.Secret {
@@ -297,6 +312,7 @@ func CreateTestSecret(t *testing.T, namespace string, baseName string, secretTyp
require.NoError(t, err)
t.Cleanup(func() {
t.Logf("cleaning up test Secret %s/%s", created.Namespace, created.Name)
err := client.CoreV1().Secrets(namespace).Delete(context.Background(), created.Name, metav1.DeleteOptions{})
require.NoError(t, err)
})
@@ -309,7 +325,7 @@ func CreateClientCredsSecret(t *testing.T, clientID string, clientSecret string)
env := IntegrationEnv(t)
return CreateTestSecret(t,
env.SupervisorNamespace,
"test-client-creds",
"client-creds",
"secrets.pinniped.dev/oidc-client",
map[string]string{
"clientID": clientID,
@@ -336,6 +352,7 @@ func CreateTestUpstreamOIDCProvider(t *testing.T, spec idpv1alpha1.UpstreamOIDCP
// Always clean this up after this point.
t.Cleanup(func() {
t.Logf("cleaning up test UpstreamOIDCProvider %s/%s", created.Namespace, created.Name)
err := upstreams.Delete(context.Background(), created.Name, metav1.DeleteOptions{})
require.NoError(t, err)
})
@@ -352,6 +369,31 @@ func CreateTestUpstreamOIDCProvider(t *testing.T, spec idpv1alpha1.UpstreamOIDCP
return result
}
func CreateTestClusterRoleBinding(t *testing.T, subject rbacv1.Subject, roleRef rbacv1.RoleRef) *rbacv1.ClusterRoleBinding {
t.Helper()
client := NewClientset(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Create the ClusterRoleBinding using GenerateName to get a random name.
clusterRoles := client.RbacV1().ClusterRoleBindings()
created, err := clusterRoles.Create(ctx, &rbacv1.ClusterRoleBinding{
ObjectMeta: testObjectMeta(t, "cluster-role"),
Subjects: []rbacv1.Subject{subject},
RoleRef: roleRef,
}, metav1.CreateOptions{})
require.NoError(t, err)
t.Logf("created test ClusterRoleBinding %s", created.Name)
t.Cleanup(func() {
t.Logf("cleaning up test ClusterRoleBinding %s", created.Name)
err := clusterRoles.Delete(context.Background(), created.Name, metav1.DeleteOptions{})
require.NoError(t, err)
})
return created
}
func testObjectMeta(t *testing.T, baseName string) metav1.ObjectMeta {
return metav1.ObjectMeta{
GenerateName: fmt.Sprintf("test-%s-", baseName),


@@ -59,6 +59,14 @@ type TestOIDCUpstream struct {
Password string `json:"password"`
}
// ProxyEnv returns a set of environment variable strings (e.g., to combine with os.Environ()) which set up the configured test HTTP proxy.
func (e *TestEnv) ProxyEnv() []string {
if e.Proxy == "" {
return nil
}
return []string{"http_proxy=" + e.Proxy, "https_proxy=" + e.Proxy, "no_proxy=127.0.0.1"}
}
// IntegrationEnv gets the integration test environment from OS environment variables. This
// method also implies SkipUnlessIntegration().
func IntegrationEnv(t *testing.T) *TestEnv {