login oidc cmd checks access token expiry before doing token exchange

In the RFC8693 token exchange, the CLI sends your access token and
receives in exchange a new cluster-scoped ID token.
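
For reference, RFC 8693 models this exchange as a form-encoded POST to the issuer's token endpoint. The following is a minimal sketch in Go using the standard RFC 8693 parameter names; the endpoint URL, audience value, and client ID plumbing are illustrative assumptions, not the CLI's actual implementation:

package tokenexchange

import (
	"net/http"
	"net/url"
	"strings"
)

// exchangeAccessToken sketches an RFC 8693 token exchange: the cached access
// token is sent as the subject_token, and the response's access_token field
// carries the newly issued cluster-scoped token (per RFC 8693's response
// format). Parameter names come from RFC 8693; the audience and client ID
// handling here are assumptions for illustration.
func exchangeAccessToken(tokenEndpoint, accessToken, clusterAudience, clientID string) (*http.Response, error) {
	form := url.Values{
		"grant_type":           {"urn:ietf:params:oauth:grant-type:token-exchange"},
		"client_id":            {clientID},
		"subject_token":        {accessToken},
		"subject_token_type":   {"urn:ietf:params:oauth:token-type:access_token"},
		"requested_token_type": {"urn:ietf:params:oauth:token-type:jwt"},
		"audience":             {clusterAudience},
	}
	req, err := http.NewRequest(http.MethodPost, tokenEndpoint, strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return http.DefaultClient.Do(req)
}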

Fix a bug in the CLI. Whenever the "pinniped login oidc" command was
planning to perform the RFC8693 token exchange, which sends the access
token, it failed to check that the cached access token was still valid
before performing the exchange. It instead checked whether the cached ID
token was still valid, but that is not relevant in this situation because
the cached ID token will not be used for anything (the new ID token
returned by the RFC8693 token exchange will be used for auth instead).
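
To make the fix concrete, here is a minimal sketch of the corrected check. The real cached-session types live in Pinniped's client packages, so the struct shapes and the expiry skew buffer below are assumptions for illustration:

package tokenexchange

import "time"

// Stand-ins for the CLI's cached session shape; the real types are in
// Pinniped's client packages, so these names are assumptions.
type expiringToken struct {
	Token  string
	Expiry time.Time
}

type cachedSession struct {
	AccessToken *expiringToken
	IDToken     *expiringToken
}

// Before the fix, this path tested the cached ID token's expiry, even though
// that ID token is discarded on the token-exchange path. The fix tests the
// access token, which is what actually gets sent in the RFC 8693 request.
func accessTokenStillValid(session *cachedSession, now time.Time) bool {
	return session != nil &&
		session.AccessToken != nil &&
		session.AccessToken.Expiry.After(now.Add(10*time.Second)) // small skew buffer (assumed value)
}

The point is simply that the pre-exchange validity test must look at the token that will be sent (the access token), not the one that will be discarded (the cached ID token).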

This bug doesn't actually matter today, because Supervisor-issued
access and ID tokens always have the same 2-minute lifetime.
However, future enhancements may cause them to have different lifetimes
in certain circumstances. Fix this CLI bug now to prepare for those
potential future enhancements.
Ryan Richard
2024-02-09 11:35:02 -08:00
parent d5ce48bd4b
commit dce9409ccc
6 changed files with 325 additions and 111 deletions


@@ -1,4 +1,4 @@
-// Copyright 2022-2023 the Pinniped contributors. All Rights Reserved.
+// Copyright 2022-2024 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0

 package integration
@@ -209,12 +209,14 @@ func TestSupervisorWarnings_Browser(t *testing.T) {
 	err = os.Remove(credentialCachePath)
 	require.NoError(t, err)

-	// wait for the existing tokens to expire, triggering the refresh flow.
-	ctx2, cancel2 := context.WithTimeout(ctx, 1*time.Minute)
-	defer cancel2()
+	// The cached access token is valid for 2 minutes, and can be used to perform the token exchange again
+	// during that time. Remove it to force the refresh flow to happen on the next kubectl invocation,
+	// without needing to wait for the access token to expire first.
+	token.AccessToken = nil
+	cache.PutToken(sessionCacheKey, token)

 	// Run kubectl, which should work without any prompting for authentication.
-	kubectlCmd2 := exec.CommandContext(ctx2, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
+	kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
 	kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
 	startTime2 := time.Now()
 	var kubectlStdoutPipe2 io.ReadCloser
@@ -253,7 +255,7 @@ func TestSupervisorWarnings_Browser(t *testing.T) {
 	expectedUsername, password := testlib.CreateFreshADTestUser(t, env)
 	sAMAccountName := expectedUsername + "@" + env.SupervisorUpstreamActiveDirectory.Domain

-	setupClusterForEndToEndActiveDirectoryTest(t, sAMAccountName, env)
+	createdProvider := setupClusterForEndToEndActiveDirectoryTest(t, sAMAccountName, env)

 	testlib.WaitForFederationDomainStatusPhase(ctx, t, downstream.Name, configv1alpha1.FederationDomainPhaseReady)
 	// Use a specific session cache for this test.
@@ -309,6 +311,24 @@ func TestSupervisorWarnings_Browser(t *testing.T) {
 	t.Logf("first kubectl command took %s", time.Since(start).String())

+	cache := filesession.New(sessionCachePath, filesession.WithErrorReporter(func(err error) {
+		require.NoError(t, err)
+	}))
+
+	// construct the cache key
+	downstreamScopes := []string{"offline_access", "openid", "pinniped:request-audience", "groups"}
+	sort.Strings(downstreamScopes)
+	sessionCacheKey := oidcclient.SessionCacheKey{
+		Issuer:               downstream.Spec.Issuer,
+		ClientID:             "pinniped-cli",
+		Scopes:               downstreamScopes,
+		RedirectURI:          "http://localhost:0/callback",
+		UpstreamProviderName: createdProvider.Name,
+	}
+
+	// use it to get the cache entry
+	token := cache.GetToken(sessionCacheKey)
+	require.NotNil(t, token)
+
 	// create an active directory group, and add our user to it.
 	groupName := testlib.CreateFreshADTestGroup(t, env)
 	testlib.AddTestUserToGroup(t, env, groupName, expectedUsername)
@@ -317,11 +337,14 @@ func TestSupervisorWarnings_Browser(t *testing.T) {
 	err = os.Remove(credentialCachePath)
 	require.NoError(t, err)

-	ctx2, cancel2 := context.WithTimeout(ctx, 1*time.Minute)
-	defer cancel2()
+	// The cached access token is valid for 2 minutes, and can be used to perform the token exchange again
+	// during that time. Remove it to force the refresh flow to happen on the next kubectl invocation,
+	// without needing to wait for the access token to expire first.
+	token.AccessToken = nil
+	cache.PutToken(sessionCacheKey, token)

 	// Run kubectl, which should work without any prompting for authentication.
-	kubectlCmd2 := exec.CommandContext(ctx2, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
+	kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
 	kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
 	startTime2 := time.Now()
 	var kubectlStdoutPipe2 io.ReadCloser
@@ -520,12 +543,14 @@ func TestSupervisorWarnings_Browser(t *testing.T) {
 	err = os.Remove(credentialCachePath)
 	require.NoError(t, err)

-	// wait for the existing tokens to expire, triggering the refresh flow.
-	ctx2, cancel2 := context.WithTimeout(ctx, 1*time.Minute)
-	defer cancel2()
+	// The cached access token is valid for 2 minutes, and can be used to perform the token exchange again
+	// during that time. Remove it to force the refresh flow to happen on the next kubectl invocation,
+	// without needing to wait for the access token to expire first.
+	token.AccessToken = nil
+	cache.PutToken(sessionCacheKey, token)

 	// Run kubectl, which should work without any prompting for authentication.
-	kubectlCmd2 := exec.CommandContext(ctx2, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
+	kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
 	kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
 	startTime2 := time.Now()
 	var kubectlStdoutPipe2 io.ReadCloser