Mirror of https://github.com/vmware-tanzu/pinniped.git (synced 2026-01-08 07:11:53 +00:00)
Allow multiple Pinnipeds to work on same cluster
Yes, this is a huge commit.
The middleware allows you to customize the group name of every *.pinniped.dev
API group.
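For example, with a suffix like pinniped.example.com, login.concierge.pinniped.dev
would become login.concierge.pinniped.example.com. Roughly, the suffix swap looks
like this (just a sketch with a made-up helper name, not the actual middleware code):

package main

import (
	"fmt"
	"strings"
)

// defaultSuffix is the suffix baked into Pinniped's default API group names.
const defaultSuffix = "pinniped.dev"

// replaceSuffix is a hypothetical helper: it swaps the default suffix on an API
// group for a custom one and leaves non-Pinniped groups untouched.
func replaceSuffix(apiGroup, newSuffix string) string {
	if !strings.HasSuffix(apiGroup, defaultSuffix) {
		return apiGroup
	}
	return strings.TrimSuffix(apiGroup, defaultSuffix) + newSuffix
}

func main() {
	fmt.Println(replaceSuffix("login.concierge.pinniped.dev", "pinniped.example.com"))
	// prints: login.concierge.pinniped.example.com
}

The diff below just threads the corresponding --api-group-suffix flag through one
of the CLI integration tests.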
Some notes about other small things in this commit:
- We removed the internal/client package in favor of pkg/conciergeclient. The
two packages do basically the same thing. I don't think we use the former
anymore.
- We re-enabled cluster-scoped owner assertions in the integration tests.
This code was added in internal/ownerref. See a0546942 for when this
assertion was removed.
- Note: the middleware code is in charge of restoring the GV of a request object,
so we should never need to write mutations that do that (a rough sketch follows
these notes).
- We updated the supervisor secret generation to no longer manually set an owner
reference to the deployment, since the middleware code now does this (a second
sketch follows these notes). I think we still need some way to trigger an initial
event for the secret generator controller, which involves knowing the namespace
and name of the generated secret, so I still wired the deployment through. We
could use a namespace/name tuple here, but I was lazy.
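To make the GV note above a bit more concrete: the idea is that the group gets
rewritten on the way out and restored on the way back, so callers only ever deal
with the default *.pinniped.dev groups. A minimal sketch under that assumption
(rewriteGroup/restoreGroup are hypothetical helpers, not the real middleware API):

package sketch

import (
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// rewriteGroup models the request-side step: swap the default pinniped.dev
// suffix for the configured one before the object goes out on the wire.
func rewriteGroup(gvk schema.GroupVersionKind, suffix string) schema.GroupVersionKind {
	gvk.Group = strings.TrimSuffix(gvk.Group, "pinniped.dev") + suffix
	return gvk
}

// restoreGroup models the response-side step: undo the rewrite so the caller
// never has to mutate the GV back itself.
func restoreGroup(gvk schema.GroupVersionKind, suffix string) schema.GroupVersionKind {
	gvk.Group = strings.TrimSuffix(gvk.Group, suffix) + "pinniped.dev"
	return gvk
}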
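And for the owner reference note: what the secret generator used to attach by
hand (and what the middleware now adds for us) is essentially a standard owner
reference from the generated Secret to the supervisor Deployment. Sketch with
plain Kubernetes API types, not the actual controller code; withDeploymentOwner
is a made-up helper:

package sketch

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// withDeploymentOwner is a hypothetical helper: it marks the generated Secret
// as owned by the supervisor Deployment so the Secret is garbage collected
// along with it.
func withDeploymentOwner(secret *corev1.Secret, deployment *appsv1.Deployment) {
	secret.OwnerReferences = []metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       deployment.Name,
		UID:        deployment.UID,
	}}
}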
Signed-off-by: Andrew Keesler <akeesler@vmware.com>
Co-authored-by: Ryan Richard <richardry@vmware.com>
@@ -57,6 +57,7 @@ func TestCLIGetKubeconfigStaticToken(t *testing.T) {
 				"--pinniped-namespace", env.ConciergeNamespace,
 				"--authenticator-type", "webhook",
 				"--authenticator-name", authenticator.Name,
+				"--api-group-suffix", env.APIGroupSuffix,
 			},
 			expectStderr: "Command \"get-kubeconfig\" is deprecated, Please use `pinniped get kubeconfig` instead.\n",
 		},
@@ -84,16 +85,15 @@ func TestCLIGetKubeconfigStaticToken(t *testing.T) {
 
 		// In addition to the client-go based testing below, also try the kubeconfig
 		// with kubectl to validate that it works.
-		adminClient := library.NewClientset(t)
 		t.Run(
 			"access as user with kubectl",
-			library.AccessAsUserWithKubectlTest(ctx, adminClient, stdout, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
+			library.AccessAsUserWithKubectlTest(stdout, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
 		)
 		for _, group := range env.TestUser.ExpectedGroups {
 			group := group
 			t.Run(
 				"access as group "+group+" with kubectl",
-				library.AccessAsGroupWithKubectlTest(ctx, adminClient, stdout, group, env.ConciergeNamespace),
+				library.AccessAsGroupWithKubectlTest(stdout, group, env.ConciergeNamespace),
 			)
 		}
 
@@ -101,10 +101,10 @@ func TestCLIGetKubeconfigStaticToken(t *testing.T) {
 		kubeClient := library.NewClientsetForKubeConfig(t, stdout)
 
 		// Validate that we can auth to the API via our user.
-		t.Run("access as user with client-go", library.AccessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, kubeClient))
+		t.Run("access as user with client-go", library.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, kubeClient))
 		for _, group := range env.TestUser.ExpectedGroups {
 			group := group
-			t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, adminClient, group, kubeClient))
+			t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, group, kubeClient))
 		}
 	})
 }