Mirror of https://github.com/vmware-tanzu/pinniped.git, synced 2026-01-08 07:11:53 +00:00
Merge branch 'main' into oidc_password_grant
@@ -102,7 +102,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.

 	// Wire up the impersonation proxy signer CA as another valid authenticator for client cert auth,
 	// along with the Kube API server's CA.
-	// Note: any changes to the the Authentication stack need to be kept in sync with any assumptions made
+	// Note: any changes to the Authentication stack need to be kept in sync with any assumptions made
 	// by getTransportForUser, especially if we ever update the TCR API to start returning bearer tokens.
 	kubeClientUnsafeForProxying, err := kubeclient.New(clientOpts...)
 	if err != nil {
@@ -297,12 +297,7 @@ func TestExpirerControllerSync(t *testing.T) {

 			if test.wantDelete {
 				require.Len(t, *opts, 1)
-				require.Equal(t, metav1.DeleteOptions{
-					Preconditions: &metav1.Preconditions{
-						UID:             &testUID,
-						ResourceVersion: &testRV,
-					},
-				}, (*opts)[0])
+				require.Equal(t, testutil.NewPreconditions(testUID, testRV), (*opts)[0])
 			} else {
 				require.Len(t, *opts, 0)
 			}
@@ -29,14 +29,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	kubernetesfake "k8s.io/client-go/kubernetes/fake"
 	coretesting "k8s.io/client-go/testing"
-	"k8s.io/utils/pointer"

 	"go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
 	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
@@ -1032,13 +1030,7 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 				// validate that we set delete preconditions correctly
 				r.NotEmpty(*deleteOptions)
 				for _, opt := range *deleteOptions {
-					uid := types.UID("uid-1234")
-					r.Equal(metav1.DeleteOptions{
-						Preconditions: &metav1.Preconditions{
-							UID:             &uid,
-							ResourceVersion: pointer.String("rv-5678"),
-						},
-					}, opt)
+					r.Equal(testutil.NewPreconditions("uid-1234", "rv-5678"), opt)
 				}
 			}
@@ -40,12 +40,29 @@ func NewLegacyPodCleanerController(
 		controllerlib.Config{
 			Name: "legacy-pod-cleaner-controller",
 			Syncer: controllerlib.SyncFunc(func(ctx controllerlib.Context) error {
-				if err := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace).Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{}); err != nil {
+				podClient := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace)
+
+				// avoid blind writes to the API
+				agentPod, err := podClient.Get(ctx.Context, ctx.Key.Name, metav1.GetOptions{})
+				if err != nil {
+					if k8serrors.IsNotFound(err) {
+						return nil
+					}
+					return fmt.Errorf("could not get legacy agent pod: %w", err)
+				}
+
+				if err := podClient.Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{
+					Preconditions: &metav1.Preconditions{
+						UID:             &agentPod.UID,
+						ResourceVersion: &agentPod.ResourceVersion,
+					},
+				}); err != nil {
 					if k8serrors.IsNotFound(err) {
 						return nil
 					}
 					return fmt.Errorf("could not delete legacy agent pod: %w", err)
 				}

 				log.Info("deleted legacy kube-cert-agent pod", "pod", klog.KRef(ctx.Key.Namespace, ctx.Key.Name))
 				return nil
 			}),
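The hunk above replaces a blind delete with a get-then-delete whose preconditions pin the request to the exact UID and ResourceVersion that were just observed, so a pod recreated under the same name is never deleted by a stale sync. The following is an editor's sketch only, not part of the commit: it shows how a caller could react to each failure mode of such a preconditioned delete, assuming the standard client-go error helpers. The Conflict branch is illustrative; the controller above simply returns the error and relies on retries.

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// deleteExactPod deletes only the pod instance that was previously observed.
func deleteExactPod(ctx context.Context, pods corev1client.PodInterface, observed *corev1.Pod) error {
	err := pods.Delete(ctx, observed.Name, metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID:             &observed.UID,             // reject if a different pod now owns this name
			ResourceVersion: &observed.ResourceVersion, // reject if the pod changed since we observed it
		},
	})
	switch {
	case err == nil, k8serrors.IsNotFound(err):
		return nil // already gone counts as success for a cleaner
	case k8serrors.IsConflict(err):
		return nil // the object was replaced or updated; leave the new object alone
	default:
		return fmt.Errorf("could not delete pod: %w", err)
	}
}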
@@ -20,6 +20,7 @@ import (

 	"go.pinniped.dev/internal/controllerlib"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/testutil"
 	"go.pinniped.dev/internal/testutil/testlogger"
 )
@@ -28,9 +29,11 @@ func TestLegacyPodCleanerController(t *testing.T) {

 	legacyAgentPodWithoutExtraLabel := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Namespace: "concierge",
-			Name:      "pinniped-concierge-kube-cert-agent-without-extra-label",
-			Labels:    map[string]string{"kube-cert-agent.pinniped.dev": "true"},
+			Namespace:       "concierge",
+			Name:            "pinniped-concierge-kube-cert-agent-without-extra-label",
+			Labels:          map[string]string{"kube-cert-agent.pinniped.dev": "true"},
+			UID:             "1",
+			ResourceVersion: "2",
 		},
 		Spec:   corev1.PodSpec{},
 		Status: corev1.PodStatus{Phase: corev1.PodRunning},
@@ -40,10 +43,14 @@ func TestLegacyPodCleanerController(t *testing.T) {
 	legacyAgentPodWithExtraLabel.Name = "pinniped-concierge-kube-cert-agent-with-extra-label"
 	legacyAgentPodWithExtraLabel.Labels["extralabel"] = "labelvalue"
 	legacyAgentPodWithExtraLabel.Labels["anotherextralabel"] = "labelvalue"
+	legacyAgentPodWithExtraLabel.UID = "3"
+	legacyAgentPodWithExtraLabel.ResourceVersion = "4"

 	nonLegacyAgentPod := legacyAgentPodWithExtraLabel.DeepCopy()
 	nonLegacyAgentPod.Name = "pinniped-concierge-kube-cert-agent-not-legacy"
 	nonLegacyAgentPod.Labels["kube-cert-agent.pinniped.dev"] = "v2"
+	nonLegacyAgentPod.UID = "5"
+	nonLegacyAgentPod.ResourceVersion = "6"

 	tests := []struct {
 		name string
@@ -52,10 +59,12 @@ func TestLegacyPodCleanerController(t *testing.T) {
 		wantDistinctErrors []string
 		wantDistinctLogs   []string
 		wantActions        []coretesting.Action
+		wantDeleteOptions  []metav1.DeleteOptions
 	}{
 		{
-			name:        "no pods",
-			wantActions: []coretesting.Action{},
+			name:              "no pods",
+			wantActions:       []coretesting.Action{},
+			wantDeleteOptions: []metav1.DeleteOptions{},
 		},
 		{
 			name: "mix of pods",
@@ -69,8 +78,12 @@ func TestLegacyPodCleanerController(t *testing.T) {
 				`legacy-pod-cleaner-controller "level"=0 "msg"="deleted legacy kube-cert-agent pod" "pod"={"name":"pinniped-concierge-kube-cert-agent-with-extra-label","namespace":"concierge"}`,
 			},
 			wantActions: []coretesting.Action{ // the first delete triggers the informer again, but the second invocation triggers a Not Found
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
-				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
+			},
 		},
 		{
@@ -89,9 +102,15 @@ func TestLegacyPodCleanerController(t *testing.T) {
 				"could not delete legacy agent pod: some delete error",
 			},
 			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
+				testutil.NewPreconditions("3", "4"),
+			},
 		},
 		{
 			name: "fail to delete because of not found error",
@@ -107,8 +126,30 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			},
 			wantDistinctErrors: []string{""},
 			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
+			},
 		},
+		{
+			name: "fail to delete because of not found error on get",
+			kubeObjects: []runtime.Object{
+				legacyAgentPodWithoutExtraLabel, // should not be delete (missing extra label)
+				legacyAgentPodWithExtraLabel,    // should be deleted
+				nonLegacyAgentPod,               // should not be deleted (missing legacy agent label)
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("get", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, k8serrors.NewNotFound(action.GetResource().GroupResource(), "")
+				})
+			},
+			wantDistinctErrors: []string{""},
+			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+			wantDeleteOptions: []metav1.DeleteOptions{},
+		},
 	}
 	for _, tt := range tests {
@@ -120,6 +161,10 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			if tt.addKubeReactions != nil {
 				tt.addKubeReactions(kubeClientset)
 			}
+
+			opts := &[]metav1.DeleteOptions{}
+			trackDeleteClient := testutil.NewDeleteOptionsRecorder(kubeClientset, opts)
+
 			kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
 			log := testlogger.New(t)
 			controller := NewLegacyPodCleanerController(
@@ -127,7 +172,7 @@ func TestLegacyPodCleanerController(t *testing.T) {
 					Namespace: "concierge",
 					Labels:    map[string]string{"extralabel": "labelvalue"},
 				},
-				&kubeclient.Client{Kubernetes: kubeClientset},
+				&kubeclient.Client{Kubernetes: trackDeleteClient},
 				kubeInformers.Core().V1().Pods(),
 				log,
 				controllerlib.WithMaxRetries(1),
@@ -140,6 +185,7 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
 			assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
 			assert.Equal(t, tt.wantActions, kubeClientset.Actions()[2:], "unexpected actions")
+			assert.Equal(t, tt.wantDeleteOptions, *opts, "unexpected delete options")
 		})
 	}
 }
@@ -102,7 +102,12 @@ func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
 		}

 		if garbageCollectAfterTime.Before(frozenClock.Now()) {
-			err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{})
+			err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{
+				Preconditions: &metav1.Preconditions{
+					UID:             &secret.UID,
+					ResourceVersion: &secret.ResourceVersion,
+				},
+			})
 			if err != nil {
 				plog.WarningErr("failed to garbage collect resource", err, logKV(secret))
 				continue
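Both the pod cleaner and the garbage collector above now build the same DeleteOptions by hand from the object they observed in the informer cache. The sketch below is an editor's illustration, not something this commit adds (the commit only adds the test-side testutil.NewPreconditions shown later in this diff): a small production-side helper could derive the preconditions from any metav1.Object. The helper name is hypothetical.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// preconditionsFor pins a delete to the exact object instance that was observed,
// so a recreated object with the same name is never deleted by a stale caller.
func preconditionsFor(obj metav1.Object) metav1.DeleteOptions {
	uid := obj.GetUID() // copy into locals so the pointers do not alias the cached object
	rv := obj.GetResourceVersion()
	return metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID:             &uid,
			ResourceVersion: &rv,
		},
	}
}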
@@ -18,6 +18,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/clock"
 	kubeinformers "k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
 	kubernetesfake "k8s.io/client-go/kubernetes/fake"
 	kubetesting "k8s.io/client-go/testing"
@@ -116,6 +117,8 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			subject                 controllerlib.Controller
 			kubeInformerClient      *kubernetesfake.Clientset
 			kubeClient              *kubernetesfake.Clientset
+			deleteOptions           *[]metav1.DeleteOptions
+			deleteOptionsRecorder   kubernetes.Interface
 			kubeInformers           kubeinformers.SharedInformerFactory
 			cancelContext           context.Context
 			cancelContextCancelFunc context.CancelFunc
@@ -130,7 +133,7 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			// Set this at the last second to allow for injection of server override.
 			subject = GarbageCollectorController(
 				fakeClock,
-				kubeClient,
+				deleteOptionsRecorder,
 				kubeInformers.Core().V1().Secrets(),
 				controllerlib.WithInformer,
 			)
@@ -158,6 +161,8 @@ func TestGarbageCollectorControllerSync(t *testing.T) {

 			kubeInformerClient = kubernetesfake.NewSimpleClientset()
 			kubeClient = kubernetesfake.NewSimpleClientset()
+			deleteOptions = &[]metav1.DeleteOptions{}
+			deleteOptionsRecorder = testutil.NewDeleteOptionsRecorder(kubeClient, deleteOptions)
 			kubeInformers = kubeinformers.NewSharedInformerFactory(kubeInformerClient, 0)
 			frozenNow = time.Now().UTC()
 			fakeClock = clock.NewFakeClock(frozenNow)
@@ -193,8 +198,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			it.Before(func() {
 				firstExpiredSecret := &corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      "first expired secret",
-						Namespace: installedInNamespace,
+						Name:            "first expired secret",
+						Namespace:       installedInNamespace,
+						UID:             "uid-123",
+						ResourceVersion: "rv-456",
 						Annotations: map[string]string{
 							"storage.pinniped.dev/garbage-collect-after": frozenNow.Add(-time.Second).Format(time.RFC3339),
 						},
@@ -204,8 +211,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 				r.NoError(kubeClient.Tracker().Add(firstExpiredSecret))
 				secondExpiredSecret := &corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      "second expired secret",
-						Namespace: installedInNamespace,
+						Name:            "second expired secret",
+						Namespace:       installedInNamespace,
+						UID:             "uid-789",
+						ResourceVersion: "rv-555",
 						Annotations: map[string]string{
 							"storage.pinniped.dev/garbage-collect-after": frozenNow.Add(-2 * time.Second).Format(time.RFC3339),
 						},
@@ -237,6 +246,13 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 					},
 					kubeClient.Actions(),
 				)
+				r.ElementsMatch(
+					[]metav1.DeleteOptions{
+						testutil.NewPreconditions("uid-123", "rv-456"),
+						testutil.NewPreconditions("uid-789", "rv-555"),
+					},
+					*deleteOptions,
+				)
 				list, err := kubeClient.CoreV1().Secrets(installedInNamespace).List(context.Background(), metav1.ListOptions{})
 				r.NoError(err)
 				r.Len(list.Items, 2)
@@ -33,6 +33,7 @@ import (
 	"go.pinniped.dev/internal/dynamiccert"
 	"go.pinniped.dev/internal/groupsuffix"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/leaderelection"
 )

 const (
@@ -97,7 +98,7 @@ type Config struct {
 func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	loginConciergeGroupData, identityConciergeGroupData := groupsuffix.ConciergeAggregatedGroups(c.APIGroupSuffix)

-	dref, _, err := deploymentref.New(c.ServerInstallationInfo)
+	dref, deployment, err := deploymentref.New(c.ServerInstallationInfo)
 	if err != nil {
 		return nil, fmt.Errorf("cannot create deployment ref: %w", err)
 	}
@@ -107,7 +108,9 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 		return nil, fmt.Errorf("cannot create API service ref: %w", err)
 	}

-	client, err := kubeclient.New(
+	client, leaderElector, err := leaderelection.New(
+		c.ServerInstallationInfo,
+		deployment,
 		dref,          // first try to use the deployment as an owner ref (for namespace scoped resources)
 		apiServiceRef, // fallback to our API service (for everything else we create)
 		kubeclient.WithMiddleware(groupsuffix.New(c.APIGroupSuffix)),
@@ -303,7 +306,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	// Return a function which starts the informers and controllers.
 	return func(ctx context.Context) {
 		informers.startAndWaitForSync(ctx)
-		go controllerManager.Start(ctx)
+		go leaderElector(ctx, controllerManager.Start)
 	}, nil
 }
@@ -146,7 +146,7 @@ func handleOtherVerbs(

 	result, err := middlewareReq.mutateRequest(obj)
 	if err != nil {
-		return true, nil, err
+		return true, nil, fmt.Errorf("middleware request for %#v failed to mutate: %w", middlewareReq, err)
 	}

 	if !result.mutated {
@@ -231,7 +231,7 @@ func handleCreateOrUpdate(

 	result, err := middlewareReq.mutateRequest(obj)
 	if err != nil {
-		return true, nil, err
+		return true, nil, fmt.Errorf("middleware request for %#v failed to mutate: %w", middlewareReq, err)
 	}

 	if !result.mutated {
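The two hunks above add request context to the mutation errors. Because the wrapping uses the %w verb, the original error is still reachable through errors.Is and errors.As, so callers that match on the underlying cause keep working. A minimal editor's sketch of that property (errSomeFailure and doMutate are hypothetical names, not from this codebase):

package main

import (
	"errors"
	"fmt"
)

var errSomeFailure = errors.New("some failure")

func doMutate() error { return errSomeFailure }

func main() {
	// Wrap the cause with extra context, exactly as the middleware now does.
	err := fmt.Errorf("middleware request for %#v failed to mutate: %w", "req", doMutate())
	fmt.Println(errors.Is(err, errSomeFailure)) // true: the cause is still detectable
}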
internal/leaderelection/leaderelection.go (new file, 150 lines)
@@ -0,0 +1,150 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package leaderelection

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/atomic"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	"go.pinniped.dev/internal/constable"
	"go.pinniped.dev/internal/downward"
	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/plog"
)

const ErrNotLeader constable.Error = "write attempt rejected as client is not leader"

// New returns a client that has a leader election middleware injected into it.
// This middleware will prevent all non-read requests to the Kubernetes API when
// the current process does not hold the leader election lock. Unlike normal
// leader election where the process blocks until it acquires the lock, this
// middleware approach lets the process run as normal for all read requests.
// Another difference is that if the process acquires the lock and then loses it
// (i.e. a failed renewal), it will not exit (i.e. restart). Instead, it will
// simply attempt to acquire the lock again.
//
// The returned function is blocking and will run the leader election polling
// logic and will coordinate lease release with the input controller starter function.
func New(podInfo *downward.PodInfo, deployment *appsv1.Deployment, opts ...kubeclient.Option) (
	*kubeclient.Client,
	func(context.Context, func(context.Context)),
	error,
) {
	internalClient, err := kubeclient.New(opts...)
	if err != nil {
		return nil, nil, fmt.Errorf("could not create internal client for leader election: %w", err)
	}

	isLeader := atomic.NewBool(false)

	identity := podInfo.Name
	leaseName := deployment.Name

	leaderElectionConfig := leaderelection.LeaderElectionConfig{
		Lock: &resourcelock.LeaseLock{
			LeaseMeta: metav1.ObjectMeta{
				Namespace: podInfo.Namespace,
				Name:      leaseName,
			},
			Client: internalClient.Kubernetes.CoordinationV1(),
			LockConfig: resourcelock.ResourceLockConfig{
				Identity: identity,
			},
		},
		ReleaseOnCancel: true, // semantics for correct release handled by controllersWithLeaderElector below
		LeaseDuration:   60 * time.Second,
		RenewDeadline:   15 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(_ context.Context) {
				plog.Debug("leader gained", "identity", identity)
				isLeader.Store(true)
			},
			OnStoppedLeading: func() {
				plog.Debug("leader lost", "identity", identity)
				isLeader.Store(false)
			},
			OnNewLeader: func(newLeader string) {
				if newLeader == identity {
					return
				}
				plog.Debug("new leader elected", "newLeader", newLeader)
			},
		},
		Name: leaseName,
		// this must be set to nil because we do not want to associate /healthz with a failed
		// leader election renewal as we do not want to exit the process if the leader changes.
		WatchDog: nil,
	}

	// validate our config here before we rely on it being functioning below
	if _, err := leaderelection.NewLeaderElector(leaderElectionConfig); err != nil {
		return nil, nil, fmt.Errorf("invalid config - could not create leader elector: %w", err)
	}

	writeOnlyWhenLeader := kubeclient.MiddlewareFunc(func(_ context.Context, rt kubeclient.RoundTrip) {
		switch rt.Verb() {
		case kubeclient.VerbGet, kubeclient.VerbList, kubeclient.VerbWatch:
			// reads are always allowed.
			// note that while our pods/exec into the kube cert agent pod is a write request from the
			// perspective of the Kube API, it is semantically a read request since no mutation occurs.
			// we simply use it to fill a cache, and we need all pods to have a functioning cache.
			// however, we do not need to handle it here because remotecommand.NewSPDYExecutor uses a
			// kubeclient.Client.JSONConfig as input. since our middleware logic is only injected into
			// the generated clientset code, this JSONConfig simply ignores this middleware all together.
			return
		}

		if isLeader.Load() { // only perform "expensive" test for writes
			return // we are currently the leader, all actions are permitted
		}

		rt.MutateRequest(func(_ kubeclient.Object) error {
			return ErrNotLeader // we are not the leader, fail the write request
		})
	})

	leaderElectionOpts := append(
		// all middleware are always executed so this being the first middleware is not relevant
		[]kubeclient.Option{kubeclient.WithMiddleware(writeOnlyWhenLeader)},
		opts..., // do not mutate input slice
	)

	client, err := kubeclient.New(leaderElectionOpts...)
	if err != nil {
		return nil, nil, fmt.Errorf("could not create leader election client: %w", err)
	}

	controllersWithLeaderElector := func(ctx context.Context, controllers func(context.Context)) {
		leaderElectorCtx, leaderElectorCancel := context.WithCancel(context.Background()) // purposefully detached context

		go func() {
			controllers(ctx)      // run the controllers with the global context, this blocks until the context is canceled
			leaderElectorCancel() // once the controllers have all stopped, tell the leader elector to release the lock
		}()

		for { // run (and rerun on release) the leader elector with its own context (blocking)
			select {
			case <-leaderElectorCtx.Done():
				return // keep trying to run until process exit

			default:
				// blocks while trying to acquire lease, unblocks on release.
				// note that this creates a new leader elector on each loop to
				// prevent any bugs from reusing that struct across elections.
				// our config was validated above so this should never die.
				leaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)
			}
		}
	}

	return client, controllersWithLeaderElector, nil
}
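The hunks that follow wire this package into the concierge and supervisor. The editor's sketch below (not part of the commit) condenses that wiring into one self-contained function so the flow is easier to see: the returned client is used for everything, the returned blocking function wraps the controller start, and writes issued while this pod does not hold the Lease fail locally. The input and helper names here are hypothetical, and the errors.Is check assumes ErrNotLeader is surfaced either unwrapped or wrapped with %w by the client machinery.

package example

import (
	"context"
	"errors"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.pinniped.dev/internal/downward"
	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/leaderelection"
)

func runWithLeaderElection(ctx context.Context, podInfo *downward.PodInfo, deployment *appsv1.Deployment, startControllers func(context.Context), opts ...kubeclient.Option) error {
	client, leaderElector, err := leaderelection.New(podInfo, deployment, opts...)
	if err != nil {
		return fmt.Errorf("cannot create k8s client: %w", err)
	}

	// While this pod does not hold the Lease, writes through the returned client's
	// generated clientsets are rejected locally; reads always pass through.
	_, err = client.Kubernetes.CoreV1().Secrets(podInfo.Namespace).Create(ctx, &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
	}, metav1.CreateOptions{})
	if errors.Is(err, leaderelection.ErrNotLeader) {
		// expected on non-leader pods; the write succeeds only after this pod wins the lease
	}

	// Blocks: runs the lease acquisition loop and releases the Lease only after
	// the controllers started by startControllers have stopped.
	leaderElector(ctx, startControllers)
	return nil
}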
@@ -41,6 +41,7 @@ import (
 	"go.pinniped.dev/internal/downward"
 	"go.pinniped.dev/internal/groupsuffix"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/leaderelection"
 	"go.pinniped.dev/internal/oidc/jwks"
 	"go.pinniped.dev/internal/oidc/provider"
 	"go.pinniped.dev/internal/oidc/provider/manager"
@@ -94,6 +95,7 @@ func startControllers(
 	pinnipedClient pinnipedclientset.Interface,
 	kubeInformers kubeinformers.SharedInformerFactory,
 	pinnipedInformers pinnipedinformers.SharedInformerFactory,
+	leaderElector func(context.Context, func(context.Context)),
 ) {
 	federationDomainInformer := pinnipedInformers.Config().V1alpha1().FederationDomains()
 	secretInformer := kubeInformers.Core().V1().Secrets()
@@ -261,7 +263,7 @@ func startControllers(
 	kubeInformers.WaitForCacheSync(ctx.Done())
 	pinnipedInformers.WaitForCacheSync(ctx.Done())

-	go controllerManager.Start(ctx)
+	go leaderElector(ctx, controllerManager.Start)
 }

 func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
@@ -275,14 +277,25 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		return fmt.Errorf("cannot create deployment ref: %w", err)
 	}

-	client, err := kubeclient.New(
+	opts := []kubeclient.Option{
 		dref,
 		kubeclient.WithMiddleware(groupsuffix.New(*cfg.APIGroupSuffix)),
+	}
+
+	client, leaderElector, err := leaderelection.New(
+		podInfo,
+		supervisorDeployment,
+		opts...,
 	)
 	if err != nil {
 		return fmt.Errorf("cannot create k8s client: %w", err)
 	}

+	clientWithoutLeaderElection, err := kubeclient.New(opts...)
+	if err != nil {
+		return fmt.Errorf("cannot create k8s client without leader election: %w", err)
+	}
+
 	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
 		client.Kubernetes,
 		defaultResyncInterval,
@@ -312,7 +325,7 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		dynamicJWKSProvider,
 		dynamicUpstreamIDPProvider,
 		&secretCache,
-		client.Kubernetes.CoreV1().Secrets(serverInstallationNamespace),
+		clientWithoutLeaderElection.Kubernetes.CoreV1().Secrets(serverInstallationNamespace), // writes to kube storage are allowed for non-leaders
 	)

 	startControllers(
@@ -328,6 +341,7 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		client.PinnipedSupervisor,
 		kubeInformers,
 		pinnipedInformers,
+		leaderElector,
 	)

 	//nolint: gosec // Intentionally binding to all network interfaces.
@@ -7,6 +7,7 @@ import (
 	"context"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )
@@ -32,10 +33,24 @@ type coreWrapper struct {
 	opts *[]metav1.DeleteOptions
 }

+func (c *coreWrapper) Pods(namespace string) corev1client.PodInterface {
+	return &podsWrapper{PodInterface: c.CoreV1Interface.Pods(namespace), opts: c.opts}
+}
+
 func (c *coreWrapper) Secrets(namespace string) corev1client.SecretInterface {
 	return &secretsWrapper{SecretInterface: c.CoreV1Interface.Secrets(namespace), opts: c.opts}
 }

+type podsWrapper struct {
+	corev1client.PodInterface
+	opts *[]metav1.DeleteOptions
+}
+
+func (s *podsWrapper) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	*s.opts = append(*s.opts, opts)
+	return s.PodInterface.Delete(ctx, name, opts)
+}
+
 type secretsWrapper struct {
 	corev1client.SecretInterface
 	opts *[]metav1.DeleteOptions
@@ -45,3 +60,12 @@ func (s *secretsWrapper) Delete(ctx context.Context, name string, opts metav1.De
 	*s.opts = append(*s.opts, opts)
 	return s.SecretInterface.Delete(ctx, name, opts)
 }
+
+func NewPreconditions(uid types.UID, rv string) metav1.DeleteOptions {
+	return metav1.DeleteOptions{
+		Preconditions: &metav1.Preconditions{
+			UID:             &uid,
+			ResourceVersion: &rv,
+		},
+	}
+}
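The two testutil additions above are meant to be used together: NewDeleteOptionsRecorder wraps a fake clientset so every DeleteOptions passed to Pods().Delete or Secrets().Delete is appended to a slice, and NewPreconditions builds the expected value for the assertion. The editor's sketch below (not part of the commit) shows that wiring in isolation, mirroring the garbage collector and pod cleaner tests in this change; the test name and object names are hypothetical.

package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"

	"go.pinniped.dev/internal/testutil"
)

func TestDeletesUseExactPreconditions(t *testing.T) {
	kubeClient := kubernetesfake.NewSimpleClientset()

	// Record every DeleteOptions that passes through the wrapped clientset.
	deleteOptions := &[]metav1.DeleteOptions{}
	recorder := testutil.NewDeleteOptionsRecorder(kubeClient, deleteOptions)

	// Normally `recorder` (a kubernetes.Interface) is handed to the code under
	// test; here we issue the preconditioned delete directly for brevity.
	err := recorder.CoreV1().Secrets("some-namespace").Delete(context.Background(), "some-secret", testutil.NewPreconditions("uid-123", "rv-456"))
	require.Error(t, err) // the fake tracker has no such secret, so the delete returns NotFound

	// The recorded options prove the delete was pinned to the observed object.
	require.Equal(t, []metav1.DeleteOptions{testutil.NewPreconditions("uid-123", "rv-456")}, *deleteOptions)
}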