Mirror of https://github.com/vmware-tanzu/pinniped.git, synced 2026-01-03 11:45:45 +00:00
Also probe aggregated API ports in new ciphers test
Co-authored-by: Joshua Casey <joshuatcasey@gmail.com>
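The reworked helper below no longer takes a flat list of expected cipher IDs; it takes one expected *tls.Config for the Supervisor's OIDC endpoints and another for the aggregated API endpoints of both apps. As a minimal sketch, here is how a test might call it; the test name, cipher choice, and TLS versions are illustrative assumptions, not taken from this commit:

// Hypothetical caller of performLimitedCiphersTest; the cipher list and
// expected TLS configs shown here are illustrative only.
func TestLimitedCiphers_Disruptive(t *testing.T) {
	performLimitedCiphersTest(t,
		// Configure both apps to allow only this cipher for TLS 1.2.
		[]string{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"},
		// Expect the Supervisor's OIDC endpoints to honor the restriction.
		&tls.Config{
			MinVersion:   tls.VersionTLS12,
			MaxVersion:   testlib.MaxTLSVersion,
			CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
		},
		// Expect the aggregated API endpoints to honor it as well.
		&tls.Config{
			MinVersion:   tls.VersionTLS12,
			MaxVersion:   testlib.MaxTLSVersion,
			CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
		},
	)
}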
@@ -6,78 +6,170 @@ package integration
 import (
 	"context"
 	"crypto/tls"
+	"strconv"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/yaml"
 
+	"go.pinniped.dev/internal/config/concierge"
 	"go.pinniped.dev/internal/config/supervisor"
 	"go.pinniped.dev/test/testlib"
 )
 
-func performLimitedCiphersTest(t *testing.T, allowedCiphers []string, expectedCiphers []uint16) {
-	env := testOnKindWithPodShutdown(t)
+type stringEditorFunc func(t *testing.T, in string) string
+
+func performLimitedCiphersTest(
+	t *testing.T,
+	allowedCiphersConfig []string,
+	expectedConfigForSupervisorOIDCEndpoints *tls.Config,
+	expectedConfigForAggregatedAPIEndpoints *tls.Config,
+) {
+	env := testEnvForPodShutdownTests(t)
 
-	client := testlib.NewKubernetesClientset(t)
-	configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
 	t.Cleanup(cancel)
 
-	staticConfigMapName := env.SupervisorAppName + "-static-config"
-	supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
-	require.NotEmpty(t, originalSupervisorConfig)
-
-	t.Cleanup(func() {
-		supervisorStaticConfigMapCleanup, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-		require.NoError(t, err)
-
-		supervisorStaticConfigMapCleanup.Data = make(map[string]string)
-		supervisorStaticConfigMapCleanup.Data["pinniped.yaml"] = originalSupervisorConfig
-
-		_, err = configMapClient.Update(ctx, supervisorStaticConfigMapCleanup, metav1.UpdateOptions{})
-		require.NoError(t, err)
-
-		// this will cycle all the pods
-		restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-	})
-
-	var config supervisor.Config
-	err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
-	require.NoError(t, err)
-
-	// As a precondition of this test, ensure that the list of allowedCiphers is empty
-	require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers)
-
-	config.TLS.OneDotTwo.AllowedCiphers = allowedCiphers
-
-	updatedSupervisorConfig, err := yaml.Marshal(config)
-	require.NoError(t, err)
-
-	supervisorStaticConfigMap.Data = make(map[string]string)
-	supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedSupervisorConfig)
-
-	_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
-	require.NoError(t, err)
-
-	// this will cycle all the pods
-	restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-
-	startKubectlPortForward(ctx, t, "10509", "443", env.SupervisorAppName+"-nodeport", env.SupervisorNamespace)
-	stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", 10509)
-	require.Empty(t, stderr)
-
-	expectedCiphersConfig := &tls.Config{
-		MinVersion:   tls.VersionTLS12,
-		MaxVersion:   testlib.MaxTLSVersion,
-		CipherSuites: expectedCiphers,
-	}
-
-	require.Contains(t, stdout, testlib.GetExpectedCiphers(expectedCiphersConfig, "server"), "stdout:\n%s", stdout)
-}
+	editSupervisorAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
+		t.Helper()
+
+		var config supervisor.Config
+		err := yaml.Unmarshal([]byte(configMapData), &config)
+		require.NoError(t, err)
+
+		require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition
+
+		config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig
+
+		updatedConfig, err := yaml.Marshal(config)
+		require.NoError(t, err)
+		return string(updatedConfig)
+	}
+
+	editConciergeAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
+		t.Helper()
+
+		var config concierge.Config
+		err := yaml.Unmarshal([]byte(configMapData), &config)
+		require.NoError(t, err)
+
+		require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition
+
+		config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig
+
+		updatedConfig, err := yaml.Marshal(config)
+		require.NoError(t, err)
+		return string(updatedConfig)
+	}
+
+	// Update Supervisor's allowed ciphers in its static configmap and restart its pods.
+	updateStaticConfigMapAndRestartApp(t,
+		ctx,
+		env.SupervisorNamespace,
+		env.SupervisorAppName+"-static-config",
+		env.SupervisorAppName,
+		false,
+		editSupervisorAllowedCiphersConfig,
+	)
+
+	// Update Concierge's allowed ciphers in its static configmap and restart its pods.
+	updateStaticConfigMapAndRestartApp(t,
+		ctx,
+		env.ConciergeNamespace,
+		env.ConciergeAppName+"-config",
+		env.ConciergeAppName,
+		true,
+		editConciergeAllowedCiphersConfig,
+	)
+
+	// Probe TLS config of Supervisor's OIDC endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.SupervisorAppName+"-nodeport", env.SupervisorNamespace, "10509",
+		expectedConfigForSupervisorOIDCEndpoints,
+	)
+
+	// Probe TLS config of Supervisor's aggregated API endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.SupervisorAppName+"-api", env.SupervisorNamespace, "10510",
+		expectedConfigForAggregatedAPIEndpoints,
+	)
+
+	// Probe TLS config of Concierge's aggregated API endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.ConciergeAppName+"-api", env.ConciergeNamespace, "10511",
+		expectedConfigForAggregatedAPIEndpoints,
+	)
+}
+
+func expectTLSConfigForServicePort(
+	t *testing.T,
+	ctx context.Context,
+	serviceName string,
+	serviceNamespace string,
+	localPortAsStr string,
+	expectedConfig *tls.Config,
+) {
+	portAsInt, err := strconv.Atoi(localPortAsStr)
+	require.NoError(t, err)
+	portAsUint := uint16(portAsInt) // okay to cast because it will only be legal port numbers
+
+	startKubectlPortForward(ctx, t, localPortAsStr, "443", serviceName, serviceNamespace)
+
+	stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", portAsUint)
+	require.Empty(t, stderr)
+
+	expectedNMapOutput := testlib.GetExpectedCiphers(expectedConfig, "server")
+	assert.Contains(t,
+		stdout,
+		expectedNMapOutput,
+		"actual nmap output:\n%s\nbut was expected to contain:\n%s", stdout, expectedNMapOutput,
+	)
+}
+
+func updateStaticConfigMapAndRestartApp(
+	t *testing.T,
+	ctx context.Context,
+	namespace string,
+	staticConfigMapName string,
+	appName string,
+	isConcierge bool,
+	editConfigMapFunc stringEditorFunc,
+) {
+	configMapClient := testlib.NewKubernetesClientset(t).CoreV1().ConfigMaps(namespace)
+
+	staticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
+	require.NoError(t, err)
+
+	originalConfig := staticConfigMap.Data["pinniped.yaml"]
+	require.NotEmpty(t, originalConfig)
+
+	t.Cleanup(func() {
+		cleanupCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+		t.Cleanup(cancel)
+
+		staticConfigMapForCleanup, err := configMapClient.Get(cleanupCtx, staticConfigMapName, metav1.GetOptions{})
+		require.NoError(t, err)
+
+		staticConfigMapForCleanup.Data = make(map[string]string)
+		staticConfigMapForCleanup.Data["pinniped.yaml"] = originalConfig
+
+		_, err = configMapClient.Update(cleanupCtx, staticConfigMapForCleanup, metav1.UpdateOptions{})
+		require.NoError(t, err)
+
+		restartAllPodsOfApp(t, namespace, appName, isConcierge)
+	})
+
+	staticConfigMap.Data = make(map[string]string)
+	staticConfigMap.Data["pinniped.yaml"] = editConfigMapFunc(t, originalConfig)
+
+	_, err = configMapClient.Update(ctx, staticConfigMap, metav1.UpdateOptions{})
+	require.NoError(t, err)
+
+	restartAllPodsOfApp(t, namespace, appName, isConcierge)
+}
 
 // restartAllPodsOfApp will immediately scale to 0 and then scale back.
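The two editor closures above follow the same unmarshal-mutate-marshal pattern against the configmap's pinniped.yaml value. Below is a self-contained sketch of that pattern using a stand-in struct; the YAML field names and tags are assumptions for illustration, since the real definitions live in go.pinniped.dev/internal/config/supervisor and .../concierge:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// fakeConfig stands in for supervisor.Config / concierge.Config. The field
// tags here are assumptions, not copied from the pinniped packages. Note that
// the real Config types declare every config field, so a round trip preserves
// the rest of the document; this stand-in only declares the TLS field.
type fakeConfig struct {
	TLS struct {
		OneDotTwo struct {
			AllowedCiphers []string `json:"allowedCiphers"`
		} `json:"one_dot_two"`
	} `json:"tls"`
}

// editAllowedCiphers mirrors the closure pattern above: unmarshal the
// configmap's pinniped.yaml value, mutate one field, and marshal it back.
func editAllowedCiphers(configMapData string, allowed []string) (string, error) {
	var config fakeConfig
	if err := yaml.Unmarshal([]byte(configMapData), &config); err != nil {
		return "", err
	}
	config.TLS.OneDotTwo.AllowedCiphers = allowed
	updated, err := yaml.Marshal(config)
	if err != nil {
		return "", err
	}
	return string(updated), nil
}

func main() {
	out, err := editAllowedCiphers(
		"tls:\n  one_dot_two:\n    allowedCiphers: []\n",
		[]string{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"})
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}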
@@ -101,6 +193,7 @@ func restartAllPodsOfApp(
 
 	// Scale down the deployment's number of replicas to 0, which will shut down all the pods.
 	originalScale := updateDeploymentScale(t, namespace, appName, 0)
+	require.Greater(t, originalScale, 0)
 
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
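The added require.Greater guard checks that the deployment really had replicas to restore before the later scale back up. updateDeploymentScale itself is defined elsewhere in this file; as a sketch only, a helper like it could be built on client-go's Scale subresource as shown below (the real helper may differ):

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateDeploymentScaleSketch reads the deployment's Scale subresource,
// records the old replica count, writes the new one, and returns the old
// count so a caller can scale back later. This is an assumed shape for a
// helper like updateDeploymentScale, not its actual source.
func updateDeploymentScaleSketch(
	ctx context.Context,
	client kubernetes.Interface,
	namespace, deploymentName string,
	newScale int,
) (int, error) {
	scale, err := client.AppsV1().Deployments(namespace).
		GetScale(ctx, deploymentName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	originalScale := int(scale.Spec.Replicas)

	scale.Spec.Replicas = int32(newScale)
	_, err = client.AppsV1().Deployments(namespace).
		UpdateScale(ctx, deploymentName, scale, metav1.UpdateOptions{})
	if err != nil {
		return 0, err
	}
	return originalScale, nil
}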
@@ -113,43 +206,6 @@ func restartAllPodsOfApp(
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
 		requireEventually.Len(newPods, originalScale, "wanted %d pods", originalScale)
 		requireEventually.True(allPodsReady(newPods), "wanted all new pods to be ready")
 	}, 2*time.Minute, 200*time.Millisecond)
 }
-
-// TestRemoveAllowedCiphersFromStaticConfig_Disruptive updates the Supervisor's static configuration to make sure that the allowed ciphers list is empty.
-// It will restart the Supervisor pods. Skipped because it's only here for local testing purposes.
-func TestRemoveAllowedCiphersFromStaticConfig_Disruptive(t *testing.T) {
-	t.Skip()
-
-	env := testOnKindWithPodShutdown(t)
-
-	client := testlib.NewKubernetesClientset(t)
-	configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-	defer cancel()
-
-	staticConfigMapName := env.SupervisorAppName + "-static-config"
-	supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
-	require.NotEmpty(t, originalSupervisorConfig)
-
-	var config supervisor.Config
-	err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
-	require.NoError(t, err)
-
-	config.TLS.OneDotTwo.AllowedCiphers = nil
-
-	updatedConfigBytes, err := yaml.Marshal(config)
-	require.NoError(t, err)
-
-	supervisorStaticConfigMap.Data = make(map[string]string)
-	supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedConfigBytes)
-
-	_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
-	require.NoError(t, err)
-
-	// this will cycle all the pods
-	restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-}
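For reference, expectTLSConfigForServicePort verifies the server's cipher support by shelling out to nmap's ssl-enum-ciphers script through testlib.RunNmapSSLEnum. The standard-library probe below gives a rough feel for what is being checked, though it reports only the single suite negotiated on one handshake rather than enumerating every suite the server accepts; the address assumes a local kubectl port-forward like the one the test starts:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

// Dial a locally port-forwarded service and report which TLS version and
// cipher suite the server negotiated. nmap's ssl-enum-ciphers goes further
// by offering ciphers individually to enumerate everything the server
// accepts; this sketch performs a single negotiation.
func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:10509", &tls.Config{
		MinVersion:         tls.VersionTLS12,
		InsecureSkipVerify: true, // test probe against a local port-forward only; never use in production code
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	state := conn.ConnectionState()
	fmt.Printf("negotiated TLS version: %#x\n", state.Version)
	fmt.Println("negotiated cipher:", tls.CipherSuiteName(state.CipherSuite))
}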