Also probe aggregated API ports in new ciphers test
Co-authored-by: Joshua Casey <joshuatcasey@gmail.com>
@@ -10,20 +10,39 @@ import (
 	"testing"
 )
 
-// TestLimitedCiphers_Disruptive will confirm that the Pinniped Supervisor exposes only those ciphers listed in
-// configuration.
-// This does not test the Concierge (which has the same feature) since the Concierge does not have exposed API
-// endpoints with the Default profile.
+// TestLimitedCiphersFIPS_Disruptive will confirm that the Pinniped Supervisor and Concierge expose only those
+// ciphers listed in configuration, when compiled in FIPS mode.
 // This does not test the CLI, since it does not have a feature to limit cipher suites.
 func TestLimitedCiphersFIPS_Disruptive(t *testing.T) {
 	performLimitedCiphersTest(t,
+		// The user-configured ciphers for both the Supervisor and Concierge.
+		// This is a subset of the hardcoded ciphers from profiles_fips_strict.go.
 		[]string{
 			"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
 			"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+			"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
 			"TLS_RSA_WITH_AES_256_GCM_SHA384", // this is an insecure cipher but allowed for FIPS
 		},
-		[]uint16{
-			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-			tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
-		})
+		// Expected server configuration for the Supervisor's OIDC endpoints.
+		&tls.Config{
+			MinVersion: tls.VersionTLS12, // Supervisor OIDC always allows TLS 1.2 clients to connect
+			MaxVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
+			CipherSuites: []uint16{
+				// Supervisor OIDC endpoints configured with EC certs use only EC ciphers.
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+			},
+		},
+		// Expected server configuration for the Supervisor and Concierge aggregated API endpoints.
+		&tls.Config{
+			MinVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
+			MaxVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
+			CipherSuites: []uint16{
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+				tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+			},
+		},
+	)
 }
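Aside (not part of this commit): the test passes the allowed ciphers as strings for the pinniped.yaml configuration, but asserts against crypto/tls constants for the expected server behavior. Go can map between the two forms, which is a quick way to check that the configured names and the expected constants line up. A minimal sketch:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// The same suites that the FIPS test above configures, expressed as crypto/tls constants.
	ids := []uint16{
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
	}
	for _, id := range ids {
		// CipherSuiteName returns the standard suite name, which is the same string
		// form used in the allowedCiphers configuration above.
		fmt.Printf("0x%04x %s\n", id, tls.CipherSuiteName(id))
	}
}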
@@ -10,21 +10,34 @@ import (
 	"testing"
 )
 
-// TestLimitedCiphersNotFIPS_Disruptive will confirm that the Pinniped Supervisor exposes only those ciphers listed in
-// configuration.
-// This does not test the Concierge (which has the same feature) since the Concierge does not have exposed API
-// endpoints with the Default profile.
+// TestLimitedCiphersNotFIPS_Disruptive will confirm that the Pinniped Supervisor and Concierge expose only those
+// ciphers listed in configuration, when compiled in non-FIPS mode.
 // This does not test the CLI, since it does not have a feature to limit cipher suites.
 func TestLimitedCiphersNotFIPS_Disruptive(t *testing.T) {
 	performLimitedCiphersTest(t,
+		// The user-configured ciphers for both the Supervisor and Concierge.
+		// This is a subset of the hardcoded ciphers from profiles.go.
 		[]string{
 			"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
 			"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
 			"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
 			"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
 		},
-		[]uint16{
-			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-		})
+		// Expected server configuration for the Supervisor's OIDC endpoints.
+		&tls.Config{
+			MinVersion: tls.VersionTLS12, // Supervisor OIDC always allows TLS 1.2 clients to connect
+			MaxVersion: tls.VersionTLS13,
+			CipherSuites: []uint16{
+				// Supervisor OIDC endpoints configured with EC certs use only EC ciphers.
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+			},
+		},
+		// Expected server configuration for the Supervisor and Concierge aggregated API endpoints.
+		&tls.Config{
+			MinVersion: tls.VersionTLS13, // do not allow TLS 1.2 clients to connect
+			MaxVersion: tls.VersionTLS13,
+			CipherSuites: nil, // TLS 1.3 ciphers are not configurable
+		},
+	)
 }
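The "CipherSuites: nil, // TLS 1.3 ciphers are not configurable" expectation reflects a property of Go's crypto/tls: the CipherSuites field only governs TLS 1.2 and earlier, while the TLS 1.3 suite set is fixed. A small sketch (illustrative only, not part of this commit) that lists which standard suites are TLS 1.3-only:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// tls.CipherSuites() describes the suites crypto/tls considers secure.
	// Suites whose only supported version is TLS 1.3 cannot be turned off via
	// tls.Config.CipherSuites, which is why the test expects nil there.
	for _, suite := range tls.CipherSuites() {
		if len(suite.SupportedVersions) == 1 && suite.SupportedVersions[0] == tls.VersionTLS13 {
			fmt.Println(suite.Name) // e.g. TLS_AES_128_GCM_SHA256
		}
	}
}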
@@ -6,78 +6,170 @@ package integration
 import (
 	"context"
 	"crypto/tls"
+	"strconv"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/yaml"
 
+	"go.pinniped.dev/internal/config/concierge"
 	"go.pinniped.dev/internal/config/supervisor"
 	"go.pinniped.dev/test/testlib"
 )
 
-func performLimitedCiphersTest(t *testing.T, allowedCiphers []string, expectedCiphers []uint16) {
-	env := testOnKindWithPodShutdown(t)
-
-	client := testlib.NewKubernetesClientset(t)
-	configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-	t.Cleanup(cancel)
-
-	staticConfigMapName := env.SupervisorAppName + "-static-config"
-	supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
-	require.NotEmpty(t, originalSupervisorConfig)
-
-	t.Cleanup(func() {
-		supervisorStaticConfigMapCleanup, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-		require.NoError(t, err)
-
-		supervisorStaticConfigMapCleanup.Data = make(map[string]string)
-		supervisorStaticConfigMapCleanup.Data["pinniped.yaml"] = originalSupervisorConfig
-
-		_, err = configMapClient.Update(ctx, supervisorStaticConfigMapCleanup, metav1.UpdateOptions{})
-		require.NoError(t, err)
-
-		// this will cycle all the pods
-		restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-	})
-
-	var config supervisor.Config
-	err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
-	require.NoError(t, err)
-
-	// As a precondition of this test, ensure that the list of allowedCiphers is empty
-	require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers)
-
-	config.TLS.OneDotTwo.AllowedCiphers = allowedCiphers
-
-	updatedSupervisorConfig, err := yaml.Marshal(config)
-	require.NoError(t, err)
-
-	supervisorStaticConfigMap.Data = make(map[string]string)
-	supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedSupervisorConfig)
-
-	_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
-	require.NoError(t, err)
-
-	// this will cycle all the pods
-	restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-
-	startKubectlPortForward(ctx, t, "10509", "443", env.SupervisorAppName+"-nodeport", env.SupervisorNamespace)
-	stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", 10509)
-	require.Empty(t, stderr)
-
-	expectedCiphersConfig := &tls.Config{
-		MinVersion: tls.VersionTLS12,
-		MaxVersion: testlib.MaxTLSVersion,
-		CipherSuites: expectedCiphers,
-	}
-
-	require.Contains(t, stdout, testlib.GetExpectedCiphers(expectedCiphersConfig, "server"), "stdout:\n%s", stdout)
-}
+type stringEditorFunc func(t *testing.T, in string) string
+
+func performLimitedCiphersTest(
+	t *testing.T,
+	allowedCiphersConfig []string,
+	expectedConfigForSupervisorOIDCEndpoints *tls.Config,
+	expectedConfigForAggregatedAPIEndpoints *tls.Config,
+) {
+	env := testEnvForPodShutdownTests(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	t.Cleanup(cancel)
+
+	editSupervisorAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
+		t.Helper()
+
+		var config supervisor.Config
+		err := yaml.Unmarshal([]byte(configMapData), &config)
+		require.NoError(t, err)
+
+		require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition
+
+		config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig
+
+		updatedConfig, err := yaml.Marshal(config)
+		require.NoError(t, err)
+		return string(updatedConfig)
+	}
+
+	editConciergeAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
+		t.Helper()
+
+		var config concierge.Config
+		err := yaml.Unmarshal([]byte(configMapData), &config)
+		require.NoError(t, err)
+
+		require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition
+
+		config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig
+
+		updatedConfig, err := yaml.Marshal(config)
+		require.NoError(t, err)
+		return string(updatedConfig)
+	}
+
+	// Update Supervisor's allowed ciphers in its static configmap and restart pods.
+	updateStaticConfigMapAndRestartApp(t,
+		ctx,
+		env.SupervisorNamespace,
+		env.SupervisorAppName+"-static-config",
+		env.SupervisorAppName,
+		false,
+		editSupervisorAllowedCiphersConfig,
+	)
+
+	// Update Concierge's allowed ciphers in its static configmap and restart pods.
+	updateStaticConfigMapAndRestartApp(t,
+		ctx,
+		env.ConciergeNamespace,
+		env.ConciergeAppName+"-config",
+		env.ConciergeAppName,
+		true,
+		editConciergeAllowedCiphersConfig,
+	)
+
+	// Probe TLS config of Supervisor's OIDC endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.SupervisorAppName+"-nodeport", env.SupervisorNamespace, "10509",
+		expectedConfigForSupervisorOIDCEndpoints,
+	)
+
+	// Probe TLS config of Supervisor's aggregated endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.SupervisorAppName+"-api", env.SupervisorNamespace, "10510",
+		expectedConfigForAggregatedAPIEndpoints,
+	)
+
+	// Probe TLS config of Concierge's aggregated endpoints.
+	expectTLSConfigForServicePort(t, ctx,
+		env.ConciergeAppName+"-api", env.ConciergeNamespace, "10511",
+		expectedConfigForAggregatedAPIEndpoints,
+	)
+}
+
+func expectTLSConfigForServicePort(
+	t *testing.T,
+	ctx context.Context,
+	serviceName string,
+	serviceNamespace string,
+	localPortAsStr string,
+	expectedConfig *tls.Config,
+) {
+	portAsInt, err := strconv.Atoi(localPortAsStr)
+	require.NoError(t, err)
+	portAsUint := uint16(portAsInt) // okay to cast because it will only be legal port numbers
+
+	startKubectlPortForward(ctx, t, localPortAsStr, "443", serviceName, serviceNamespace)
+
+	stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", portAsUint)
+	require.Empty(t, stderr)
+
+	expectedNMapOutput := testlib.GetExpectedCiphers(expectedConfig, "server")
+	assert.Contains(t,
+		stdout,
+		expectedNMapOutput,
+		"actual nmap output:\n%s", stdout,
+		"but was expected to contain:\n%s", expectedNMapOutput,
+	)
+}
+
+func updateStaticConfigMapAndRestartApp(
+	t *testing.T,
+	ctx context.Context,
+	namespace string,
+	staticConfigMapName string,
+	appName string,
+	isConcierge bool,
+	editConfigMapFunc stringEditorFunc,
+) {
+	configMapClient := testlib.NewKubernetesClientset(t).CoreV1().ConfigMaps(namespace)
+
+	staticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
+	require.NoError(t, err)
+
+	originalConfig := staticConfigMap.Data["pinniped.yaml"]
+	require.NotEmpty(t, originalConfig)
+
+	t.Cleanup(func() {
+		cleanupCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+		t.Cleanup(cancel)
+
+		staticConfigMapForCleanup, err := configMapClient.Get(cleanupCtx, staticConfigMapName, metav1.GetOptions{})
+		require.NoError(t, err)
+
+		staticConfigMapForCleanup.Data = make(map[string]string)
+		staticConfigMapForCleanup.Data["pinniped.yaml"] = originalConfig
+
+		_, err = configMapClient.Update(cleanupCtx, staticConfigMapForCleanup, metav1.UpdateOptions{})
+		require.NoError(t, err)
+
+		restartAllPodsOfApp(t, namespace, appName, isConcierge)
+	})
+
+	staticConfigMap.Data = make(map[string]string)
+	staticConfigMap.Data["pinniped.yaml"] = editConfigMapFunc(t, originalConfig)
+
+	_, err = configMapClient.Update(ctx, staticConfigMap, metav1.UpdateOptions{})
+	require.NoError(t, err)
+
+	restartAllPodsOfApp(t, namespace, appName, isConcierge)
+}
 
 // restartAllPodsOfApp will immediately scale to 0 and then scale back.
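In this commit, expectTLSConfigForServicePort drives nmap's ssl-enum-ciphers script through a kubectl port-forward and matches the output against testlib.GetExpectedCiphers. Purely as an illustration of what such a probe does (this is not how the test works, and probeTLS12Ciphers is a hypothetical name), the same kind of check can be made from Go by attempting one handshake per suite:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

// probeTLS12Ciphers attempts one TLS 1.2 handshake per known suite against addr
// (for example a "kubectl port-forward" listener such as "127.0.0.1:10509")
// and reports which suites the server accepted.
func probeTLS12Ciphers(addr string) []string {
	var accepted []string
	suites := append(tls.CipherSuites(), tls.InsecureCipherSuites()...)
	for _, suite := range suites {
		conf := &tls.Config{
			MinVersion:         tls.VersionTLS12,
			MaxVersion:         tls.VersionTLS12, // CipherSuites only applies to TLS 1.2 and below
			CipherSuites:       []uint16{suite.ID},
			InsecureSkipVerify: true, // we only care about the handshake, not the cert chain
		}
		conn, err := tls.DialWithDialer(&net.Dialer{Timeout: 5 * time.Second}, "tcp", addr, conf)
		if err != nil {
			continue // handshake rejected: the server does not offer this suite
		}
		accepted = append(accepted, suite.Name)
		_ = conn.Close()
	}
	return accepted
}

func main() {
	fmt.Println(probeTLS12Ciphers("127.0.0.1:10509"))
}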
@@ -101,6 +193,7 @@ func restartAllPodsOfApp(
 
 	// Scale down the deployment's number of replicas to 0, which will shut down all the pods.
 	originalScale := updateDeploymentScale(t, namespace, appName, 0)
+	require.Greater(t, originalScale, 0)
 
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
@@ -113,43 +206,6 @@ func restartAllPodsOfApp(
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
 		requireEventually.Len(newPods, originalScale, "wanted %d pods", originalScale)
+		requireEventually.True(allPodsReady(newPods), "wanted all new pods to be ready")
 	}, 2*time.Minute, 200*time.Millisecond)
 }
-
-// TestRemoveAllowedCiphersFromStaticConfig_Disruptive updates the Supervisor's static configuration to make sure that the allowed ciphers list is empty.
-// It will restart the Supervisor pods. Skipped because it's only here for local testing purposes.
-func TestRemoveAllowedCiphersFromStaticConfig_Disruptive(t *testing.T) {
-	t.Skip()
-
-	env := testOnKindWithPodShutdown(t)
-
-	client := testlib.NewKubernetesClientset(t)
-	configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-	defer cancel()
-
-	staticConfigMapName := env.SupervisorAppName + "-static-config"
-	supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
-	require.NotEmpty(t, originalSupervisorConfig)
-
-	var config supervisor.Config
-	err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
-	require.NoError(t, err)
-
-	config.TLS.OneDotTwo.AllowedCiphers = nil
-
-	updatedConfigBytes, err := yaml.Marshal(config)
-	require.NoError(t, err)
-
-	supervisorStaticConfigMap.Data = make(map[string]string)
-	supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedConfigBytes)
-
-	_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
-	require.NoError(t, err)
-
-	// this will cycle all the pods
-	restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
-}
@@ -24,21 +24,20 @@ import (
 // before they die.
 // Never run this test in parallel since deleting the pods is disruptive, see main_test.go.
 func TestPodShutdown_Disruptive(t *testing.T) {
-	env := testOnKindWithPodShutdown(t)
+	env := testEnvForPodShutdownTests(t)
 
 	shutdownAllPodsOfApp(t, env, env.ConciergeNamespace, env.ConciergeAppName, true)
 	shutdownAllPodsOfApp(t, env, env.SupervisorNamespace, env.SupervisorAppName, false)
 }
 
-// testOnKindWithPodShutdown builds an env with the following description:
+// testEnvForPodShutdownTests builds an env with the following description:
 // Only run this test in CI on Kind clusters, because something about restarting the pods
 // in this test breaks the "kubectl port-forward" commands that we are using in CI for
 // AKS, EKS, and GKE clusters. The Go code that we wrote for graceful pod shutdown should
 // not be sensitive to which distribution it runs on, so running this test only on Kind
 // should give us sufficient coverage for what we are trying to test here.
-func testOnKindWithPodShutdown(t *testing.T) *testlib.TestEnv {
-	return testlib.IntegrationEnv(t, testlib.SkipPodRestartAssertions()).
-		WithKubeDistribution(testlib.KindDistro)
+func testEnvForPodShutdownTests(t *testing.T) *testlib.TestEnv {
+	return testlib.IntegrationEnv(t, testlib.SkipPodRestartAssertions()).WithKubeDistribution(testlib.KindDistro)
 }
 
 func shutdownAllPodsOfApp(
@@ -94,11 +93,12 @@ func shutdownAllPodsOfApp(
 	t.Cleanup(func() {
 		updateDeploymentScale(t, namespace, appName, originalScale)
 
-		// Wait for all the new pods to be running.
+		// Wait for all the new pods to be running and ready.
 		var newPods []corev1.Pod
 		testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 			newPods = getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
 			requireEventually.Len(newPods, originalScale, "wanted pods to return to original scale")
+			requireEventually.True(allPodsReady(newPods), "wanted all new pods to be ready")
 		}, 2*time.Minute, 200*time.Millisecond)
 
 		// After a short time, leader election should have finished and the lease should contain the name of
@@ -186,6 +186,24 @@ func getRunningPodsByNamePrefix(
 	return foundPods
 }
 
+func allPodsReady(pods []corev1.Pod) bool {
+	for _, pod := range pods {
+		if !isPodReady(pod) {
+			return false
+		}
+	}
+	return true
+}
+
+func isPodReady(pod corev1.Pod) bool {
+	for _, cond := range pod.Status.Conditions {
+		if cond.Type == corev1.PodReady {
+			return cond.Status == corev1.ConditionTrue
+		}
+	}
+	return false
+}
+
 func updateDeploymentScale(t *testing.T, namespace string, deploymentName string, newScale int) int {
 	t.Helper()
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
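The hunk above relies on updateDeploymentScale, whose body lies outside this diff. For orientation only, a hedged sketch of how a deployment can be scaled through client-go's scale subresource; scaleDeployment is a hypothetical stand-in and the real helper in this repository may differ:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment sets a deployment's replica count via the scale subresource
// and returns the previous count, so the caller can restore it later.
func scaleDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, replicas int32) (int32, error) {
	deployments := client.AppsV1().Deployments(namespace)

	scale, err := deployments.GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}

	original := scale.Spec.Replicas
	scale.Spec.Replicas = replicas

	_, err = deployments.UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return original, err
}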
@@ -38,13 +38,16 @@ func RunNmapSSLEnum(t *testing.T, host string, port uint16) (string, string) {
 
 	var stdout, stderr bytes.Buffer
 	//nolint:gosec // we are not performing malicious argument injection against ourselves
-	cmd := exec.CommandContext(ctx, "nmap", "--script", "ssl-enum-ciphers",
+	cmd := exec.CommandContext(ctx,
+		"nmap",
+		"-Pn",
+		"--script", "+ssl-enum-ciphers",
 		"-p", strconv.FormatUint(uint64(port), 10),
 		host,
 	)
 	cmd.Stdout = &stdout
 	cmd.Stderr = &stderr
+	t.Log("Running cmd: " + strings.Join(cmd.Args, " "))
 	require.NoErrorf(t, cmd.Run(), "stderr:\n%s\n\nstdout:\n%s\n\n", stderr.String(), stdout.String())
 
 	return stdout.String(), stderr.String()
@@ -5,12 +5,8 @@
 
 package testlib
 
-import "crypto/tls"
-
 // Because of a bug in nmap, the cipher suite preference is
 // incorrectly shown as 'client' in some cases.
 // in fips-only mode, it correctly shows the cipher preference
 // as 'server', while in non-fips mode it shows as 'client'.
 const DefaultCipherSuitePreference = "server"
-
-const MaxTLSVersion = tls.VersionTLS12
@@ -5,12 +5,8 @@
 
 package testlib
 
-import "crypto/tls"
-
 // Because of a bug in nmap, the cipher suite preference is
 // incorrectly shown as 'client' in some cases.
 // in fips-only mode, it correctly shows the cipher preference
 // as 'server', while in non-fips mode it shows as 'client'.
const DefaultCipherSuitePreference = "client"
-
-const MaxTLSVersion = tls.VersionTLS13
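The last two hunks are the FIPS and non-FIPS variants of the same testlib constant, so only one of them is compiled into a given test binary. The diff does not show how the two files are selected; as an assumption, variants like this are usually gated with Go build constraints, roughly as sketched below (the tag name is illustrative and not taken from this repository):

//go:build boringcrypto

// FIPS-mode variant: a sibling file would carry "//go:build !boringcrypto"
// and define the value "client" instead.
package testlib

const DefaultCipherSuitePreference = "server"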