Merge pull request #1701 from vmware-tanzu/ben/carvel-package/local-registry

Carvel Package POC for local development
Authored by Ryan Richard on 2023-11-08 14:49:32 -08:00; committed by GitHub.
28 changed files with 1226 additions and 388 deletions

View File

@@ -347,7 +347,12 @@ spec:
#@ if data.values.impersonation_proxy_spec.service.load_balancer_ip:
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
#@ end
#@ if data.values.impersonation_proxy_spec.service.annotations == None:
annotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "4000"
#@ else:
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
#@ end
---
apiVersion: v1
kind: Secret
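
The hunk above turns the AWS idle-timeout annotation into a default that applies only when `impersonation_proxy_spec.service.annotations` is left null; any annotations supplied through data values replace the default rather than merging with it. A minimal sketch of overriding it at render time (not part of this PR; the deploy/concierge path and the annotation value are illustrative assumptions):

# Sketch: replace the default Service annotations when rendering the Concierge templates.
# Because the override replaces the whole map, re-add the AWS timeout annotation if it is still wanted.
ytt --file deploy/concierge \
  --data-value-yaml 'impersonation_proxy_spec.service.annotations={"external-dns.alpha.kubernetes.io/hostname": "concierge.example.com"}' \
  | kubectl apply -f -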

View File

@@ -1,107 +1,227 @@
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2023 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@data/values
---
#@ def validate_strings_map(obj):
#@ # Returns True if obj is an associative data structure string→string, and False otherwise.
#@ for key in obj:
#@ if type(key) != "string" or type(obj[key]) != "string":
#@ return False
#@ end
#@ end
#@ return True
#@ end
#@data/values-schema
---
#@schema/title "App name"
#@schema/desc "Used to help determine the names of various resources and labels."
#@schema/validation min_len=1
app_name: pinniped-concierge
#! Creates a new namespace statically in yaml with the given name and installs the app into that namespace.
#@schema/title "Namespace"
#@schema/desc "Creates a new namespace statically in yaml with the given name and installs the app into that namespace."
#@schema/validation min_len=1
namespace: pinniped-concierge
#! If specified, assumes that a namespace of the given name already exists and installs the app into that namespace.
#! If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used.
into_namespace: #! e.g. my-preexisting-namespace
#! All resources created statically by yaml at install-time and all resources created dynamically
#! by controllers at runtime will be labelled with `app: $app_name` and also with the labels
#! specified here. The value of `custom_labels` must be a map of string keys to string values.
#! The app can be uninstalled either by:
#! 1. Deleting the static install-time yaml resources including the static namespace, which will cascade and also delete
#! resources that were dynamically created by controllers at runtime
#! 2. Or, deleting all resources by label, which does not assume that there was a static install-time yaml namespace.
custom_labels: {} #! e.g. {myCustomLabelName: myCustomLabelValue, otherCustomLabelName: otherCustomLabelValue}
#@schema/title "Into namespace"
#@ into_namespace_desc = "If specified, assumes that a namespace of the given name already exists and installs the app into that namespace. \
#@ If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used."
#@schema/desc into_namespace_desc
#@schema/examples ("The name of an existing namespace", "my-preexisting-namespace")
#@schema/nullable
#@schema/validation min_len=1
into_namespace: ""
#! Specify how many replicas of the Pinniped server to run.
#@schema/title "Custom labels"
#@ custom_labels_desc = "All resources created statically by yaml at install-time and all resources created dynamically \
#@ by controllers at runtime will be labelled with `app: $app_name` and also with the labels specified here. The value of \
#@ `custom_labels` must be a map of string keys to string values. The app can be uninstalled either by: 1.) deleting the \
#@ static install-time yaml resources including the static namespace, which will cascade and also delete \
#@ resources that were dynamically created by controllers at runtime, or 2.) deleting all resources by label, which does \
#@ not assume that there was a static install-time yaml namespace."
#@schema/desc custom_labels_desc
#@schema/examples ("Example set of labels", {"myCustomLabelName": "myCustomLabelValue", "otherCustomLabelName": "otherCustomLabelValue"})
#@schema/type any=True
#@schema/validation ("a map of string keys and string values", validate_strings_map)
custom_labels: { }
#@schema/title "Replicas"
#@schema/desc "Specify how many replicas of the Pinniped server to run."
replicas: 2
#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
#@schema/title "Image repo"
#@schema/desc "The repository for the Concierge container image."
#@schema/validation min_len=1
image_repo: projects.registry.vmware.com/pinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
#@schema/title "Image digest"
#@schema/desc "The image digest for the Concierge container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Providing a digest", "sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8")
#@schema/nullable
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_tag"] == None
image_digest: ""
#@schema/title "Image tag"
#@schema/desc "The image tag for the Concierge container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Providing a tag", "v0.25.0")
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_digest"] == None
image_tag: latest
#! Optionally specify a different image for the "kube-cert-agent" pod which is scheduled
#! on the control plane. This image needs only to include `sleep` and `cat` binaries.
#! By default, the same image specified for image_repo/image_digest/image_tag will be re-used.
kube_cert_agent_image:
#@schema/title "Kube Cert Agent image"
#@ kube_cert_agent_image = "Optionally specify a different image for the 'kube-cert-agent' pod which is scheduled \
#@ on the control plane. This image needs only to include `sleep` and `cat` binaries. \
#@ By default, the same image specified for image_repo/image_digest/image_tag will be re-used."
#@schema/desc kube_cert_agent_image
#@schema/examples ("Image including tag or digest", "projects.registry.vmware.com/pinniped/pinniped-server:latest")
#@schema/nullable
#@schema/validation min_len=1
kube_cert_agent_image: ""
#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
#@schema/title "Image pull dockerconfigjson"
#@ image_pull_dockerconfigjson_desc = "A base64 encoded secret to be used when pulling the `image_repo` container image. \
#@ Can be used when the image_repo is a private registry. Typically, the value would be the output of: \
#@ kubectl create secret docker-registry x --docker-server=https://example.io --docker-username='USERNAME' --docker-password='PASSWORD' --dry-run=client -o json | jq -r '.data[\".dockerconfigjson\"]'"
#@schema/desc image_pull_dockerconfigjson_desc
#@ example_desc = 'base64 encoding of: {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}'
#@ example_value = "eyJhdXRocyI6eyJodHRwczovL2V4YW1wbGUuaW8iOnsidXNlcm5hbWUiOiJVU0VSTkFNRSIsInBhc3N3b3JkIjoiUEFTU1dPUkQiLCJhdXRoIjoiVlZORlVrNUJUVVU2VUVGVFUxZFBVa1E9In19fQ=="
#@schema/examples (example_desc, example_value)
#@schema/nullable
#@schema/validation min_len=1
image_pull_dockerconfigjson: ""
#! Pinniped will try to guess the right K8s API URL for sharing that information with potential clients.
#! This setting allows the guess to be overridden.
#! Optional.
discovery_url: #! e.g., https://example.com
#@schema/title "Discovery URL"
#@schema/desc "Pinniped will try to guess the right K8s API URL for sharing that information with potential clients. This setting allows the guess to be overridden."
#@schema/examples ("Kubernetes API URL","https://example.com")
#@schema/nullable
#@schema/validation min_len=1
discovery_url: ""
#! Specify the duration and renewal interval for the API serving certificate.
#! The defaults are set to expire the cert about every 30 days, and to rotate it
#! about every 25 days.
#@schema/title "API serving certificate duration seconds"
#@ api_serving_certificate_duration_seconds_desc = "Specify the duration for the API serving certificate. \
#@ The default is set to expire the cert about every 30 days. \
#@ Specify this as an integer or as a string which contains an integer value."
#@schema/desc api_serving_certificate_duration_seconds_desc
#@schema/type any=True
#@schema/validation ("an int or string which contains an integer value", lambda v: type(v) in ["int", "string"])
api_serving_certificate_duration_seconds: 2592000
#@schema/title "API serving certificate renew before seconds"
#@ api_serving_certificate_renew_before_seconds_desc = "Specify the renewal interval for the API serving certificate. \
#@ The default is set to rotate it about every 25 days. \
#@ Specify this as an integer or as a string which contains an integer value."
#@schema/desc api_serving_certificate_renew_before_seconds_desc
#@schema/type any=True
#@schema/validation ("an int or string which contains an integer value", lambda v: type(v) in ["int", "string"])
api_serving_certificate_renew_before_seconds: 2160000
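
Since both certificate values are declared with `any=True` and validated as an int or an integer-bearing string, either of ytt's value flags works; a sketch of the difference (the 60-day figure is illustrative):

# --data-value always passes strings, while --data-value-yaml parses the value as YAML.
# The first form therefore sets "5184000" (string) and the second 5184000 (int);
# both satisfy the "int or string which contains an integer value" validation above.
ytt --file deploy/concierge --data-value api_serving_certificate_duration_seconds=5184000 >/dev/null
ytt --file deploy/concierge --data-value-yaml api_serving_certificate_duration_seconds=5184000 >/dev/null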
#! Specify the verbosity of logging: info ("nice to know" information), debug (developer
#! information), trace (timing information), all (kitchen sink).
log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.
#! Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs).
#! By default, when this value is left unset, logs are formatted in json.
#! This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json.
deprecated_log_format:
#@schema/title "Log level"
#@ log_level_desc = "Specify the verbosity of logging: info (\"nice to know\" information), debug (developer information), trace (timing information), \
#@ or all (kitchen sink). Do not use trace or all on production systems, as credentials may get logged. \
#@ When this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs."
#@schema/desc log_level_desc
#@schema/examples ("Developer logging information","debug")
#@schema/nullable
#@schema/validation one_of=["info", "debug", "trace", "all"]
log_level: ""
run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
#@schema/title "Log format"
#@ deprecated_log_format_desc = "Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs). \
#@ By default, when this value is left unset, logs are formatted in json. \
#@ This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json."
#@schema/desc deprecated_log_format_desc
#@schema/examples ("Set logs to JSON format","json")
#@schema/nullable
#@schema/validation one_of=["json", "text"]
#@schema/deprecated "This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json."
deprecated_log_format: ""
#! Specify the API group suffix for all Pinniped API groups. By default, this is set to
#! pinniped.dev, so Pinniped API groups will look like foo.pinniped.dev,
#! authentication.concierge.pinniped.dev, etc. As an example, if this is set to tuna.io, then
#! Pinniped API groups will look like foo.tuna.io. authentication.concierge.tuna.io, etc.
#@schema/title "Run as user"
#@schema/desc "The user ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_user: 65532
#@schema/title "Run as group"
#@schema/desc "The group ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_group: 65532
#@schema/title "API group suffix"
#@ api_group_suffix_desc = "Specify the API group suffix for all Pinniped API groups. By default, this is set to \
#@ pinniped.dev, so Pinniped API groups will look like foo.pinniped.dev, \
#@ authentication.concierge.pinniped.dev, etc. As an example, if this is set to tuna.io, then \
#@ Pinniped API groups will look like foo.tuna.io, authentication.concierge.tuna.io, etc."
#@schema/desc api_group_suffix_desc
#@schema/validation min_len=1
api_group_suffix: pinniped.dev
#! Customize CredentialIssuer.spec.impersonationProxy to change how the concierge
#! handles impersonation.
#@schema/title "Impersonation proxy spec"
#@schema/desc "Customize CredentialIssuer.spec.impersonationProxy to change how the concierge handles impersonation."
impersonation_proxy_spec:
#! options are "auto", "disabled" or "enabled".
#! If auto, the impersonation proxy will run only if the cluster signing key is not available
#! and the other strategy does not work.
#! If disabled, the impersonation proxy will never run, which could mean that the concierge
#! doesn't work at all.
#! If enabled, the impersonation proxy will always run regardless of other strategies available.
mode: auto
#! The endpoint which the client should use to connect to the impersonation proxy.
#! If left unset, the client will default to connecting based on the ClusterIP or LoadBalancer
#! endpoint.
external_endpoint:
service:
#! Options are "LoadBalancer", "ClusterIP" and "None".
#! LoadBalancer automatically provisions a Service of type LoadBalancer pointing at
#! the impersonation proxy. Some cloud providers will allocate
#! a public IP address by default even on private clusters.
#! ClusterIP automatically provisions a Service of type ClusterIP pointing at the
#! impersonation proxy.
#! None does not provision either and assumes that you have set the external_endpoint
#! and set up your own ingress to connect to the impersonation proxy.
type: LoadBalancer
#! The annotations that should be set on the ClusterIP or LoadBalancer Service.
annotations:
{service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "4000"}
#! When mode LoadBalancer is set, this will set the LoadBalancer Service's Spec.LoadBalancerIP.
load_balancer_ip:
#! Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Concierge containers.
#! These will be used when the Concierge makes backend-to-backend calls to authenticators using HTTPS,
#! e.g. when the Concierge fetches discovery documents, JWKS keys, and POSTs to token webhooks.
#! The Concierge never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY.
#! Optional.
https_proxy: #! e.g. http://proxy.example.com
no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local" #! do not proxy Kubernetes endpoints
#@schema/title "Mode"
#@ impersonation_mode_desc = "Enables or disables the impersonation proxy. Options are 'auto', 'disabled' or 'enabled'. \
#@ If auto, the impersonation proxy will run only if the cluster signing key is \
#@ not available and the other strategy does not work. \
#@ If enabled, the impersonation proxy will always run regardless of other strategies available. \
#@ If disabled, the impersonation proxy will never run, which could mean \
#@ that the concierge doesn't work at all."
#@schema/desc impersonation_mode_desc
#@schema/validation one_of=["auto", "disabled", "enabled"]
mode: auto
#@schema/title "External endpoint"
#@ external_endpoint_desc = "The endpoint which the client should use to connect to the impersonation proxy. \
#@ If left unset, the client will default to connecting based on the ClusterIP or LoadBalancer endpoint."
#@schema/desc external_endpoint_desc
#@schema/examples ("Specified impersonation proxy endpoint", "https://1.2.3.4:5678")
#@schema/nullable
#@schema/validation min_len=1
external_endpoint: ""
#@schema/title "Service"
#@schema/desc "The impersonation proxy service configuration"
service:
#@schema/title "Type"
#@ impersonation_service_type_desc = "Service backing the impersonation proxy. Options are 'LoadBalancer', 'ClusterIP' \
#@ and 'None'. LoadBalancer automatically provisions a Service of type LoadBalancer pointing at the impersonation \
#@ proxy. Some cloud providers will allocate a public IP address by default even on private clusters. ClusterIP \
#@ automatically provisions a Service of type ClusterIP pointing at the impersonation proxy. None does not provision \
#@ either and assumes that you have set the external_endpoint and set up your own ingress to connect to the impersonation proxy."
#@schema/desc impersonation_service_type_desc
#@schema/validation one_of=["LoadBalancer", "ClusterIP", "None"]
type: LoadBalancer
#@schema/title "Annotations"
#@ annotations_desc = "The annotations that should be set on the ClusterIP or LoadBalancer Service. The default includes \
#@ a value for the AWS-specific service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout annotation, which will \
#@ be ignored except when using AWS to provide load balancer Services."
#@schema/desc annotations_desc
#@schema/nullable
#@schema/type any=True
#@schema/validation ("a map of string keys and string values", validate_strings_map)
annotations:
#@schema/title "Load balancer IP"
#@schema/desc "When mode LoadBalancer is set, this will set the LoadBalancer Service's spec.loadBalancerIP."
#@schema/examples ("Specifying an IP", "1.2.3.4")
#@schema/nullable
#@schema/validation min_len=1
load_balancer_ip: ""
#@schema/title "HTTPS proxy"
#@ https_proxy_desc = "Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Concierge containers. \
#@ These will be used when the Concierge makes backend-to-backend calls to authenticators using HTTPS, \
#@ e.g. when the Concierge fetches discovery documents and JWKS keys for JWTAuthenticators and POSTs to webhooks for WebhookAuthenticators. \
#@ The Concierge never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY."
#@schema/desc https_proxy_desc
#@schema/examples ("Providing a proxy endpoint","http://proxy.example.com")
#@schema/nullable
#@schema/validation min_len=1
https_proxy: ""
#@schema/title "No proxy"
#@ no_proxy_desc = "Endpoints that should not be proxied. Defaults to not proxying internal Kubernetes endpoints, \
#@ localhost endpoints, and the known instance metadata IP address for public cloud providers."
#@schema/desc no_proxy_desc
no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local"

View File

@@ -1,19 +1,44 @@
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2023 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@data/values
#@data/values-schema
---
#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
#@schema/title "Image repo"
#@schema/desc "The repository for the local-user-authenticator container image."
#@schema/validation min_len=1
image_repo: projects.registry.vmware.com/pinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
#@schema/title "Image digest"
#@schema/desc "The image digest for the local-user-authenticator container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Providing a digest", "sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8")
#@schema/nullable
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_tag"] == None
image_digest: ""
#@schema/title "Image tag"
#@schema/desc "The image tag for the local-user-authenticator container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Providing a tag", "v0.25.0")
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_digest"] == None
image_tag: latest
#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
#@schema/title "Image pull dockerconfigjson"
#@ image_pull_dockerconfigjson_desc = "A base64 encoded secret to be used when pulling the `image_repo` container image. \
#@ Can be used when the image_repo is a private registry. Typically, the value would be the output of: \
#@ kubectl create secret docker-registry x --docker-server=https://example.io --docker-username='USERNAME' --docker-password='PASSWORD' --dry-run=client -o json | jq -r '.data[\".dockerconfigjson\"]'"
#@schema/desc image_pull_dockerconfigjson_desc
#@ example_desc = 'base64 encoding of: {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}'
#@ example_value = "eyJhdXRocyI6eyJodHRwczovL2V4YW1wbGUuaW8iOnsidXNlcm5hbWUiOiJVU0VSTkFNRSIsInBhc3N3b3JkIjoiUEFTU1dPUkQiLCJhdXRoIjoiVlZORlVrNUJUVVU2VUVGVFUxZFBVa1E9In19fQ=="
#@schema/examples (example_desc, example_value)
#@schema/nullable
#@schema/validation min_len=1
image_pull_dockerconfigjson: ""
run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
#@schema/title "Run as user"
#@schema/desc "The user ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_user: 65532
#@schema/title "Run as group"
#@schema/desc "The group ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_group: 65532
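
Because of the `when=` clauses above, the digest and tag validations only apply while the other value is null, and the digest wins when both are present; a sketch (the deploy/local-user-authenticator path is an assumption):

# Pin the image by digest; image_tag keeps its "latest" default but is ignored
# because image_digest takes precedence when both are set.
ytt --file deploy/local-user-authenticator \
  --data-value image_digest=sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8 \
  >/tmp/local-user-authenticator.yaml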

View File

@@ -1,133 +1,260 @@
#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2023 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@data/values
---
#@ def validate_strings_map(obj):
#@ # Returns True if obj is an associative data structure string→string, and False otherwise.
#@ for key in obj:
#@ if type(key) != "string" or type(obj[key]) != "string":
#@ return False
#@ end
#@ end
#@ return True
#@ end
#@data/values-schema
---
#@schema/title "App name"
#@schema/desc "Used to help determine the names of various resources and labels."
#@schema/validation min_len=1
app_name: pinniped-supervisor
#! Creates a new namespace statically in yaml with the given name and installs the app into that namespace.
#@schema/title "Namespace"
#@schema/desc "Creates a new namespace statically in yaml with the given name and installs the app into that namespace."
#@schema/validation min_len=1
namespace: pinniped-supervisor
#! If specified, assumes that a namespace of the given name already exists and installs the app into that namespace.
#! If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used.
into_namespace: #! e.g. my-preexisting-namespace
#! All resources created statically by yaml at install-time and all resources created dynamically
#! by controllers at runtime will be labelled with `app: $app_name` and also with the labels
#! specified here. The value of `custom_labels` must be a map of string keys to string values.
#! The app can be uninstalled either by:
#! 1. Deleting the static install-time yaml resources including the static namespace, which will cascade and also delete
#! resources that were dynamically created by controllers at runtime
#! 2. Or, deleting all resources by label, which does not assume that there was a static install-time yaml namespace.
custom_labels: {} #! e.g. {myCustomLabelName: myCustomLabelValue, otherCustomLabelName: otherCustomLabelValue}
#@schema/title "Into namespace"
#@ into_namespace_desc = "If specified, assumes that a namespace of the given name already exists and installs the app into that namespace. \
#@ If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used."
#@schema/desc into_namespace_desc
#@schema/examples ("The name of an existing namespace", "my-preexisting-namespace")
#@schema/nullable
#@schema/validation min_len=1
into_namespace: ""
#! Specify how many replicas of the Pinniped server to run.
#@schema/title "Custom labels"
#@ custom_labels_desc = "All resources created statically by yaml at install-time and all resources created dynamically \
#@ by controllers at runtime will be labelled with `app: $app_name` and also with the labels specified here. The value of \
#@ `custom_labels` must be a map of string keys to string values. The app can be uninstalled either by: 1.) deleting the \
#@ static install-time yaml resources including the static namespace, which will cascade and also delete \
#@ resources that were dynamically created by controllers at runtime, or 2.) deleting all resources by label, which does \
#@ not assume that there was a static install-time yaml namespace."
#@schema/desc custom_labels_desc
#@schema/examples ("Example set of labels", {"myCustomLabelName": "myCustomLabelValue", "otherCustomLabelName": "otherCustomLabelValue"})
#@schema/type any=True
#@schema/validation ("a map of keys and values", validate_strings_map)
custom_labels: { }
#@schema/title "Replicas"
#@schema/desc "Specify how many replicas of the Pinniped server to run."
replicas: 2
#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
#@schema/title "Image repo"
#@schema/desc "The repository for the Supervisor container image."
#@schema/validation min_len=1
image_repo: projects.registry.vmware.com/pinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
#@schema/title "Image digest"
#@schema/desc "The image digest for the Supervisor container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Digest", "sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8")
#@schema/nullable
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_tag"] == None
image_digest: ""
#@schema/title "Image tag"
#@schema/desc "The image tag for the Supervisor container image. If both image_digest or an image_tag are given, only image_digest will be used."
#@schema/examples ("Tag", "v0.25.0")
#@schema/validation min_len=1, when=lambda _, ctx: ctx.parent["image_digest"] == None
image_tag: latest
#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
#@schema/title "Image pull dockerconfigjson"
#@ image_pull_dockerconfigjson_desc = "A base64 encoded secret to be used when pulling the `image_repo` container image. \
#@ Can be used when the image_repo is a private registry. Typically, the value would be the output of: \
#@ kubectl create secret docker-registry x --docker-server=https://example.io --docker-username='USERNAME' --docker-password='PASSWORD' --dry-run=client -o json | jq -r '.data[\".dockerconfigjson\"]'"
#@schema/desc image_pull_dockerconfigjson_desc
#@ example_desc = 'base64 encoding of: {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}'
#@ example_value = "eyJhdXRocyI6eyJodHRwczovL2V4YW1wbGUuaW8iOnsidXNlcm5hbWUiOiJVU0VSTkFNRSIsInBhc3N3b3JkIjoiUEFTU1dPUkQiLCJhdXRoIjoiVlZORlVrNUJUVVU2VUVGVFUxZFBVa1E9In19fQ=="
#@schema/examples (example_desc, example_value)
#@schema/nullable
#@schema/validation min_len=1
image_pull_dockerconfigjson: ""
#! Specify how to expose the Supervisor app's HTTPS port as a Service.
#! Typically, you would set a value for only one of the following service types.
#! Setting any of these values means that a Service of that type will be created. They are all optional.
#! Note that all port numbers should be numbers (not strings), i.e. use ytt's `--data-value-yaml` instead of `--data-value`.
#! Several of these values have been deprecated and will be removed in a future release. Their names have been changed to
#! mark them as deprecated and to make it obvious upon upgrade to anyone who was using them that they have been deprecated.
deprecated_service_http_nodeport_port: #! will be removed in a future release; when specified, creates a NodePort Service with this `port` value, with port 8080 as its `targetPort`; e.g. 31234
deprecated_service_http_nodeport_nodeport: #! will be removed in a future release; the `nodePort` value of the NodePort Service, optional when `deprecated_service_http_nodeport_port` is specified; e.g. 31234
deprecated_service_http_loadbalancer_port: #! will be removed in a future release; when specified, creates a LoadBalancer Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8443
deprecated_service_http_clusterip_port: #! will be removed in a future release; when specified, creates a ClusterIP Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8443
service_https_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 8443 as its `targetPort`; e.g. 31243
service_https_nodeport_nodeport: #! the `nodePort` value of the NodePort Service, optional when `service_https_nodeport_port` is specified; e.g. 31243
service_https_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
service_https_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
#! The `loadBalancerIP` value of the LoadBalancer Service.
#! Ignored unless service_https_loadbalancer_port is provided.
#! Optional.
service_loadbalancer_ip: #! e.g. 1.2.3.4
#@schema/title "Deprecated service HTTP nodeport port"
#@schema/desc "When specified, creates a NodePort Service with this `port` value, with port 8080 as its `targetPort`"
#@schema/examples ("Specify port",31234)
#@schema/nullable
#@schema/deprecated "This data value will be removed in a future release"
deprecated_service_http_nodeport_port: 0
#! Specify the verbosity of logging: info ("nice to know" information), debug (developer information), trace (timing information),
#! or all (kitchen sink). Do not use trace or all on production systems, as credentials may get logged.
log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.
#! Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs).
#! By default, when this value is left unset, logs are formatted in json.
#! This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json.
deprecated_log_format:
#@schema/title "Deprecated service http nodeport nodeport"
#@schema/desc "The `nodePort` value of the NodePort Service, optional when `deprecated_service_http_nodeport_port` is specified"
#@schema/examples ("Specify port",31234)
#@schema/nullable
#@schema/deprecated "This data value will be removed in a future release"
deprecated_service_http_nodeport_nodeport: 0
run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
#@schema/title "Deprecated service http loadbalancer port"
#@schema/desc "When specified, creates a LoadBalancer Service with this `port` value, with port 8080 as its `targetPort`"
#@schema/examples ("Specify port",8443)
#@schema/nullable
#@schema/deprecated "This data value will be removed in a future release"
deprecated_service_http_loadbalancer_port: 0
#! Specify the API group suffix for all Pinniped API groups. By default, this is set to
#! pinniped.dev, so Pinniped API groups will look like foo.pinniped.dev,
#! authentication.concierge.pinniped.dev, etc. As an example, if this is set to tuna.io, then
#! Pinniped API groups will look like foo.tuna.io. authentication.concierge.tuna.io, etc.
#@schema/title "Deprecated service http clusterip port"
#@schema/desc "Creates a ClusterIP Service with this `port` value, with port 8080 as its `targetPort`"
#@schema/examples ("Specify port",8443)
#@schema/nullable
#@schema/deprecated "This data value will be removed in a future release"
deprecated_service_http_clusterip_port: 0
#@schema/title "Service https nodeport port"
#@schema/desc "When specified, creates a NodePort Service with this `port` value, with port 8443 as its `targetPort`"
#@schema/examples ("Specify port",31243)
#@schema/nullable
service_https_nodeport_port: 0
#@schema/title "Service https nodeport nodeport"
#@schema/desc "The `nodePort` value of the NodePort Service, optional when `service_https_nodeport_port` is specified"
#@schema/examples ("Specify port",31243)
#@schema/nullable
service_https_nodeport_nodeport: 0
#@schema/title "Service https loadbalancer port"
#@schema/desc "When specified, creates a LoadBalancer Service with this `port` value, with port 8443 as its `targetPort`"
#@schema/examples ("Specify port",8443)
#@schema/nullable
service_https_loadbalancer_port: 0
#@schema/title "Service https clusterip port"
#@schema/desc "When specified, creates a ClusterIP Service with this `port` value, with port 8443 as its `targetPort`"
#@schema/examples ("Specify port",8443)
#@schema/nullable
service_https_clusterip_port: 0
#@schema/title "Service loadbalancer ip"
#@schema/desc "The `loadBalancerIP` value of the LoadBalancer Service. Ignored unless service_https_loadbalancer_port is provided."
#@schema/examples ("Example IP address","1.2.3.4")
#@schema/nullable
service_loadbalancer_ip: ""
#@schema/title "Log level"
#@ log_level_desc = "Specify the verbosity of logging: info (\"nice to know\" information), debug (developer information), trace (timing information), \
#@ or all (kitchen sink). Do not use trace or all on production systems, as credentials may get logged. \
#@ When this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs."
#@schema/desc log_level_desc
#@schema/examples ("Developer logging information","debug")
#@schema/nullable
#@schema/validation one_of=["info", "debug", "trace", "all"]
log_level: ""
#@schema/title "Log format"
#@ deprecated_log_format_desc = "Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs). \
#@ By default, when this value is left unset, logs are formatted in json. \
#@ This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json."
#@schema/desc deprecated_log_format_desc
#@schema/examples ("Set logs to JSON format","json")
#@schema/nullable
#@schema/validation one_of=["json", "text"]
#@schema/deprecated "This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json."
deprecated_log_format: ""
#@schema/title "Run as user"
#@schema/desc "The user ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_user: 65532
#@schema/title "Run as group"
#@schema/desc "The group ID that will own the process."
#! See the Dockerfile for the reasoning behind this default value.
run_as_group: 65532
#@schema/title "API group suffix"
#@ api_group_suffix_desc = "Specify the API group suffix for all Pinniped API groups. By default, this is set to \
#@ pinniped.dev, so Pinniped API groups will look like foo.pinniped.dev, \
#@ config.supervisor.pinniped.dev, etc. As an example, if this is set to tuna.io, then \
#@ Pinniped API groups will look like foo.tuna.io, config.supervisor.tuna.io, etc."
#@schema/desc api_group_suffix_desc
#@schema/validation min_len=1
api_group_suffix: pinniped.dev
#! Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Supervisor containers.
#! These will be used when the Supervisor makes backend-to-backend calls to upstream identity providers using HTTPS,
#! e.g. when the Supervisor fetches discovery documents, JWKS keys, and tokens from an upstream OIDC Provider.
#! The Supervisor never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY.
#! Optional.
https_proxy: #! e.g. http://proxy.example.com
no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local" #! do not proxy Kubernetes endpoints
#@schema/title "HTTPS proxy"
#@ https_proxy_desc = "Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Supervisor containers. \
#@ These will be used when the Supervisor makes backend-to-backend calls to upstream identity providers using HTTPS, \
#@ e.g. when the Supervisor fetches discovery documents, JWKS keys, and tokens from an upstream OIDC Provider. \
#@ The Supervisor never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY."
#@schema/desc https_proxy_desc
#@schema/examples ("Providing a proxy endpoint","http://proxy.example.com")
#@schema/nullable
#@schema/validation min_len=1
https_proxy: ""
#! Control the HTTP and HTTPS listeners of the Supervisor.
#!
#! The schema of this config is as follows:
#!
#! endpoints:
#! https:
#! network: tcp | unix | disabled
#! address: host:port when network=tcp or /pinniped_socket/socketfile.sock when network=unix
#! http:
#! network: same as above
#! address: same as above, except that when network=tcp then the address is only allowed to bind to loopback interfaces
#!
#! Setting network to disabled turns off that particular listener.
#! See https://pkg.go.dev/net#Listen and https://pkg.go.dev/net#Dial for a description of what can be
#! specified in the address parameter based on the given network parameter. To aid in the use of unix
#! domain sockets, a writable empty dir volume is mounted at /pinniped_socket when network is set to "unix."
#!
#! The current defaults are:
#!
#! endpoints:
#! https:
#! network: tcp
#! address: :8443
#! http:
#! network: disabled
#!
#! These defaults mean: For HTTPS listening, bind to all interfaces using TCP on port 8443.
#! Disable HTTP listening by default.
#!
#! The HTTP listener can only be bound to loopback interfaces. This allows the listener to accept
#! traffic from within the pod, e.g. from a service mesh sidecar. The HTTP listener should not be
#! used to accept traffic from outside the pod, since that would mean that the network traffic could be
#! transmitted unencrypted. The HTTPS listener should be used instead to accept traffic from outside the pod.
#! Ingresses and load balancers that terminate TLS connections should re-encrypt the data and route traffic
#! to the HTTPS listener. Unix domain sockets may also be used for integrations with service meshes.
#!
#! Changing the HTTPS port number must be accompanied by matching changes to the service and deployment
#! manifests. Changes to the HTTPS listener must be coordinated with the deployment health checks.
#!
#! Optional.
endpoints:
#@schema/title "No proxy"
#@ no_proxy_desc = "Endpoints that should not be proxied. Defaults to not proxying internal Kubernetes endpoints, \
#@ localhost endpoints, and the known instance metadata IP address for public cloud providers."
#@schema/desc no_proxy_desc
no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local"
#! Optionally override the validation on the endpoints.http value which checks that only loopback interfaces are used.
#! When deprecated_insecure_accept_external_unencrypted_http_requests is true, the HTTP listener is allowed to bind to any
#! interface, including interfaces that are listening for traffic from outside the pod. This value is being introduced
#! to ease the transition to the new loopback interface validation for the HTTP port for any users who need more time
#! to change their ingress strategy to avoid using plain HTTP into the Supervisor pods.
#! This value is immediately deprecated upon its introduction. It will be removed in some future release, at which time
#! traffic from outside the pod will need to be sent to the HTTPS listener instead, with no simple workaround available.
#! Allowed values are true (boolean), "true" (string), false (boolean), and "false" (string). The default is false.
#! Optional.
#@schema/title "Endpoints"
#@ endpoints_desc = "Control the HTTP and HTTPS listeners of the Supervisor. The current defaults are: \
#@ {\"https\":{\"network\":\"tcp\",\"address\":\":8443\"},\"http\":\"disabled\"}. \
#@ These defaults mean: 1.) for HTTPS listening, bind to all interfaces using TCP on port 8443 and \
#@ 2.) disable HTTP listening by default. \
#@ The schema of this config is as follows: \
#@ {\"https\":{\"network\":\"tcp | unix | disabled\",\"address\":\"host:port when network=tcp or /pinniped_socket/socketfile.sock when network=unix\"},\"http\":{\"network\":\"tcp | unix | disabled\",\"address\":\"same as https, except that when network=tcp then the address is only allowed to bind to loopback interfaces\"}} \
#@ The HTTP listener can only be bound to loopback interfaces. This allows the listener to accept \
#@ traffic from within the pod, e.g. from a service mesh sidecar. The HTTP listener should not be \
#@ used to accept traffic from outside the pod, since that would mean that the network traffic could be \
#@ transmitted unencrypted. The HTTPS listener should be used instead to accept traffic from outside the pod. \
#@ Ingresses and load balancers that terminate TLS connections should re-encrypt the data and route traffic \
#@ to the HTTPS listener. Unix domain sockets may also be used for integrations with service meshes. \
#@ Changing the HTTPS port number must be accompanied by matching changes to the service and deployment \
#@ manifests. Changes to the HTTPS listener must be coordinated with the deployment health checks."
#@schema/desc endpoints_desc
#@schema/examples ("Example matching default settings", '{"https":{"network":"tcp","address":":8443"},"http":"disabled"}')
#@schema/type any=True
#@ def validate_endpoint(endpoint):
#@ if(type(endpoint) not in ["yamlfragment", "string"]):
#@ return False
#@ end
#@ if(type(endpoint) in ["string"]):
#@ if (endpoint != "disabled"):
#@ return False
#@ end
#@ end
#@ if(type(endpoint) in ["yamlfragment"]):
#@ if (endpoint["network"] not in ["tcp", "unix", "disabled"]):
#@ return False
#@ end
#@ if (type(endpoint["address"]) not in ["string"]):
#@ return False
#@ end
#@ end
#@ return True
#@ end
#@ def validate_endpoints(endpoints):
#@ """
#@ Returns True if endpoints fulfill the expected structure
#@ """
#@ http_val = endpoints["http"]
#@ https_val = endpoints["https"]
#@ return validate_endpoint(http_val) and validate_endpoint(https_val)
#@ end
#@schema/nullable
#@schema/validation ("a map with keys 'http' and 'https', whose values are either the string 'disabled' or a map having keys 'network' and 'address', and the value of 'network' must be one of the allowed values", validate_endpoints)
endpoints: { }
#@ deprecated_insecure_accept_external_unencrypted_http_requests_desc = "Optionally override the validation on the endpoints.http \
#@ value which checks that only loopback interfaces are used. \
#@ When deprecated_insecure_accept_external_unencrypted_http_requests is true, the HTTP listener is allowed to bind to any \
#@ interface, including interfaces that are listening for traffic from outside the pod. This value is being introduced \
#@ to ease the transition to the new loopback interface validation for the HTTP port for any users who need more time \
#@ to change their ingress strategy to avoid using plain HTTP into the Supervisor pods. \
#@ This value is immediately deprecated upon its introduction. It will be removed in some future release, at which time \
#@ traffic from outside the pod will need to be sent to the HTTPS listener instead, with no simple workaround available. \
#@ Allowed values are true (boolean), 'true' (string), false (boolean), and 'false' (string). The default is false."
#@schema/desc deprecated_insecure_accept_external_unencrypted_http_requests_desc
#@schema/type any=True
#@schema/validation ("a boolean or string version of boolean", lambda v: type(v) in ["string", "boolean"])
#@schema/validation one_of=["true", "false", True, False]
#@schema/deprecated "This data value will be removed in a future release"
deprecated_insecure_accept_external_unencrypted_http_requests: false
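
The `endpoints` value is typed `any=True` and checked by `validate_endpoints`; a sketch of a data-values file that the check accepts, switching HTTPS onto a Unix domain socket as described in the comments above (the file path is illustrative, and the deploy/supervisor path is an assumption):

# Illustrative endpoints override: HTTPS over a Unix domain socket, HTTP disabled.
# validate_endpoints accepts either the string "disabled" or a {network, address} map per listener.
cat >/tmp/supervisor-values.yaml <<'EOF'
endpoints:
  https:
    network: unix
    address: /pinniped_socket/socketfile.sock
  http: disabled
EOF

ytt --file deploy/supervisor --data-values-file /tmp/supervisor-values.yaml >/tmp/supervisor.yaml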

View File

@@ -1,11 +1,31 @@
#!/usr/bin/env bash
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${ROOT}"
source hack/lib/helpers.sh
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
reg_name='kind-registry.local'
# If the container is running...
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" == 'true' ]; then
# Disconnect it from the kind network, if it was connected.
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" != 'null' ]; then
docker network disconnect "kind" "${reg_name}" >/dev/null
fi
log_note "Stopping container $reg_name ..."
docker stop "${reg_name}" >/dev/null
# Delete it.
docker rm "${reg_name}" >/dev/null
fi
fi
kind delete cluster --name pinniped

View File

@@ -8,13 +8,60 @@ set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${ROOT}"
if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
echo "Adding Contour port mapping to Kind config."
ytt -f "${ROOT}/hack/lib/kind-config/single-node.yaml" \
-f "${ROOT}/hack/lib/kind-config/contour-overlay.yaml" >/tmp/kind-config.yaml
kind create cluster --config /tmp/kind-config.yaml --name pinniped
else
# To choose a specific version of kube, add this option to the command below: `--image kindest/node:v1.28.0`.
# To debug the kind config, add this option to the command below: `-v 10`
kind create cluster --config "hack/lib/kind-config/single-node.yaml" --name pinniped
source hack/lib/helpers.sh
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
# Create registry container unless it already exists.
reg_name='kind-registry.local'
reg_port='5000'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
log_note "Running the registry:2 docker image..."
docker run \
--detach \
--restart=always \
--publish "127.0.0.1:${reg_port}:5000" \
--name "${reg_name}" \
registry:2
fi
fi
use_contour_registry=""
if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
log_note "Adding Contour port mapping to Kind config."
use_contour_registry="--file=${ROOT}/hack/lib/kind-config/contour-overlay.yaml"
fi
use_kind_registry=""
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
log_note "Adding local registry to Kind config."
use_kind_registry="--file=${ROOT}/hack/lib/kind-config/kind-registry-overlay.yaml"
fi
# Do not quote ${use_kind_registry} ${use_contour_registry} in this command because they might be empty.
ytt ${use_kind_registry} ${use_contour_registry} --file="${ROOT}"/hack/lib/kind-config/single-node.yaml >/tmp/kind-config.yaml
# To choose a specific version of kube, add this option to the command below: `--image kindest/node:v1.28.0`.
# To debug the kind config, add this option to the command below: `-v 10`
kind create cluster --config /tmp/kind-config.yaml --name pinniped
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
# Connect the registry to the cluster network if not already connected.
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" == 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# Document the local registry.
# See https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
fi
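
With the registry container published on 127.0.0.1:5000, attached to the kind network, and advertised through the ConfigMap above, images built on the host can be pushed to it and referenced from the cluster, assuming the (elided) kind-registry-overlay.yaml wires containerd to the registry as in the linked kind local-registry guide. A sketch with an illustrative image name:

# Push a locally built image into the kind-local registry and confirm the advertisement ConfigMap.
docker tag my-app:dev localhost:5000/my-app:dev
docker push localhost:5000/my-app:dev
kubectl get configmap local-registry-hosting --namespace kube-public --output yaml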

hack/lib/carvel_packages/build.sh (new executable file, 135 lines)
View File

@@ -0,0 +1,135 @@
#!/usr/bin/env bash
# Copyright 2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# This script can be used in conjunction with prepare-for-integration-tests.sh.
# When invoked with the PINNIPED_USE_LOCAL_KIND_REGISTRY environment variable set to a non-empty value,
# the prepare-for-integration-tests.sh script will create a local docker registry and configure kind to use the registry.
# This script will build the Pinniped binary and container image.
# This script will then create Carvel Packages for supervisor, concierge, and local-user-authenticator.
# It will also create a Carvel PackageRepository.
# The PackageRepository will be installed on the kind cluster.
#
# Example usage:
# PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/prepare-for-integration-tests.sh --clean --pre-install ./hack/lib/carvel_packages/build.sh --alternate-deploy ./hack/lib/carvel_packages/deploy.sh
#
set -euo pipefail
# This script is best invoked from the root directory.
# It is designed to be passed as --pre-install flag to hack/prepare-for-integration-tests.sh.
hack_lib_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${hack_lib_path}/../../" || exit 1
source hack/lib/helpers.sh
# Check for dependencies
check_dependency kbld "Please install kbld. e.g. 'brew tap vmware-tanzu/carvel && brew install kbld' for MacOS"
check_dependency imgpkg "Please install imgpkg. e.g. 'brew tap vmware-tanzu/carvel && brew install imgpkg' for MacOS"
check_dependency vendir "Please install vendir. e.g. 'brew tap vmware-tanzu/carvel && brew install vendir' for MacOS"
# Expected arguments.
app=${1:-"app-argument-not-provided"}
tag=${2:-"tag-argument-not-provided"}
registry=${3:-"registry-argument-not-provided"}
repo=${4:-"repo-argument-not-provided"}
log_note "build.sh called with app: ${app} tag: ${tag} registry: ${registry} repo: ${repo}"
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" == "" ]]; then
log_error "Building the Carvel package requires configuring kind with a local registry."
log_error "please set the environment variable PINNIPED_USE_LOCAL_KIND_REGISTRY"
log_error "for example:"
log_error " PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/prepare-for-integration-tests.sh --clean --pre-install ./hack/lib/carvel_packages/build.sh --alternate-deploy ./hack/lib/carvel_packages/deploy.sh"
exit 1
fi
pinniped_package_version="${tag}" # i.e., "0.25.0"
registry_repo="$registry/$repo"
api_group_suffix="pinniped.dev"
# Package prefix for pinniped-concierge, pinniped-supervisor, local-user-authenticator
package_repo_prefix="${registry_repo}/package" # + $resource_name + ":" + $tag
# Pinniped Package repository
package_repository_repo="pinniped-package-repository"
package_repository_repo_tag="${registry_repo}/${package_repository_repo}:${tag}"
carvel_package_src="hack/lib/carvel_packages"
template_src_dir="${carvel_package_src}/templates"
dest_dir="deploy_carvel_tmp"
# clean the root carvel package directory
rm -rf "${dest_dir}"
mkdir -p "${dest_dir}"
# Generate the OpenAPI v3 Schema files, imgpkg images.yml files
declare -a packages_to_build=("local-user-authenticator" "pinniped-concierge" "pinniped-supervisor")
for resource_name in "${packages_to_build[@]}"; do
resource_qualified_name="${resource_name}.${api_group_suffix}"
package_repo_tag="${package_repo_prefix}-${resource_name}:${tag}"
# sources
resource_package_template_source_dir="${template_src_dir}/${resource_name}"
# destinations
resource_destination_dir="${dest_dir}/${resource_name}"
resource_config_destination_dir="${resource_destination_dir}/config"
log_note "Copying static template files for ${resource_name}..."
mkdir "${resource_destination_dir}"
cp "${resource_package_template_source_dir}/metadata.yml" "${resource_destination_dir}/metadata.yml"
cp "${resource_package_template_source_dir}/build.yml" "${resource_destination_dir}/build.yml"
cp "${resource_package_template_source_dir}/vendir.yml" "${resource_destination_dir}/vendir.yml"
cp "${resource_package_template_source_dir}/release_notes.txt" "${resource_destination_dir}/release_notes.txt" # dummy
log_note "Vendir sync deploy directory for ${resource_name} to package bundle..."
pushd "${resource_destination_dir}" >/dev/null
vendir sync
popd >/dev/null
log_note "Generating OpenAPI v3 schema for ${resource_name}..."
ytt \
--file "${resource_config_destination_dir}" \
--data-values-schema-inspect \
--output openapi-v3 > \
"${resource_destination_dir}/schema-openapi.yml"
log_note "Generating .imgpkg/images.yml for ${resource_name}..."
mkdir -p "${resource_destination_dir}/.imgpkg"
ytt \
--file "${resource_config_destination_dir}" |
kbld -f- --imgpkg-lock-output "${resource_destination_dir}/.imgpkg/images.yml"
log_note "Pushing Pinniped ${resource_name} Package bundle..."
imgpkg push --bundle "${package_repo_tag}" --file "${resource_destination_dir}"
log_note "Generating PackageRepository Package entry for ${resource_name}"
# Publish package versions to package repository.
packages_dir="${dest_dir}/package_repository/packages/"
package_repository_dir="${packages_dir}/${resource_qualified_name}"
mkdir -p "${packages_dir}"
rm -rf "${package_repository_dir}"
mkdir "${package_repository_dir}"
ytt \
--file "${resource_package_template_source_dir}/package-template.yml" \
--data-value-file openapi="${resource_destination_dir}/schema-openapi.yml" \
--data-value-file releaseNotes="${resource_destination_dir}/release_notes.txt" \
--data-value repo_host="${package_repo_prefix}-${resource_name}" \
--data-value version="${pinniped_package_version}" >"${package_repository_dir}/${pinniped_package_version}.yml"
cp "${resource_package_template_source_dir}/metadata.yml" "${package_repository_dir}/metadata.yml"
done
log_note "Generating .imgpkg/images.yml for Pinniped PackageRepository bundle..."
mkdir -p "${dest_dir}/package_repository/.imgpkg"
kbld --file "${dest_dir}/package_repository/packages/" --imgpkg-lock-output "${dest_dir}/package_repository/.imgpkg/images.yml"
log_note "Pushing Pinniped PackageRepository bundle.... "
imgpkg push --bundle "${package_repository_repo_tag}" --file "${dest_dir}/package_repository"
# manually validate the package bundle by pulling it from the registry and examining its contents:
# imgpkg pull --bundle "${package_repository_repo_tag}" --output "/tmp/${package_repository_repo_tag}"
log_note "Building Carvel Packages for Supervisor, Concierge & local-user-authenticator complete."

View File

@@ -0,0 +1,177 @@
#!/usr/bin/env bash
# Copyright 2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# This script can be used in conjunction with prepare-for-integration-tests.sh.
# When invoked with the PINNIPED_USE_LOCAL_KIND_REGISTRY environment variable set to a non-empty value,
# the prepare-for-integration-tests.sh script will create a local docker registry and configure kind to use the registry.
# This script will deploy the Carvel Packages for supervisor, concierge, or local-user-authenticator.
#
# Example usage:
# PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/prepare-for-integration-tests.sh --clean --pre-install ./hack/lib/carvel_packages/build.sh --alternate-deploy ./hack/lib/carvel_packages/deploy.sh
#
set -euo pipefail
# This script is best invoked from the repository root directory.
# It is designed to be passed via the --alternate-deploy flag of hack/prepare-for-integration-tests.sh.
hack_lib_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$hack_lib_path/../../" || exit 1
source hack/lib/helpers.sh
# Expected arguments.
app=${1:-"app-argument-not-provided"}
tag=${2:-"tag-argument-not-provided"}
registry=${3:-"registry-argument-not-provided"}
repo=${4:-"repo-argument-not-provided"}
data_values_file=${5:-"ytt-data-values-file-argument-not-provided"}
log_note "deploy.sh called with app: ${app} tag: ${tag} registry: ${registry} repo: ${repo} data_values_file: ${data_values_file}"
log_note "Begin deploy of carvel package for ${app}..."
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" == "" ]]; then
log_error "Building the Carvel package requires configuring kind with a local registry."
log_error "Please set the environment variable PINNIPED_USE_LOCAL_KIND_REGISTRY."
log_error "For example:"
log_error " PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/prepare-for-integration-tests.sh --clean --pre-install ./hack/lib/carvel_packages/build.sh --alternate-deploy ./hack/lib/carvel_packages/deploy.sh"
exit 1
fi
pinniped_package_version="${tag}" # e.g. "0.25.0"
registry_repo="$registry/$repo"
# Pinniped Package repository
package_repository_repo="pinniped-package-repository"
package_repository_repo_tag="${registry_repo}/${package_repository_repo}:${tag}"
# Use the same directory as build.sh.
dest_dir="deploy_carvel_tmp"
# Deploy kapp-controller onto kind cluster.
log_note "Installing kapp-controller on cluster..."
KAPP_CONTROLLER_GLOBAL_NAMESPACE="kapp-controller-packaging-global"
kapp deploy --app kapp-controller --file "https://github.com/vmware-tanzu/carvel-kapp-controller/releases/latest/download/release.yml" -y
# Ensure this directory exists, since this script may run several times.
mkdir -p "${dest_dir}/install"
log_note "Deploying Pinniped PackageRepository..."
pinniped_package_repository_name="pinniped-package-repository"
pinniped_package_repository_file="${dest_dir}/install/packagerepository.${pinniped_package_version}.yml"
cat <<EOT >"${pinniped_package_repository_file}"
---
apiVersion: packaging.carvel.dev/v1alpha1
kind: PackageRepository
metadata:
name: "${pinniped_package_repository_name}"
namespace: "${KAPP_CONTROLLER_GLOBAL_NAMESPACE}"
spec:
fetch:
imgpkgBundle:
image: "${package_repository_repo_tag}"
EOT
kapp deploy --app "${pinniped_package_repository_name}" --file "${pinniped_package_repository_file}" -y
kapp inspect --app "${pinniped_package_repository_name}" --tree
resource_name="${app}"
log_note "Creating RBAC for ${resource_name} PackageInstall..."
namespace="${resource_name}-install-ns"
pinniped_package_rbac_prefix="pinniped-package-rbac-${resource_name}"
pinniped_package_rbac_file="${dest_dir}/install/${pinniped_package_rbac_prefix}-${resource_name}-rbac.yml"
# NOTE: This script is for development purposes running on a local kind cluster.
# For any other use case, the generated artifacts should be properly reviewed.
# For example, the RBAC generated here should be adjusted to conform to the
# principle of LEAST privilege.
cat <<EOF >"${pinniped_package_rbac_file}"
---
apiVersion: v1
kind: Namespace
metadata:
name: "${namespace}"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "${pinniped_package_rbac_prefix}-sa-superadmin-dangerous"
namespace: "${namespace}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "${pinniped_package_rbac_prefix}-role-superadmin-dangerous"
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "${pinniped_package_rbac_prefix}-role-binding-superadmin-dangerous"
subjects:
- kind: ServiceAccount
name: "${pinniped_package_rbac_prefix}-sa-superadmin-dangerous"
namespace: "${namespace}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "${pinniped_package_rbac_prefix}-role-superadmin-dangerous"
EOF
kapp deploy --app "${pinniped_package_rbac_prefix}" --file "${pinniped_package_rbac_file}" -y
log_note "Creating ${resource_name} PackageInstall..."
NAMESPACE="${resource_name}-install-ns"
PINNIPED_PACKAGE_RBAC_PREFIX="pinniped-package-rbac-${resource_name}"
RESOURCE_PACKAGE_VERSION="${resource_name}.pinniped.dev"
PACKAGE_INSTALL_FILE_NAME="${dest_dir}/install/${resource_name}-pkginstall.yml"
SECRET_NAME="${resource_name}-package-install-secret"
log_note "Generating ${PACKAGE_INSTALL_FILE_NAME}..."
cat >"${PACKAGE_INSTALL_FILE_NAME}" <<EOF
---
apiVersion: packaging.carvel.dev/v1alpha1
kind: PackageInstall
metadata:
# This name does not have to be versioned; versionSelection.constraints below handles version selection.
name: "${resource_name}-package-install"
namespace: "${NAMESPACE}"
spec:
serviceAccountName: "${PINNIPED_PACKAGE_RBAC_PREFIX}-sa-superadmin-dangerous"
packageRef:
refName: "${RESOURCE_PACKAGE_VERSION}"
versionSelection:
constraints: "${pinniped_package_version}"
values:
- secretRef:
name: "${SECRET_NAME}"
EOF
log_note "Creating secret ${SECRET_NAME} with ${data_values_file}..."
kubectl create secret generic "${SECRET_NAME}" --namespace "${NAMESPACE}" --from-file "${data_values_file}" --dry-run=client -o yaml | kubectl apply -f-
KAPP_CONTROLLER_APP_NAME="${resource_name}-pkginstall"
log_note "Deploying ${KAPP_CONTROLLER_APP_NAME}..."
kapp deploy --app "${KAPP_CONTROLLER_APP_NAME}" --file "${PACKAGE_INSTALL_FILE_NAME}" -y
log_note "Verifying PackageInstall resources..."
kubectl get PackageInstall -A | grep pinniped
kubectl get secret -A | grep pinniped
log_note "Listing all package resources (PackageRepository, Package, PackageInstall)..."
kubectl get pkgi && kubectl get pkgr && kubectl get pkg
log_note "Listing all kapp cli apps..."
kapp ls --all-namespaces
log_note "Listing all kapp-controller apps..."
kubectl get app --all-namespaces
log_note "Complete deploy of carvel package for ${app}..."


@@ -0,0 +1,4 @@
apiVersion: kbld.k14s.io/v1alpha1
kind: Config
minimumRequiredVersion: 0.31.0
overrides:


@@ -0,0 +1,11 @@
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: PackageMetadata
metadata:
name: local-user-authenticator.pinniped.dev
spec:
displayName: "local-user-authenticator"
longDescription: "The local-user-authenticator app is an identity provider used for integration testing and demos. Note that this is not recommended for
production use."
shortDescription: "The local-user-authenticator app is an identity provider used for integration testing and demos."
categories:
- auth


@@ -0,0 +1,28 @@
#@ load("@ytt:data", "data") # for reading data values (generated via ytt's data-values-schema-inspect mode).
#@ load("@ytt:yaml", "yaml") # for dynamically decoding the output of ytt's data-values-schema-inspect
---
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: Package
metadata:
name: #@ "local-user-authenticator.pinniped.dev." + data.values.version
spec:
refName: local-user-authenticator.pinniped.dev
version: #@ data.values.version
releaseNotes: #@ yaml.decode(data.values.releaseNotes)
valuesSchema:
openAPIv3: #@ yaml.decode(data.values.openapi)["components"]["schemas"]["dataValues"]
template:
spec:
fetch:
- imgpkgBundle:
image: #@ data.values.repo_host + ":" + data.values.version
template:
- ytt:
paths:
- "config/"
- kbld:
paths:
- ".imgpkg/images.yml"
- "-"
deploy:
- kapp: {}
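The template stanza above is what kapp-controller executes at install time: fetch the imgpkg bundle, render config/ with ytt, resolve image references with kbld, and deploy with kapp. Roughly the same pipeline can be reproduced by hand for debugging; in this sketch the output directory, values file, and app name are placeholders, while ${package_repo_prefix} and ${tag} are the variables used by build.sh above:

imgpkg pull --bundle "${package_repo_prefix}-local-user-authenticator:${tag}" --output /tmp/lua-bundle
ytt --file /tmp/lua-bundle/config --data-values-file my-values.yml |
  kbld --file /tmp/lua-bundle/.imgpkg/images.yml --file - |
  kapp deploy --app lua-by-hand --file - --yes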


@@ -0,0 +1,4 @@
Dummy Release Notes for local-user-authenticator
- foo
- bar
- baz


@@ -0,0 +1,8 @@
apiVersion: vendir.k14s.io/v1alpha1
kind: Config
directories:
- path: config
contents:
- path: .
directory:
path: ../../deploy/local-user-authenticator
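This vendir config is what copies the app's ytt templates into the package bundle: running vendir sync next to this file pulls deploy/local-user-authenticator into config/ and records the result in a vendir.lock.yml. A minimal manual sketch, assuming build.sh's deploy_carvel_tmp working directory at the repo root:

cd deploy_carvel_tmp/local-user-authenticator
vendir sync
ls config/           # now mirrors deploy/local-user-authenticator
cat vendir.lock.yml  # records what was synced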


@@ -0,0 +1,4 @@
apiVersion: kbld.k14s.io/v1alpha1
kind: Config
minimumRequiredVersion: 0.31.0
overrides:


@@ -0,0 +1,10 @@
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: PackageMetadata
metadata:
name: pinniped-concierge.pinniped.dev
spec:
displayName: "Pinniped Concierge"
longDescription: "Pinniped concierge enables consistent login across Kubernetes clusters on public cloud providers such as AKS, EKS and GKE"
shortDescription: "Pinniped concierge enables consistent login across public clouds"
categories:
- auth


@@ -0,0 +1,28 @@
#@ load("@ytt:data", "data") # for reading data values (generated via ytt's data-values-schema-inspect mode).
#@ load("@ytt:yaml", "yaml") # for dynamically decoding the output of ytt's data-values-schema-inspect
---
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: Package
metadata:
name: #@ "pinniped-concierge.pinniped.dev." + data.values.version
spec:
refName: pinniped-concierge.pinniped.dev
version: #@ data.values.version
releaseNotes: #@ yaml.decode(data.values.releaseNotes)
valuesSchema:
openAPIv3: #@ yaml.decode(data.values.openapi)["components"]["schemas"]["dataValues"]
template:
spec:
fetch:
- imgpkgBundle:
image: #@ data.values.repo_host + ":" + data.values.version
template:
- ytt:
paths:
- "config/"
- kbld:
paths:
- ".imgpkg/images.yml"
- "-"
deploy:
- kapp: {}


@@ -0,0 +1,4 @@
Dummy Release Notes for Concierge
- foo
- bar
- baz


@@ -0,0 +1,8 @@
apiVersion: vendir.k14s.io/v1alpha1
kind: Config
directories:
- path: config
contents:
- path: .
directory:
path: ../../deploy/concierge


@@ -0,0 +1,4 @@
apiVersion: kbld.k14s.io/v1alpha1
kind: Config
minimumRequiredVersion: 0.31.0
overrides:


@@ -0,0 +1,10 @@
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: PackageMetadata
metadata:
name: pinniped-supervisor.pinniped.dev
spec:
displayName: "Pinniped Supervisor"
longDescription: "Pinniped supervisor allows seamless login across one or many Kubernetes clusters including AKS, EKS and GKE"
shortDescription: "Pinniped supervisor provides login capabilities"
categories:
- auth


@@ -0,0 +1,28 @@
#@ load("@ytt:data", "data") # for reading data values (generated via ytt's data-values-schema-inspect mode).
#@ load("@ytt:yaml", "yaml") # for dynamically decoding the output of ytt's data-values-schema-inspect
---
apiVersion: data.packaging.carvel.dev/v1alpha1
kind: Package
metadata:
name: #@ "pinniped-supervisor.pinniped.dev." + data.values.version
spec:
refName: pinniped-supervisor.pinniped.dev
version: #@ data.values.version
releaseNotes: #@ yaml.decode(data.values.releaseNotes)
valuesSchema:
openAPIv3: #@ yaml.decode(data.values.openapi)["components"]["schemas"]["dataValues"]
template:
spec:
fetch:
- imgpkgBundle:
image: #@ data.values.repo_host + ":" + data.values.version
template:
- ytt:
paths:
- "config/"
- kbld:
paths:
- ".imgpkg/images.yml"
- "-"
deploy:
- kapp: {}


@@ -0,0 +1,4 @@
Dummy Release Notes for Supervisor
- foo
- bar
- baz


@@ -0,0 +1,8 @@
apiVersion: vendir.k14s.io/v1alpha1
kind: Config
directories:
- path: config
contents:
- path: .
directory:
path: ../../deploy/supervisor

hack/lib/helpers.sh

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Copyright 2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
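A minimal usage sketch of these helpers (the dependency and hint text here are just examples):

#!/usr/bin/env bash
source hack/lib/helpers.sh
check_dependency ytt "Please install ytt from https://carvel.dev"
log_note "ytt is installed"
log_error "this prints in red when COLORTERM reports truecolor support"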


@@ -0,0 +1,11 @@
#! Copyright 2023 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:overlay", "overlay")
#@overlay/match by=overlay.subset({"kind": "Cluster"}), expects=1
---
#@overlay/match missing_ok=True
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."kind-registry.local:5000"]
endpoint = ["http://kind-registry.local:5000"]


@@ -6,41 +6,21 @@
#
# This script can be used to prepare a kind cluster and deploy the app.
# You can call this script again to redeploy the app.
# It will also output instructions on how to run the integration.
# It will also output instructions on how to run the integration tests.
#
# When invoked with the PINNIPED_USE_LOCAL_KIND_REGISTRY environment variable set to a non-empty value, then
# this script will create a local registry and configure kind to use that registry. This is normally unnecessary.
# However, if an alternative build and deploy approach is used, such as via a Carvel packaging mechanism, then a local
# registry could be needed (e.g. the kbld tool requires a registry to resolve images to digests).
# For example, to alternatively build and deploy Pinniped as a Carvel package, use:
# PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/prepare-for-integration-tests.sh --clean --pre-install ./hack/lib/carvel_packages/build.sh --alternate-deploy ./hack/lib/carvel_packages/deploy.sh
#
set -euo pipefail
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
pinniped_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$pinniped_path" || exit 1
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
source hack/lib/helpers.sh
#
# Handle argument parsing and help message
@@ -52,14 +32,8 @@ api_group_suffix="pinniped.dev" # same default as in the values.yaml ytt file
dockerfile_path=""
get_active_directory_vars="" # specify a filename for a script to get AD related env variables
alternate_deploy="undefined"
alternate_deploy_supervisor="undefined"
alternate_deploy_concierge="undefined"
alternate_deploy_local_user_authenticator="undefined"
pre_install="undefined"
# supported variable style:
# --dockerfile-path ./foo.sh
# unsupported variable style (using = will fail the script):
# --dockerfile-path=./foo.sh
while (("$#")); do
case "$1" in
-h | --help)
@@ -113,31 +87,13 @@ while (("$#")); do
alternate_deploy=$1
shift
;;
--alternate-deploy-supervisor)
--pre-install)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-supervisor requires a script path to be specified"
log_error "--pre-install requires a script path to be specified"
exit 1
fi
alternate_deploy_supervisor=$1
shift
;;
--alternate-deploy-concierge)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-concierge requires a script path to be specified"
exit 1
fi
alternate_deploy_concierge=$1
shift
;;
--alternate-deploy-local-user-authenticator)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-local-user-authenticator requires a script path to be specified"
exit 1
fi
alternate_deploy_local_user_authenticator=$1
pre_install=$1
shift
;;
-*)
@@ -160,21 +116,16 @@ if [[ "$help" == "yes" ]]; then
log_note " $me [flags]"
log_note
log_note "Flags:"
log_note " -h, --help: print this usage"
log_note " -c, --clean: destroy the current kind cluster and make a new one"
log_note " -g, --api-group-suffix: deploy Pinniped with an alternate API group suffix"
log_note " -s, --skip-build: reuse the most recently built image of the app instead of building"
log_note " -a, --get-active-directory-vars: specify a script that exports active directory environment variables"
log_note " --alternate-deploy: specify an alternate deploy script to install all components of Pinniped"
log_note " --alternate-deploy-supervisor: specify an alternate deploy script to install Pinniped Supervisor"
log_note " --alternate-deploy-concierge: specify an alternate deploy script to install Pinniped Concierge"
log_note " --alternate-deploy-local-user-authenticator: specify an alternate deploy script to install Pinniped local-user-authenticator"
log_note " -h, --help: print this usage"
log_note " -c, --clean: destroy the current kind cluster and make a new one"
log_note " -g, --api-group-suffix: deploy Pinniped with an alternate API group suffix"
log_note " -s, --skip-build: reuse the most recently built image of the app instead of building"
log_note " -a, --get-active-directory-vars: specify a script that exports active directory environment variables"
log_note " --alternate-deploy: specify an alternate deploy script to install all components of Pinniped"
log_note " --pre-install: specify an pre-install script such as a build script"
exit 1
fi
pinniped_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$pinniped_path" || exit 1
#
# Check for dependencies
#
@@ -221,12 +172,33 @@ else
fi
registry="pinniped.local"
registry_with_port="$registry"
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
registry="kind-registry.local"
registry_with_port="$registry:5000"
fi
repo="test/build"
registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
registry_repo="$registry_with_port/$repo"
tag="0.0.0-$(uuidgen)" # always a new tag to force K8s to reload the image on redeploy
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
etc_hosts_local_registry_missing=no
if ! grep -q "$registry" /etc/hosts; then
etc_hosts_local_registry_missing=yes
fi
if [[ "$etc_hosts_local_registry_missing" == "yes" ]]; then
echo
log_error "In order to configure the kind cluster to use the local registry properly,"
log_error "please run this command to edit /etc/hosts, and then run this script again with the same options."
echo "sudo bash -c \"echo '127.0.0.1 $registry' >> /etc/hosts\""
log_error "When you are finished with your Kind cluster, you can remove this line from /etc/hosts."
exit 1
fi
fi
if [[ "$skip_build" == "yes" ]]; then
most_recent_tag=$(docker images "$registry/$repo" --format "{{.Tag}}" | head -1)
most_recent_tag=$(docker images "$registry_repo" --format "{{.Tag}}" | head -1)
if [[ -n "$most_recent_tag" ]]; then
tag="$most_recent_tag"
do_build=no
@@ -243,7 +215,7 @@ registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
testing_version="${KUBE_GIT_VERSION:-}"
if [[ "$dockerfile_path" != "" ]]; then
if [[ "$dockerfile_path" != "" ]]; then
log_note "Docker building the app with dockerfile $dockerfile_path and KUBE_GIT_VERSION='$testing_version'"
DOCKER_BUILDKIT=1 docker build . --tag "$registry_repo_tag" --file "$dockerfile_path" --build-arg "KUBE_GIT_VERSION=$testing_version"
else
@@ -253,31 +225,39 @@ if [[ "$do_build" == "yes" ]]; then
fi
fi
# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag" --name pinniped
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
# If registry used, push to the registry.
log_note "Loading the app's container image into the local registry ($registry_with_port)..."
docker push "$registry_repo_tag"
else
# Otherwise, side-load directly.
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag" --name pinniped
fi
if [ "$pre_install" != "undefined" ]; then
log_note "Calling the pre-install script with args: $tag $registry_with_port $repo ..."
$pre_install pre-install-script "$tag" $registry_with_port $repo
fi
#
# Deploy local-user-authenticator
#
manifest=/tmp/pinniped-local-user-authenticator.yaml
data_values_file=/tmp/local-user-authenticator-values.yml
cat <<EOF >"$data_values_file"
---
image_repo: $registry_repo
image_tag: $tag
EOF
if [ "$alternate_deploy" != "undefined" ] || [ "$alternate_deploy_local_user_authenticator" != "undefined" ] ; then
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The Pinniped local-user-authenticator will be deployed with $alternate_deploy local-user-authenticator $tag..."
$alternate_deploy local-user-authenticator $tag
fi
if [ "$alternate_deploy_local_user_authenticator" != "undefined" ]; then
log_note "The Pinniped local-user-authenticator will be deployed with $alternate_deploy_local_user_authenticator local-user-authenticator $tag..."
$alternate_deploy_local_user_authenticator local-user-authenticator $tag
fi
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The local-user-authenticator will be deployed with $alternate_deploy local-user-authenticator $tag $registry_with_port $repo $data_values_file ..."
$alternate_deploy local-user-authenticator "$tag" $registry_with_port $repo $data_values_file
else
log_note "Deploying the local-user-authenticator app to the cluster using kapp..."
pushd deploy/local-user-authenticator >/dev/null
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
ytt --file . --data-values-file "$data_values_file" >"$manifest"
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -297,28 +277,15 @@ ytt --file . \
--data-value "pinny_ldap_password=$ldap_test_password" \
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
>"$manifest"
kapp deploy --yes --app tools --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
test_username="test-username"
test_groups="test-group-0,test-group-1"
test_password="$(openssl rand -hex 16)"
log_note "Creating test user '$test_username'..."
kubectl create secret generic "$test_username" \
--namespace local-user-authenticator \
--from-literal=groups="$test_groups" \
--from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \
--dry-run=client \
--output yaml |
kubectl apply -f -
#
# Deploy the Pinniped Supervisor
#
manifest=/tmp/pinniped-supervisor.yaml
data_values_file=/tmp/supervisor-values.yml
supervisor_app_name="pinniped-supervisor"
supervisor_namespace="supervisor"
supervisor_custom_labels="{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}"
@@ -326,32 +293,27 @@ log_level="debug"
service_https_nodeport_port="443"
service_https_nodeport_nodeport="31243"
service_https_clusterip_port="443"
cat <<EOF >"$data_values_file"
---
app_name: $supervisor_app_name
namespace: $supervisor_namespace
api_group_suffix: $api_group_suffix
image_repo: $registry_repo
image_tag: $tag
log_level: $log_level
custom_labels: $supervisor_custom_labels
service_https_nodeport_port: $service_https_nodeport_port
service_https_nodeport_nodeport: $service_https_nodeport_nodeport
service_https_clusterip_port: $service_https_clusterip_port
EOF
if [ "$alternate_deploy" != "undefined" ] || [ "$alternate_deploy_supervisor" != "undefined" ] ; then
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The Pinniped Supervisor will be deployed with $alternate_deploy pinniped-supervisor $tag..."
$alternate_deploy pinniped-supervisor $tag
fi
if [ "$alternate_deploy_supervisor" != "undefined" ]; then
log_note "The Pinniped Supervisor will be deployed with $alternate_deploy_supervisor pinniped-supervisor $tag..."
$alternate_deploy_supervisor pinniped-supervisor $tag
fi
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The Pinniped Supervisor will be deployed with $alternate_deploy pinniped-supervisor $tag $registry_with_port $repo $data_values_file ..."
$alternate_deploy pinniped-supervisor "$tag" $registry_with_port $repo $data_values_file
else
log_note "Deploying the Pinniped Supervisor app to the cluster using kapp..."
pushd deploy/supervisor >/dev/null
ytt --file . \
--data-value "app_name=$supervisor_app_name" \
--data-value "namespace=$supervisor_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "log_level=$log_level" \
--data-value-yaml "custom_labels=$supervisor_custom_labels" \
--data-value-yaml "service_https_nodeport_port=$service_https_nodeport_port" \
--data-value-yaml "service_https_nodeport_nodeport=$service_https_nodeport_nodeport" \
--data-value-yaml "service_https_clusterip_port=$service_https_clusterip_port" \
>"$manifest"
ytt --file . --data-values-file "$data_values_file" >"$manifest"
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -361,41 +323,55 @@ fi
# Deploy the Pinniped Concierge
#
manifest=/tmp/pinniped-concierge.yaml
data_values_file=/tmp/concierge-values.yml
concierge_app_name="pinniped-concierge"
concierge_namespace="concierge"
webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/master|control plane/ {print $NF}')"
concierge_custom_labels="{myConciergeCustomLabelName: myConciergeCustomLabelValue}"
log_level="debug"
cat <<EOF >"$data_values_file"
---
app_name: $concierge_app_name
namespace: $concierge_namespace
api_group_suffix: $api_group_suffix
log_level: $log_level
custom_labels: $concierge_custom_labels
image_repo: $registry_repo
image_tag: $tag
discovery_url: $discovery_url
EOF
if [ "$alternate_deploy" != "undefined" ] || [ "$alternate_deploy_concierge" != "undefined" ] ; then
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The Pinniped Concierge will be deployed with $alternate_deploy pinniped-concierge $tag..."
$alternate_deploy pinniped-concierge $tag
fi
if [ "$alternate_deploy_concierge" != "undefined" ]; then
log_note "The Pinniped Concierge will be deployed with $alternate_deploy_concierge pinniped-concierge $tag..."
$alternate_deploy_concierge pinniped-concierge $tag
fi
if [ "$alternate_deploy" != "undefined" ]; then
log_note "The Pinniped Concierge will be deployed with $alternate_deploy pinniped-concierge $tag $registry_with_port $repo $data_values_file ..."
$alternate_deploy pinniped-concierge "$tag" $registry_with_port $repo $data_values_file
else
log_note "Deploying the Pinniped Concierge app to the cluster using kapp..."
pushd deploy/concierge >/dev/null
ytt --file . \
--data-value "app_name=$concierge_app_name" \
--data-value "namespace=$concierge_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "log_level=$log_level" \
--data-value-yaml "custom_labels=$concierge_custom_labels" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "discovery_url=$discovery_url" >"$manifest"
ytt --file . --data-values-file "$data_values_file" >"$manifest"
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
fi
#
# Create a test user in the local-user-authenticator and get its CA bundle.
#
log_note "Creating test user for local-user-authenticator..."
test_username="test-username"
test_groups="test-group-0,test-group-1"
test_password="$(openssl rand -hex 16)"
kubectl create secret generic "$test_username" \
--namespace local-user-authenticator \
--from-literal=groups="$test_groups" \
--from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \
--dry-run=client \
--output yaml |
kubectl apply -f -
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
#
# Download the test CA bundle that was generated in the Dex pod.
# Note that this returns a base64 encoded value.
@@ -500,4 +476,8 @@ log_note "You can rerun this script to redeploy local production code changes wh
log_note
log_note "To delete the deployments, run:"
log_note " kapp delete -a local-user-authenticator -y && kapp delete -a $concierge_app_name -y && kapp delete -a $supervisor_app_name -y"
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."
if [[ "${PINNIPED_USE_LOCAL_KIND_REGISTRY:-}" != "" ]]; then
log_note "When you're finished, use 'PINNIPED_USE_LOCAL_KIND_REGISTRY=1 ./hack/kind-down.sh' to tear down the cluster."
else
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."
fi
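For reference, a script passed via --alternate-deploy is invoked once per component with the same five positional arguments that deploy.sh reads (app, tag, registry, repo, data values file). A do-nothing stub that just echoes the contract might look like this (hypothetical file, for illustration only):

#!/usr/bin/env bash
set -euo pipefail
app="$1"; tag="$2"; registry="$3"; repo="$4"; data_values_file="$5"
echo "would deploy ${app} using image ${registry}/${repo}:${tag} with values from ${data_values_file}"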


@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2021 the Pinniped contributors. All Rights Reserved.
# Copyright 2021-2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
@@ -26,6 +26,8 @@ LOCAL_HOST="127.0.0.1:${LOCAL_PORT}"
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"
source hack/lib/helpers.sh
# Build the CLI for use later in the script.
go build ./cmd/pinniped
@@ -85,10 +87,10 @@ EOF
# Wait for the CredentialIssuer's impersonator status to update to be successful.
while [[ -z "$(kubectl get credentialissuer pinniped-concierge-config -o json |
jq '.status.strategies[] | select((.type=="ImpersonationProxy") and (.status=="Success"))')" ]]; do
echo "Waiting for a successful ImpersonationProxy strategy on CredentialIssuer..."
log_note "Waiting for a successful ImpersonationProxy strategy on CredentialIssuer..."
sleep 2
done
echo "Impersonator is available on https://${LOCAL_HOST}"
log_note "Impersonator is available on https://${LOCAL_HOST}"
# Make the impersonation proxy's port from the inside the cluster available locally.
kubectl port-forward -n $CONCIERGE_NAMESPACE deployment/$CONCIERGE_DEPLOYMENT ${LOCAL_PORT}:${IMPERSONATION_PROXY_PORT} &
@@ -97,14 +99,14 @@ port_forward_pid=$!
# Kill the kubectl port-forward command whenever the script is control-c cancelled or otherwise ends.
function cleanup() {
echo
echo "Cleaning up cluster resources..."
log_note "Cleaning up cluster resources..."
kubectl delete secret -n $LOCAL_USER_AUTHENTICATOR_NAMESPACE pinny-the-seal
kubectl delete configmap -n $CONCIERGE_NAMESPACE pinniped-concierge-impersonation-proxy-config
kubectl delete clusterrolebinding pinny-the-seal-can-edit
kubectl delete webhookauthenticator local-user-authenticator
echo "Stopping kubectl port-forward and exiting..."
log_note "Stopping kubectl port-forward and exiting..."
# It may have already shut down, so ignore errors.
kill -9 $port_forward_pid &> /dev/null || true
kill -9 $port_forward_pid &>/dev/null || true
}
trap cleanup EXIT
@@ -113,7 +115,7 @@ trap cleanup EXIT
--static-token "pinny-the-seal:password123" \
--concierge-mode ImpersonationProxy >/tmp/kubeconfig
echo
echo 'Ready. In another tab, use "kubectl --kubeconfig /tmp/kubeconfig <cmd>" to make requests through the impersonation proxy.'
echo "When done, cancel with ctrl-C to clean up."
log_note
log_note 'Ready. In another tab, use "kubectl --kubeconfig /tmp/kubeconfig <cmd>" to make requests through the impersonation proxy.'
log_note "When done, cancel with ctrl-C to clean up."
wait $port_forward_pid
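Once the script reports that it is ready, the generated kubeconfig can be exercised from another terminal; for example (the whoami check assumes the pinniped CLI binary built earlier by this script, sitting in the repo root):

kubectl --kubeconfig /tmp/kubeconfig get pods -A
./pinniped whoami --kubeconfig /tmp/kubeconfig  # should report the pinny-the-seal test user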


@@ -37,15 +37,7 @@ set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
source hack/lib/helpers.sh
use_oidc_upstream=no
use_ldap_upstream=no
@@ -122,8 +114,8 @@ if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
# Wait for its pods to be ready.
echo "Waiting for Contour to be ready..."
kubectl wait --for 'jsonpath={.status.phase}=Succeeded' pods -l 'app=contour-certgen' -n projectcontour --timeout 60s
kubectl wait --for 'jsonpath={.status.phase}=Running' pods -l 'app!=contour-certgen' -n projectcontour --timeout 60s
# Create an ingress for the Supervisor which uses TLS passthrough to allow the Supervisor to terminate TLS.
cat <<EOF | kubectl apply --namespace "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" -f -
@@ -320,7 +312,7 @@ kubectl create secret tls -n "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" my-federation
# Make a FederationDomain using the TLS Secret and identity providers from above in a temp file.
fd_file="/tmp/federationdomain.yaml"
cat << EOF > $fd_file
cat <<EOF >$fd_file
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: FederationDomain
metadata:
@@ -334,7 +326,7 @@ EOF
if [[ "$use_oidc_upstream" == "yes" ]]; then
# Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below.
cat << EOF >> $fd_file
cat <<EOF >>$fd_file
- displayName: "My OIDC IDP 🚀"
objectRef:
@@ -358,7 +350,7 @@ fi
if [[ "$use_ldap_upstream" == "yes" ]]; then
# Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below.
cat << EOF >> $fd_file
cat <<EOF >>$fd_file
- displayName: "My LDAP IDP 🚀"
objectRef:
@@ -412,7 +404,7 @@ fi
if [[ "$use_ad_upstream" == "yes" ]]; then
# Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below.
cat << EOF >> $fd_file
cat <<EOF >>$fd_file
- displayName: "My AD IDP"
objectRef: