From 11bd69cf2d772d9e721437bd1328ae03eb3eed37 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Tue, 29 Oct 2024 12:24:30 -0700 Subject: [PATCH] initial commit on ci branch: migrates code from private repo --- .gitignore | 21 +- AD-SETUP.md | 602 ++++ CODE_OF_CONDUCT.md | 1 + CONTRIBUTING.md | 1 + LICENSE | 202 ++ MAINTAINERS.md | 1 + README.md | 181 +- SECURITY.md | 1 + dockerfiles/code-coverage-uploader/Dockerfile | 14 + dockerfiles/crane/Dockerfile | 10 + .../deployment-yaml-formatter/Dockerfile | 16 + dockerfiles/eks-deployer/Dockerfile | 25 + dockerfiles/gh-cli/Dockerfile | 15 + .../integration-test-runner-beta/Dockerfile | 80 + .../integration-test-runner/Dockerfile | 80 + dockerfiles/k8s-app-deployer/Dockerfile | 34 + dockerfiles/k8s-code-generator/Dockerfile | 20 + dockerfiles/k8s-code-generator/setup.sh | 89 + dockerfiles/pool-trigger-resource/Dockerfile | 17 + .../pool-trigger-resource/assets/check | 219 ++ .../pool-trigger-resource/assets/common.sh | 28 + dockerfiles/pool-trigger-resource/assets/in | 2 + dockerfiles/pool-trigger-resource/assets/out | 2 + dockerfiles/test-bitnami-ldap/Dockerfile | 4 + dockerfiles/test-cfssl/Dockerfile | 28 + dockerfiles/test-dex/Dockerfile | 4 + dockerfiles/test-forward-proxy/Dockerfile | 13 + dockerfiles/test-forward-proxy/squid.conf | 56 + dockerfiles/test-kubectl/Dockerfile | 4 + hack/approve-and-merge.sh | 38 + hack/create-gke-acceptance-env.sh | 44 + hack/edit-gcloud-secret.sh | 69 + hack/fly-helpers.sh | 58 + hack/get-aws-ad-env-vars.sh | 43 + hack/get-github-env-vars.sh | 39 + hack/list-all-running-jobs.sh | 20 + hack/pinniped-pre-commit.sh | 37 + hack/prepare-for-uninstall-test.sh | 139 + ...re-remote-cluster-for-integration-tests.sh | 248 ++ hack/remote-workstation/create.sh | 43 + hack/remote-workstation/delete.sh | 21 + hack/remote-workstation/lib/deps.sh | 96 + hack/remote-workstation/rsync-to-local.sh | 59 + hack/remote-workstation/rsync.sh | 58 + hack/remote-workstation/ssh.sh | 22 + hack/remote-workstation/start.sh | 20 + hack/remote-workstation/stop.sh | 20 + hack/run-integration-tests.sh | 87 + hack/setup-fly.sh | 31 + infra/README.md | 60 + infra/concourse-install/bootstrap-secrets.sh | 72 + .../delete-concourse-internal-workers.sh | 65 + .../deploy-concourse-internal-workers.sh | 109 + .../concourse-install/deploy-concourse-web.sh | 120 + .../init-container-overlay-workers.yaml | 24 + .../internal-workers/values-workers.yaml | 79 + .../ytt-helm-postrender-workers.sh | 10 + .../scale-down-concourse-internal-workers.sh | 65 + .../scale-print-concourse-internal-workers.sh | 89 + .../scale-up-concourse-internal-workers.sh | 65 + .../web/init-container-overlay-web.yaml | 24 + infra/concourse-install/web/values-web.yaml | 119 + .../web/ytt-helm-postrender-web.sh | 10 + infra/terraform/gcloud/.terraform.lock.hcl | 64 + infra/terraform/gcloud/README.md | 28 + infra/terraform/gcloud/address/main.tf | 26 + infra/terraform/gcloud/address/outputs.tf | 10 + infra/terraform/gcloud/address/variables.tf | 12 + infra/terraform/gcloud/cluster/main.tf | 124 + infra/terraform/gcloud/cluster/outputs.tf | 10 + infra/terraform/gcloud/cluster/variables.tf | 25 + infra/terraform/gcloud/cluster/vpc/main.tf | 69 + infra/terraform/gcloud/cluster/vpc/outputs.tf | 22 + .../terraform/gcloud/cluster/vpc/variables.tf | 32 + infra/terraform/gcloud/database/main.tf | 78 + infra/terraform/gcloud/database/outputs.tf | 30 + infra/terraform/gcloud/database/variables.tf | 37 + infra/terraform/gcloud/gcp.tf | 33 + infra/terraform/gcloud/main.tf | 61 + 
infra/terraform/gcloud/outputs.tf | 54 + infra/terraform/gcloud/variables.tf | 28 + pipelines/cleanup-aws/pipeline.yml | 68 + pipelines/cleanup-aws/update-pipeline.sh | 12 + pipelines/concourse-workers/pipeline.yml | 122 + .../scale-down-gke-replicas.yml | 13 + .../scale-up-gke-replicas.yml | 13 + .../concourse-workers/update-pipeline.sh | 12 + pipelines/dockerfile-builders/pipeline.yml | 1167 +++++++ .../dockerfile-builders/update-pipeline.sh | 16 + pipelines/go-compatibility/pipeline.yml | 146 + pipelines/go-compatibility/update-pipeline.sh | 13 + pipelines/kind-node-builder/pipeline.yml | 109 + .../kind-node-builder/update-pipeline.sh | 16 + pipelines/main/pipeline.yml | 2788 +++++++++++++++++ pipelines/main/update-pipeline.sh | 16 + pipelines/pull-requests/pipeline.yml | 1914 +++++++++++ pipelines/pull-requests/update-pipeline.sh | 15 + pipelines/security-scan/pipeline.yml | 267 ++ pipelines/security-scan/update-pipeline.sh | 16 + .../prepare-cluster-for-integration-tests.sh | 1327 ++++++++ .../test-binaries-image/Dockerfile | 27 + .../Dockerfile.dockerignore | 10 + .../test-binaries-image/Dockerfile_fips | 31 + .../shared-tasks/build-cli-binaries/task.sh | 108 + .../shared-tasks/build-cli-binaries/task.yml | 23 + .../build-kind-node-image/build-image.sh | 85 + .../build-kind-node-image/task.sh | 43 + .../build-kind-node-image/task.yml | 22 + .../check-dockerfile-deps-updated/task.sh | 63 + .../check-dockerfile-deps-updated/task.yml | 14 + .../check-golang-deps-updated/task.sh | 17 + .../check-golang-deps-updated/task.yml | 19 + pipelines/shared-tasks/cleanup-aws/task.sh | 95 + pipelines/shared-tasks/cleanup-aws/task.yml | 20 + .../confirm-built-with-fips/task.sh | 55 + .../confirm-built-with-fips/task.yml | 15 + .../shared-tasks/confirm-version/task.yml | 38 + pipelines/shared-tasks/copy-image/task.sh | 74 + pipelines/shared-tasks/copy-image/task.yml | 18 + .../create-kind-node-builder-vm/task.sh | 31 + .../create-kind-node-builder-vm/task.yml | 16 + .../shared-tasks/create-or-update-pr/task.sh | 103 + .../shared-tasks/create-or-update-pr/task.yml | 14 + .../shared-tasks/deploy-aks-cluster/task.sh | 57 + .../shared-tasks/deploy-aks-cluster/task.yml | 18 + .../shared-tasks/deploy-eks-cluster/task.sh | 133 + .../shared-tasks/deploy-eks-cluster/task.yml | 17 + .../shared-tasks/deploy-gke-cluster/task.sh | 77 + .../shared-tasks/deploy-gke-cluster/task.yml | 18 + .../deploy-kind-cluster-vm/gce-init.sh | 206 ++ .../deploy-kind-cluster-vm/task.sh | 86 + .../deploy-kind-cluster-vm/task.yml | 21 + .../deploy-to-acceptance-gke/task.sh | 96 + .../deploy-to-acceptance-gke/task.yml | 106 + .../task.sh | 335 ++ .../task.yml | 22 + .../deploy-to-integration/task.sh | 56 + .../deploy-to-integration/task.yml | 105 + pipelines/shared-tasks/detach-cluster/task.sh | 18 + .../shared-tasks/detach-cluster/task.yml | 10 + .../export-cluster-diagnostics/task.sh | 67 + .../export-cluster-diagnostics/task.yml | 19 + .../shared-tasks/format-release/task.yml | 89 + .../generate-pinniped-password/task.sh | 9 + .../generate-pinniped-password/task.yml | 12 + .../sample-federation-domain.yaml | 26 + .../task.sh | 86 + .../task.yml | 16 + pipelines/shared-tasks/kapp-delete/task.sh | 8 + pipelines/shared-tasks/kapp-delete/task.yml | 12 + .../shared-tasks/pre-warm-cluster/task.sh | 40 + .../shared-tasks/pre-warm-cluster/task.yml | 10 + .../shared-tasks/remove-aks-cluster/task.sh | 19 + .../shared-tasks/remove-aks-cluster/task.yml | 16 + .../shared-tasks/remove-eks-cluster/task.sh | 26 + 
.../shared-tasks/remove-eks-cluster/task.yml | 16 + .../shared-tasks/remove-gce-worker-vm/task.sh | 22 + .../remove-gce-worker-vm/task.yml | 16 + .../shared-tasks/remove-gke-cluster/task.sh | 23 + .../shared-tasks/remove-gke-cluster/task.yml | 15 + .../remove-kind-cluster-vm/task.sh | 22 + .../remove-kind-cluster-vm/task.yml | 16 + .../remove-orphaned-kind-cluster-vms/task.sh | 127 + .../remove-orphaned-kind-cluster-vms/task.yml | 17 + .../shared-tasks/run-go-vuln-scan/task.sh | 21 + .../shared-tasks/run-go-vuln-scan/task.yml | 18 + .../run-integration-tests/task.sh | 280 ++ .../run-integration-tests/task.yml | 31 + .../run-kubectl-uninstall/task.sh | 26 + .../run-kubectl-uninstall/task.yaml | 12 + ...-uninstall-from-existing-namespace-test.sh | 134 + .../run-uninstall-test/run-uninstall-test.sh | 121 + .../shared-tasks/run-uninstall-test/task.sh | 19 + .../shared-tasks/run-uninstall-test/task.yml | 15 + pipelines/shared-tasks/run-unit-tests/task.sh | 17 + .../shared-tasks/run-unit-tests/task.yml | 20 + .../shared-tasks/run-verify-codegen/task.sh | 31 + .../shared-tasks/run-verify-codegen/task.yml | 13 + .../run-verify-go-generate/task.sh | 28 + .../run-verify-go-generate/task.yml | 17 + .../run-verify-go-mod-tidy/task.sh | 28 + .../run-verify-go-mod-tidy/task.yml | 17 + .../shared-tasks/run-verify-lint/task.sh | 28 + .../shared-tasks/run-verify-lint/task.yml | 18 + .../shared-tasks/scan-image-trivy/task.yml | 37 + .../template-deployment-yamls/task.sh | 39 + .../template-deployment-yamls/task.yml | 16 + .../update-homebrew-formula/task.sh | 35 + .../update-homebrew-formula/task.yml | 18 + .../update-version-and-cli-docs/task.sh | 64 + .../update-version-and-cli-docs/task.yml | 18 + .../shared-tasks/upload-test-coverage/task.sh | 9 + .../upload-test-coverage/task.yml | 13 + pipelines/update-all-pipelines.sh | 29 + 194 files changed, 16873 insertions(+), 16 deletions(-) create mode 100644 AD-SETUP.md create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 MAINTAINERS.md create mode 100644 SECURITY.md create mode 100644 dockerfiles/code-coverage-uploader/Dockerfile create mode 100644 dockerfiles/crane/Dockerfile create mode 100644 dockerfiles/deployment-yaml-formatter/Dockerfile create mode 100644 dockerfiles/eks-deployer/Dockerfile create mode 100644 dockerfiles/gh-cli/Dockerfile create mode 100644 dockerfiles/integration-test-runner-beta/Dockerfile create mode 100644 dockerfiles/integration-test-runner/Dockerfile create mode 100644 dockerfiles/k8s-app-deployer/Dockerfile create mode 100644 dockerfiles/k8s-code-generator/Dockerfile create mode 100755 dockerfiles/k8s-code-generator/setup.sh create mode 100644 dockerfiles/pool-trigger-resource/Dockerfile create mode 100755 dockerfiles/pool-trigger-resource/assets/check create mode 100755 dockerfiles/pool-trigger-resource/assets/common.sh create mode 100755 dockerfiles/pool-trigger-resource/assets/in create mode 100755 dockerfiles/pool-trigger-resource/assets/out create mode 100644 dockerfiles/test-bitnami-ldap/Dockerfile create mode 100644 dockerfiles/test-cfssl/Dockerfile create mode 100644 dockerfiles/test-dex/Dockerfile create mode 100644 dockerfiles/test-forward-proxy/Dockerfile create mode 100644 dockerfiles/test-forward-proxy/squid.conf create mode 100644 dockerfiles/test-kubectl/Dockerfile create mode 100755 hack/approve-and-merge.sh create mode 100755 hack/create-gke-acceptance-env.sh create mode 100755 hack/edit-gcloud-secret.sh create mode 100644 hack/fly-helpers.sh 
create mode 100755 hack/get-aws-ad-env-vars.sh create mode 100755 hack/get-github-env-vars.sh create mode 100755 hack/list-all-running-jobs.sh create mode 100755 hack/pinniped-pre-commit.sh create mode 100755 hack/prepare-for-uninstall-test.sh create mode 100755 hack/prepare-remote-cluster-for-integration-tests.sh create mode 100755 hack/remote-workstation/create.sh create mode 100755 hack/remote-workstation/delete.sh create mode 100755 hack/remote-workstation/lib/deps.sh create mode 100755 hack/remote-workstation/rsync-to-local.sh create mode 100755 hack/remote-workstation/rsync.sh create mode 100755 hack/remote-workstation/ssh.sh create mode 100755 hack/remote-workstation/start.sh create mode 100755 hack/remote-workstation/stop.sh create mode 100755 hack/run-integration-tests.sh create mode 100755 hack/setup-fly.sh create mode 100644 infra/README.md create mode 100755 infra/concourse-install/bootstrap-secrets.sh create mode 100755 infra/concourse-install/delete-concourse-internal-workers.sh create mode 100755 infra/concourse-install/deploy-concourse-internal-workers.sh create mode 100755 infra/concourse-install/deploy-concourse-web.sh create mode 100644 infra/concourse-install/internal-workers/init-container-overlay-workers.yaml create mode 100644 infra/concourse-install/internal-workers/values-workers.yaml create mode 100755 infra/concourse-install/internal-workers/ytt-helm-postrender-workers.sh create mode 100755 infra/concourse-install/scale-down-concourse-internal-workers.sh create mode 100755 infra/concourse-install/scale-print-concourse-internal-workers.sh create mode 100755 infra/concourse-install/scale-up-concourse-internal-workers.sh create mode 100644 infra/concourse-install/web/init-container-overlay-web.yaml create mode 100644 infra/concourse-install/web/values-web.yaml create mode 100755 infra/concourse-install/web/ytt-helm-postrender-web.sh create mode 100644 infra/terraform/gcloud/.terraform.lock.hcl create mode 100644 infra/terraform/gcloud/README.md create mode 100644 infra/terraform/gcloud/address/main.tf create mode 100644 infra/terraform/gcloud/address/outputs.tf create mode 100644 infra/terraform/gcloud/address/variables.tf create mode 100644 infra/terraform/gcloud/cluster/main.tf create mode 100644 infra/terraform/gcloud/cluster/outputs.tf create mode 100644 infra/terraform/gcloud/cluster/variables.tf create mode 100644 infra/terraform/gcloud/cluster/vpc/main.tf create mode 100644 infra/terraform/gcloud/cluster/vpc/outputs.tf create mode 100644 infra/terraform/gcloud/cluster/vpc/variables.tf create mode 100644 infra/terraform/gcloud/database/main.tf create mode 100644 infra/terraform/gcloud/database/outputs.tf create mode 100644 infra/terraform/gcloud/database/variables.tf create mode 100644 infra/terraform/gcloud/gcp.tf create mode 100644 infra/terraform/gcloud/main.tf create mode 100644 infra/terraform/gcloud/outputs.tf create mode 100644 infra/terraform/gcloud/variables.tf create mode 100644 pipelines/cleanup-aws/pipeline.yml create mode 100755 pipelines/cleanup-aws/update-pipeline.sh create mode 100644 pipelines/concourse-workers/pipeline.yml create mode 100644 pipelines/concourse-workers/scale-down-gke-replicas.yml create mode 100644 pipelines/concourse-workers/scale-up-gke-replicas.yml create mode 100755 pipelines/concourse-workers/update-pipeline.sh create mode 100644 pipelines/dockerfile-builders/pipeline.yml create mode 100755 pipelines/dockerfile-builders/update-pipeline.sh create mode 100644 pipelines/go-compatibility/pipeline.yml create mode 100755 
pipelines/go-compatibility/update-pipeline.sh create mode 100644 pipelines/kind-node-builder/pipeline.yml create mode 100755 pipelines/kind-node-builder/update-pipeline.sh create mode 100644 pipelines/main/pipeline.yml create mode 100755 pipelines/main/update-pipeline.sh create mode 100644 pipelines/pull-requests/pipeline.yml create mode 100755 pipelines/pull-requests/update-pipeline.sh create mode 100644 pipelines/security-scan/pipeline.yml create mode 100755 pipelines/security-scan/update-pipeline.sh create mode 100755 pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh create mode 100644 pipelines/shared-helpers/test-binaries-image/Dockerfile create mode 100644 pipelines/shared-helpers/test-binaries-image/Dockerfile.dockerignore create mode 100644 pipelines/shared-helpers/test-binaries-image/Dockerfile_fips create mode 100755 pipelines/shared-tasks/build-cli-binaries/task.sh create mode 100644 pipelines/shared-tasks/build-cli-binaries/task.yml create mode 100755 pipelines/shared-tasks/build-kind-node-image/build-image.sh create mode 100755 pipelines/shared-tasks/build-kind-node-image/task.sh create mode 100644 pipelines/shared-tasks/build-kind-node-image/task.yml create mode 100755 pipelines/shared-tasks/check-dockerfile-deps-updated/task.sh create mode 100644 pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml create mode 100755 pipelines/shared-tasks/check-golang-deps-updated/task.sh create mode 100644 pipelines/shared-tasks/check-golang-deps-updated/task.yml create mode 100755 pipelines/shared-tasks/cleanup-aws/task.sh create mode 100644 pipelines/shared-tasks/cleanup-aws/task.yml create mode 100755 pipelines/shared-tasks/confirm-built-with-fips/task.sh create mode 100644 pipelines/shared-tasks/confirm-built-with-fips/task.yml create mode 100644 pipelines/shared-tasks/confirm-version/task.yml create mode 100755 pipelines/shared-tasks/copy-image/task.sh create mode 100644 pipelines/shared-tasks/copy-image/task.yml create mode 100755 pipelines/shared-tasks/create-kind-node-builder-vm/task.sh create mode 100644 pipelines/shared-tasks/create-kind-node-builder-vm/task.yml create mode 100755 pipelines/shared-tasks/create-or-update-pr/task.sh create mode 100644 pipelines/shared-tasks/create-or-update-pr/task.yml create mode 100755 pipelines/shared-tasks/deploy-aks-cluster/task.sh create mode 100644 pipelines/shared-tasks/deploy-aks-cluster/task.yml create mode 100755 pipelines/shared-tasks/deploy-eks-cluster/task.sh create mode 100644 pipelines/shared-tasks/deploy-eks-cluster/task.yml create mode 100755 pipelines/shared-tasks/deploy-gke-cluster/task.sh create mode 100644 pipelines/shared-tasks/deploy-gke-cluster/task.yml create mode 100644 pipelines/shared-tasks/deploy-kind-cluster-vm/gce-init.sh create mode 100755 pipelines/shared-tasks/deploy-kind-cluster-vm/task.sh create mode 100644 pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml create mode 100755 pipelines/shared-tasks/deploy-to-acceptance-gke/task.sh create mode 100644 pipelines/shared-tasks/deploy-to-acceptance-gke/task.yml create mode 100755 pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.sh create mode 100644 pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml create mode 100755 pipelines/shared-tasks/deploy-to-integration/task.sh create mode 100644 pipelines/shared-tasks/deploy-to-integration/task.yml create mode 100755 pipelines/shared-tasks/detach-cluster/task.sh create mode 100644 pipelines/shared-tasks/detach-cluster/task.yml create mode 100755 
pipelines/shared-tasks/export-cluster-diagnostics/task.sh create mode 100644 pipelines/shared-tasks/export-cluster-diagnostics/task.yml create mode 100644 pipelines/shared-tasks/format-release/task.yml create mode 100755 pipelines/shared-tasks/generate-pinniped-password/task.sh create mode 100644 pipelines/shared-tasks/generate-pinniped-password/task.yml create mode 100644 pipelines/shared-tasks/install-and-configure-cert-manager/sample-federation-domain.yaml create mode 100755 pipelines/shared-tasks/install-and-configure-cert-manager/task.sh create mode 100644 pipelines/shared-tasks/install-and-configure-cert-manager/task.yml create mode 100755 pipelines/shared-tasks/kapp-delete/task.sh create mode 100644 pipelines/shared-tasks/kapp-delete/task.yml create mode 100755 pipelines/shared-tasks/pre-warm-cluster/task.sh create mode 100644 pipelines/shared-tasks/pre-warm-cluster/task.yml create mode 100755 pipelines/shared-tasks/remove-aks-cluster/task.sh create mode 100644 pipelines/shared-tasks/remove-aks-cluster/task.yml create mode 100755 pipelines/shared-tasks/remove-eks-cluster/task.sh create mode 100644 pipelines/shared-tasks/remove-eks-cluster/task.yml create mode 100755 pipelines/shared-tasks/remove-gce-worker-vm/task.sh create mode 100644 pipelines/shared-tasks/remove-gce-worker-vm/task.yml create mode 100755 pipelines/shared-tasks/remove-gke-cluster/task.sh create mode 100644 pipelines/shared-tasks/remove-gke-cluster/task.yml create mode 100755 pipelines/shared-tasks/remove-kind-cluster-vm/task.sh create mode 100644 pipelines/shared-tasks/remove-kind-cluster-vm/task.yml create mode 100755 pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.sh create mode 100644 pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml create mode 100755 pipelines/shared-tasks/run-go-vuln-scan/task.sh create mode 100644 pipelines/shared-tasks/run-go-vuln-scan/task.yml create mode 100755 pipelines/shared-tasks/run-integration-tests/task.sh create mode 100644 pipelines/shared-tasks/run-integration-tests/task.yml create mode 100755 pipelines/shared-tasks/run-kubectl-uninstall/task.sh create mode 100644 pipelines/shared-tasks/run-kubectl-uninstall/task.yaml create mode 100755 pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh create mode 100755 pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh create mode 100755 pipelines/shared-tasks/run-uninstall-test/task.sh create mode 100644 pipelines/shared-tasks/run-uninstall-test/task.yml create mode 100755 pipelines/shared-tasks/run-unit-tests/task.sh create mode 100644 pipelines/shared-tasks/run-unit-tests/task.yml create mode 100755 pipelines/shared-tasks/run-verify-codegen/task.sh create mode 100644 pipelines/shared-tasks/run-verify-codegen/task.yml create mode 100755 pipelines/shared-tasks/run-verify-go-generate/task.sh create mode 100644 pipelines/shared-tasks/run-verify-go-generate/task.yml create mode 100755 pipelines/shared-tasks/run-verify-go-mod-tidy/task.sh create mode 100644 pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml create mode 100755 pipelines/shared-tasks/run-verify-lint/task.sh create mode 100644 pipelines/shared-tasks/run-verify-lint/task.yml create mode 100644 pipelines/shared-tasks/scan-image-trivy/task.yml create mode 100755 pipelines/shared-tasks/template-deployment-yamls/task.sh create mode 100644 pipelines/shared-tasks/template-deployment-yamls/task.yml create mode 100755 pipelines/shared-tasks/update-homebrew-formula/task.sh create mode 100644 
pipelines/shared-tasks/update-homebrew-formula/task.yml create mode 100755 pipelines/shared-tasks/update-version-and-cli-docs/task.sh create mode 100644 pipelines/shared-tasks/update-version-and-cli-docs/task.yml create mode 100755 pipelines/shared-tasks/upload-test-coverage/task.sh create mode 100644 pipelines/shared-tasks/upload-test-coverage/task.yml create mode 100755 pipelines/update-all-pipelines.sh diff --git a/.gitignore b/.gitignore index 66fd13c90..a9fc24396 100644 --- a/.gitignore +++ b/.gitignore @@ -1,15 +1,6 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ +.idea +.terraform +*.tfstate.* +*.tfstate +kubeconfig.yaml +.DS_Store diff --git a/AD-SETUP.md b/AD-SETUP.md new file mode 100644 index 000000000..2957f076c --- /dev/null +++ b/AD-SETUP.md @@ -0,0 +1,602 @@ +# Creating an Active Directory server on Google Cloud for Pinniped integration tests + +This documents the steps that were taken to create our test AD server used by the integration tests. +The integration tests use LDAPS and StartTLS to connect to the AD server. + +## Create a Windows Server VM and configure it as an AD Domain Controller + +The steps in this section were mostly inspired by +https://cloud.google.com/architecture/deploy-an-active-directory-forest-on-compute-engine. + +From your Mac, create a VPC, subnet, firewall rules, admin password, reserved static IP, and the VM itself. + +On your Mac: + +```shell +# Login as yourself. +gcloud auth login + +# Set some variables. +project="REDACTED" # Change this to be the actual project name before running these commands. +region="us-central1" +zone="us-central1-b" +vpc_name="ad" + +# Create VPC. +gcloud compute networks create ${vpc_name} \ + --project ${project} \ + --description "VPC network to deploy Active Directory" \ + --subnet-mode custom + +# Create subnet. +# The google tutorial says to "enable Private Google Access so that Windows can activate without internet access." +gcloud compute networks subnets create domain-controllers \ + --project ${project} --region ${region} \ + --network ${vpc_name} \ + --range "10.0.0.0/28" \ + --enable-private-ip-google-access + +# Create a firewall rule to allow RDP. Find out what your public IP address is by going to https://whatismyipaddress.com. +# Copy/paste your IPv4 address into this rule. Replace the X.X.X.X placeholder address shown here with your real IP. +gcloud compute firewall-rules create allow-rdp-ingress-to-addc \ + --project ${project} \ + --direction INGRESS \ + --action allow \ + --rules tcp:3389 \ + --source-ranges "X.X.X.X/32" \ + --target-tags ad-domaincontroller \ + --network ${vpc_name} \ + --priority 10000 + +# Allow LDAPS (port 636) from the whole internet. +gcloud compute firewall-rules create allow-ldaps-ingress-to-addc \ + --project ${project} \ + --direction INGRESS \ + --action allow \ + --rules tcp:636 \ + --source-ranges "0.0.0.0/0" \ + --target-tags ad-domaincontroller \ + --network ${vpc_name} \ + --priority 10000 + +# Allow LDAP (port 389) from the whole internet, to allow the integration tests to use StartTLS. 
+gcloud compute firewall-rules create allow-ldap-ingress-to-addc \ + --project ${project} \ + --direction INGRESS \ + --action allow \ + --rules tcp:389 \ + --source-ranges "0.0.0.0/0" \ + --target-tags ad-domaincontroller \ + --network ${vpc_name} \ + --priority 10000 + +# Reserve a static public IP address for the domain controller VM. +addressOfDc1=$(gcloud compute addresses create ad-domain-controller \ + --project ${project} --region ${region} \ + --format="value(address)") + +# Create an admin password for the Administrator user on Windows, and save it to secrets manager. +password="$(openssl rand -hex 8)-$(openssl rand -hex 8)" +echo -n "$password" > password.tmp +gcloud secrets create active-directory-dc1-password \ + --project ${project} \ + --data-file password.tmp +rm password.tmp + +# This creates a service account called ad-domaincontroller@PROJECT_NAME.iam.gserviceaccount.com +# (where PROJECT_NAME is the actual GCP project name) and sets the account name to the +# variable $dcServiceAccount. +dcServiceAccount=$(gcloud iam service-accounts create ad-domaincontroller \ + --project ${project} \ + --display-name "AD Domain Controller VM Service Account" \ + --format "value(email)") + +# Allow the new service account to temporarily read the Windows admin password from secret manager. +# The following `date` command might only work on MacOS. It prints the time like this: 2024-10-23T19:20:36Z +one_hour_from_now=$(TZ=UTC date -v "+1H" +"%Y-%m-%dT%H:%M:%SZ") +gcloud secrets add-iam-policy-binding active-directory-dc1-password \ + --project ${project} \ + "--member=serviceAccount:$dcServiceAccount" \ + --role=roles/secretmanager.secretAccessor \ + --condition="title=Expires after 1h,expression=request.time < timestamp('$one_hour_from_now')" + +# Optional: list all bindings to see the binding that you just created. +gcloud secrets get-iam-policy active-directory-dc1-password \ + --project ${project} + +# Create a powershell startup script in a local file. +cat <<"EOF" > dc-startup.ps1 +$ErrorActionPreference = "Stop" + +# +# Only run the script if the VM is not a domain controller already. +# +if ((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 2) { + exit +} + +# +# Read configuration from metadata. +# +Import-Module "${Env:ProgramFiles}\Google\Compute Engine\sysprep\gce_base.psm1" + +Write-Host "Reading metadata..." +$ActiveDirectoryDnsDomain = Get-MetaData -Property "attributes/ActiveDirectoryDnsDomain" -instance_only +$ActiveDirectoryNetbiosDomain = Get-MetaData -Property "attributes/ActiveDirectoryNetbiosDomain" -instance_only +$ProjectId = Get-MetaData -Property "project-id" -project_only +$AccessToken = (Get-MetaData -Property "service-accounts/default/token" | ConvertFrom-Json).access_token + +# +# Read the DSRM password from secret manager. +# +Write-Host "Reading secret from secret manager..." +$Secret = (Invoke-RestMethod ` + -Headers @{ + "Metadata-Flavor" = "Google"; + "x-goog-user-project" = $ProjectId; + "Authorization" = "Bearer $AccessToken"} ` + -Uri "https://secretmanager.googleapis.com/v1/projects/$ProjectId/secrets/active-directory-dc1-password/versions/latest:access") +$DsrmPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Secret.payload.data)) +$DsrmPassword = ConvertTo-SecureString -AsPlainText $DsrmPassword -force + +# +# Promote. +# +Write-Host "Setting administrator password..." 
+Set-LocalUser -Name Administrator -Password $DsrmPassword + +Write-Host "Creating a new forest $ActiveDirectoryDnsDomain ($ActiveDirectoryNetbiosDomain)..." +Install-ADDSForest ` + -DomainName $ActiveDirectoryDnsDomain ` + -DomainNetbiosName $ActiveDirectoryNetbiosDomain ` + -SafeModeAdministratorPassword $DsrmPassword ` + -DomainMode Win2008R2 ` + -ForestMode Win2008R2 ` + -InstallDns ` + -CreateDnsDelegation:$False ` + -NoRebootOnCompletion:$True ` + -Confirm:$false + +# +# Configure DNS. +# +Write-Host "Configuring DNS settings..." +Get-Netadapter| Disable-NetAdapterBinding -ComponentID ms_tcpip6 +Set-DnsClientServerAddress ` + -InterfaceIndex (Get-NetAdapter -Name Ethernet).InterfaceIndex ` + -ServerAddresses 127.0.0.1 + +# +# Enable LSA protection. +# +New-ItemProperty ` + -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa" ` + -Name "RunAsPPL" ` + -Value 1 ` + -PropertyType DWord + +Write-Host "Restarting to apply all settings..." +Restart-Computer +EOF + +# Create a domain controller VM. +# E2 are the cheapest VMs. e2-medium has 2 vCPUs (shared with other customers) and 4 GB of memory. +# See https://cloud.google.com/compute/docs/general-purpose-machines#e2-shared-core. +# When we originally set up this VM, we actually started it as n2-standard-2 and after we +# finished setting up everything as shown in this guide, then we stopped the VM and changed its +# type to e2-medium and started the VM again. Maybe it would work fine to create it as +# e2-medium from the beginning, but note that we didn't actually test that. +gcloud compute instances create active-directory-dc1 \ + --project ${project} \ + --zone ${zone} \ + --image-family windows-2022 \ + --image-project windows-cloud \ + --machine-type e2-medium \ + --tags ad-domaincontroller \ + --metadata "ActiveDirectoryDnsDomain=activedirectory.test.pinniped.dev,ActiveDirectoryNetbiosDomain=pinniped-ad,sysprep-specialize-script-ps1=Install-WindowsFeature AD-Domain-Services -IncludeManagementTools; Install-WindowsFeature DNS,disable-account-manager=true" \ + --metadata-from-file windows-startup-script-ps1=dc-startup.ps1 \ + --address ${addressOfDc1} \ + --subnet=domain-controllers \ + --service-account "$dcServiceAccount" \ + --scopes cloud-platform \ + --shielded-integrity-monitoring \ + --shielded-secure-boot \ + --shielded-vtpm + +# Monitor the initialization process of the first domain controller by viewing its serial port output. +# It should install the sysprep stuff, reboot, run our startup script, and then reboot again. +gcloud compute instances tail-serial-port-output active-directory-dc1 \ + --project ${project} \ + --zone ${zone} +# Use CTRL-C to cancel tailing the output. +``` + +## Update DNS + +Update the Cloud DNS entry for `activedirectory.test.pinniped.dev.` to be an "A" record pointing to the +public static IP of the VM. This is easier to do in the Cloud DNS UI in your browser. +It would take many gcloud CLI commands to accomplish the same task. + +## Configure test users and groups + +Make sure you have an RDP client installed. On a Mac, you can install RDP from the App Store. +It was recently renamed "Windows App". + +Note: To copy/paste in the RDP client, you may need to use CTRL-C/CTRL-V if CMD-C/CMD-V don't work. + +RDP into the Windows VM. To connect, use `activedirectory.test.pinniped.dev` as the name of the server, +the username `Administrator`, and the password from the `active-directory-dc1-password` entry in Secrets Manager. +You can ignore the RDP certificate error. + +In your RDP session, open Powershell. 
Then run the following commands to add some users and groups, +change the password policy, and grant some permissions. + +Before running the commands, replace the redacted passwords as follows: +- The value for `REDACTED_BIND_USER_PASSWORD` can be found at `aws-ad-bind-account-password` in the `concourse-secrets` secret +- The value for `REDACTED_PINNY_USER_PASSWORD` can be found at `aws-ad-user-password` in the `concourse-secrets` secret +- The value for `REDACTED_DEACTIVATED_USER_PASSWORD` can be found at `aws-ad-deactivated-user-password` in the `concourse-secrets` secret + +```shell +New-ADOrganizationalUnit -Name "pinniped-ad" ` + -ProtectedFromAccidentalDeletion $false + +New-ADOrganizationalUnit -Name "Users" ` + -Path "OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" ` + -ProtectedFromAccidentalDeletion $false + +New-ADOrganizationalUnit -Name "test-users" ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" ` + -Description "integration tests will create and delete ephemeral users here" ` + -ProtectedFromAccidentalDeletion $false + +# Print all OUs to validate that they were created. +Get-ADOrganizationalUnit -Filter * + +New-ADUser -Name "Bind User" -SamAccountName "bind-user" -GivenName "Bind" -Surname "User" -DisplayName "Bind User" ` + -UserPrincipalName "bind-user@activedirectory.test.pinniped.dev" ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" ` + -AccountPassword (ConvertTo-SecureString "REDACTED_BIND_USER_PASSWORD" -AsPlainText -Force) ` + -Enabled $true + +# Note that the value of EmailAddress is not a real email address, but that's okay. +New-ADUser -Name "Pinny Seal" -SamAccountName "pinny" -GivenName "Pinny" -Surname "Seal" -DisplayName "Pinny Seal" ` + -UserPrincipalName "pinny@activedirectory.test.pinniped.dev" ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" ` + -EmailAddress "tanzu-user-authentication@groups.vmware.com" ` + -AccountPassword (ConvertTo-SecureString "REDACTED_PINNY_USER_PASSWORD" -AsPlainText -Force) ` + -Enabled $true + +New-ADUser -Name "Deactivated User" -SamAccountName "deactivated-user" -GivenName "Deactivated" -Surname "User" -DisplayName "Deactivated User" ` + -UserPrincipalName "deactivated-user@activedirectory.test.pinniped.dev" ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" ` + -AccountPassword (ConvertTo-SecureString "REDACTED_DEACTIVATED_USER_PASSWORD" -AsPlainText -Force) ` + -Enabled $false + +# Take note of the pinny account's ObjectGUID. You will need to edit the concourse-secrets secret later to update this GUID value. +# This value should look something like "288188dd-ab76-4f61-b6e4-c72e081502c5". +Get-ADUser pinny -Properties * | Select SamaccountName,ObjectGUID + +# Print all users to validate that they were created. +Get-ADUser -Filter * + +New-ADGroup -Name "Marine Mammals" -SamAccountName "Marine Mammals" -DisplayName "Marine Mammals" ` + -GroupCategory Security -GroupScope Global ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" + +Add-ADGroupMember -Identity "Marine Mammals" -Members "pinny" + +New-ADGroup -Name "Mammals" -SamAccountName "Mammals" -DisplayName "Mammals" ` + -GroupCategory Security -GroupScope Global ` + -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" + +Add-ADGroupMember -Identity "Mammals" -Members "Marine Mammals" + +# Change the default password policy. 
There are some integration tests that rely on this. +# This is the equivalent of doing this in the Windows "Active Directory Administrative Center" UI: +# check "enforce account lockout policy", give it 20 failed attempts and a 15-minute reset, then +# uncheck "enforce minimum password age" so we can change the password immediately upon creating a user. +Set-ADDefaultDomainPasswordPolicy -Identity "activedirectory.test.pinniped.dev" ` + -LockoutThreshold 20 -LockoutDuration "00:15:00" -LockoutObservationWindow "00:15:00" ` + -MinPasswordAge 0 + +# Print the policy to validate that it was updated. +Get-ADDefaultDomainPasswordPolicy + +# We need to allow the bind-user to create/delete/edit users and groups within the test-users OU, because several +# integration tests want to create/delete/edit ephemeral test users and groups. +# These access control steps were inspired by https://the-itguy.de/delegate-access-in-active-directory-with-powershell/. +# This is intended to be the equivalent of using the UI to assign permissions like this: right click on "test-users", +# select Delegate Control, select "bind-user" as the user, select "create, delete and manage user accounts" and +# "reset user passwords" as the tasks to delegate. +function New-ADDGuidMap +{ + $rootdse = Get-ADRootDSE + $guidmap = @{ } + $GuidMapParams = @{ + SearchBase = ($rootdse.SchemaNamingContext) + LDAPFilter = "(schemaidguid=*)" + Properties = ("lDAPDisplayName", "schemaIDGUID") + } + Get-ADObject @GuidMapParams | ForEach-Object { $guidmap[$_.lDAPDisplayName] = [System.GUID]$_.schemaIDGUID } + return $guidmap +} +$GuidMap = New-ADDGuidMap +$BindUserSID = New-Object System.Security.Principal.SecurityIdentifier (Get-ADUser "bind-user").SID +$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" +$ace1 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["user"] +$ace2 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["user"], "All" +$ace3 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["group"] +$ace4 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["group"], "All" +$acl.AddAccessRule($ace1) +$acl.AddAccessRule($ace2) +$acl.AddAccessRule($ace3) +$acl.AddAccessRule($ace4) +Set-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" -AclObject $acl + +# Print the access control rules that were just applied. +$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" +$acl.Access | Where-Object { $_.IdentityReference -eq "pinniped-ad\bind-user" } +``` + +If you would like to see these OUs, users, and groups in the UI, you can open the "Active Directory Users and Computers" +app in your RDP session. + +## Configure a CA and a serving certificate for LDAPS + +Now we need to create and configure a TLS serving certificate for LDAPS. + +The certificate needs to include two hostnames. One of the hostnames is the name that the AD server +thinks is its own hostname (`active-directory-dc1.activedirectory.test.pinniped.dev`). +This is how the AD server will decide to use this cert for the LDAPS port. 
+The other hostname is the one that clients will use when making connections from the outside +(`activedirectory.test.pinniped.dev`) so they can validate the server certificate. + +The steps here were inspired by https://gist.github.com/magnetikonline/0ccdabfec58eb1929c997d22e7341e45. + +On your mac: + +```shell +# On your Mac: Create a self-signed CA public/private keypair. +openssl req -x509 -newkey rsa:4096 \ + -keyout ad-ca.key -out ad-ca.crt \ + -sha256 -days 36500 -nodes \ + -subj "/C=US/ST=California/L=San Francisco/O=Pinniped/OU=Pinniped CI/CN=Pinniped AD CA" + +# Copy the public key to your clipboard. +cat ad-ca.crt| pbcopy +``` + +In Powershell terminal: + +```shell +# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file. +# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the public +# key again before you hit return for this command. +Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\ca.crt" + +# In Powershell terminal, check that the file exists and looks correct. +type "C:\users\administrator\desktop\ca.crt" + +# Import root certificate into trusted store of domain controller in your Powershell terminal: +Import-Certificate -FilePath "C:\users\administrator\desktop\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root +``` + +If you want to validate that this was imported, open the UI tool called "Manage computer certificates" +and look in the folder called "Trusted Root Certification Authorities\Certificates". +If the UI was already open, click the refresh button. + +Copy the following file contents to your clipboard: + +```shell +[Version] +Signature="$Windows NT$" + +[NewRequest] +Subject = "CN=activedirectory.test.pinniped.dev" +KeySpec = 1 +KeyLength = 2048 +Exportable = TRUE +MachineKeySet = TRUE +SMIME = FALSE +PrivateKeyArchive = FALSE +UserProtected = FALSE +UseExistingKeySet = FALSE +ProviderName = "Microsoft RSA SChannel Cryptographic Provider" +ProviderType = 12 +RequestType = PKCS10 +KeyUsage = 0xa0 + +[EnhancedKeyUsageExtension] +OID = 1.3.6.1.5.5.7.3.1 ; Server Authentication + +[Extensions] +2.5.29.17 = "{text}" +_continue_ = "DNS=activedirectory.test.pinniped.dev" +_continue_ = "DNS=active-directory-dc1.activedirectory.test.pinniped.dev" +``` + +In Powershell terminal: + +```shell +# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file. +# Note that if you copy/paste this command to your RDP session, then you need to copy the file contents +# from above again before you hit return for this command. +Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\request.inf" + +# In Powershell terminal, check that the file exists and looks correct. +type "C:\users\administrator\desktop\request.inf" + +# Create a CSR. This command will also generate a private key for the AD server and save it. +certreq -new "C:\users\administrator\desktop\request.inf" "C:\users\administrator\desktop\client.csr" + +# Show the CSR. +type "C:\users\administrator\desktop\client.csr" + +# Copy the content of this file to your clipboard. +Get-Content "C:\users\administrator\desktop\client.csr" | Set-Clipboard +``` + +On your mac: + +```shell +# On your Mac, use the CA to issue a serving cert based on the CSR. 
+pbpaste > client.csr + +cat <<EOF > v3ext.txt +keyUsage=digitalSignature,keyEncipherment +extendedKeyUsage=serverAuth +subjectKeyIdentifier=hash +subjectAltName = @alt_names +[alt_names] + DNS.1 = activedirectory.test.pinniped.dev + DNS.2 = active-directory-dc1.activedirectory.test.pinniped.dev +EOF + +# Create a cert from the CSR signed by the CA. +openssl x509 \ + -req -days 36500 \ + -in client.csr -CA ad-ca.crt -CAkey ad-ca.key -extfile v3ext.txt \ + -set_serial 01 -out client.crt + +# Inspect the generated certificate. +# Ensure the following X509v3 extensions are all present: +# Key Usage: Digital Signature, Key Encipherment +# Extended Key Usage: TLS Web Server Authentication +# Subject Key Identifier +# Subject Alternative Name with 2 DNS hostnames +# Authority Key Identifier +openssl x509 -in client.crt -text + +# Copy the generated cert. +cat client.crt | pbcopy +``` + +In Powershell terminal: + +```shell +# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file. +# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the file contents +# from above again before you hit return for this command. +Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\client.crt" + +# In Powershell terminal, check that the file exists and looks correct. +type "C:\users\administrator\desktop\client.crt" + +# Add the serving certificate to Windows. This will also automatically associate it to the private key that you +# generated with the previous usage of certreq. +certreq -accept "C:\users\administrator\desktop\client.crt" + +# If you want to validate that this was imported, open the UI tool called "Manage computer certificates" +# and look in the folder called "Personal\Certificates". If the UI was already open, click the refresh button. +# Double click on the cert. Ensure that it says, "you have a private key that corresponds to this certificate". +# Next, we need to reboot the VM for the cert to get picked up and used for serving incoming LDAPS connections. +# After showing you a warning dialog box, this should terminate your RDP session and stop the VM. +shutdown /s +``` + +Wait for the VM to stop, then start the VM again from your Mac: + +```shell +gcloud compute instances start active-directory-dc1 --project ${project} --zone ${zone} +``` + +Wait for the VM to finish booting. Then we can confirm that LDAPS is working. On your Mac: + +```shell +# Check that serving cert is being returned on the LDAPS port. This command should show the cert chain. +# It should also verify the server cert using our CA. The output should include "Verify return code: 0 (ok)". +openssl s_client -connect activedirectory.test.pinniped.dev:636 -showcerts -CAfile ad-ca.crt < /dev/null + +# Unfortunately, the ldapsearch command that comes pre-installed on MacOS does not seem to respect +# the LDAPTLS_CACERT env variable. So it will not be able to validate the server certificates. +# As a workaround, we can use docker to run ldapsearch commands in a linux container. + +# Test the regular LDAP port by issuing a query on your Mac. The -ZZ option asks it to use StartTLS. +# This should list all users. Replace REDACTED_BIND_USER_PASSWORD with the real password. 
+docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \ + ldapsearch -d8 -v -x -ZZ -H 'ldap://activedirectory.test.pinniped.dev' \ + -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \ + -w 'REDACTED_BIND_USER_PASSWORD' \ + -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \ + -s sub \ + '(objectClass=user)' '*' + +# Test the LDAPS port by issuing a query on your Mac. This should list all users. +# Replace REDACTED_BIND_USER_PASSWORD with the real password. +docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \ + ldapsearch -d8 -v -x -H 'ldaps://activedirectory.test.pinniped.dev' \ + -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \ + -w 'REDACTED_BIND_USER_PASSWORD' \ + -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \ + -s sub \ + '(objectClass=user)' '*' +``` + +## Update the `concourse-secrets` secret in GCP Secrets Manager + +On your Mac: + +```shell +# Copy the CA's public cert. +cat ad-ca.crt | base64 | pbcopy + +# cd to your local clone of the `ci` branch of the pinniped repo +cd pinniped-ci-branch + +# Edit the secret. +./hack/edit-gcloud-secret.sh concourse-secrets +# This opens vim to edit the secret. +# Paste the cert as the value for `aws-ad-ca-data`. +# Also edit the value of `aws-ad-user-unique-id-attribute-value`. The value should be the ObjectGUID of the pinny +# user that you created in the steps above. +# Save your changes, exit vim, and when prompted say that you want to save this as the new version of concourse-secrets. +``` + +## Confirm that Active Directory integration tests can pass + +Use these commands to run all the Active Directory integration tests on your Mac. +The `-run` filter is based on the tests as they existed at the time of writing this doc. +You can find AD tests by searching for `SkipTestWhenActiveDirectoryIsUnavailable`. + +On your Mac: + +```shell +# Login so we can read the secrets from GCP Secret Manager. +gcloud auth login + +# cd to your local git clone +cd pinniped + +# Compile and install onto a local kind cluster. +./hack/prepare-for-integration-tests.sh -c --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh" + +# Run all the tests that depend on AD. +source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration \ + -run "/TestSupervisorLogin_Browser/active_directory|/TestE2EFullIntegration_Browser/with_Supervisor_ActiveDirectory|/TestActiveDirectoryIDPPhaseAndConditions_Parallel|/TestSupervisorWarnings_Browser/Active_Directory" +``` + +## Cleanup + +On your Mac: + +```shell +# Remove all bindings for the service account from the secret. +# The binding was only needed during the first boot of the VM. +gcloud secrets remove-iam-policy-binding active-directory-dc1-password \ + --project ${project} \ + --member "serviceAccount:${dcServiceAccount}" --role roles/secretmanager.secretAccessor \ + --all + +# Remove the firewall rule which allows incoming RDP connections. +# If you need to RDP to this AD VM in the future, then you will need to create +# a new firewall rule to allow it. +gcloud compute firewall-rules delete allow-rdp-ingress-to-addc \ + --project ${project} \ + --quiet + +# Remove all temp files. It's okay to remove the private key for our CA because we +# created certs that are good for 100 years, as long as you have already added the +# public cert to the concourse-secrets secret. 
If we need to create a new AD VM, we +# can also create a new CA. +rm ad-ca.crt ad-ca.key client.crt client.csr v3ext.txt +``` diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..bcaf5107c --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +Please see https://github.com/vmware-tanzu/pinniped/blob/main/CODE_OF_CONDUCT.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..f317e8d99 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +Please see https://github.com/vmware-tanzu/pinniped/blob/main/CONTRIBUTING.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 000000000..301982f18 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1 @@ +Please see https://github.com/vmware-tanzu/pinniped/blob/main/MAINTAINERS.md diff --git a/README.md b/README.md index a84c0d2c1..b25dc8ae1 100644 --- a/README.md +++ b/README.md @@ -1 +1,180 @@ -# placeholder-name \ No newline at end of file +# Pinniped's `ci` branch + +This `ci` branch contains the CI/CD tooling for [Pinniped](https://github.com/vmware-tanzu/pinniped). + +The documentation and code in this branch is mainly intended for the maintainers of Pinniped. + +This branch is not intended to be merged to the `main` branch. + +The code in the branch previously lived in a private repository. It was made public by moving +the code into the `ci` branch of the Pinniped repository in late 2024. The previous git history +for these files was not copied from the private repository at the time of this migration. + +## Reporting an issue in this branch + +Found a bug or would like to make an enhancement request? +Please report issues in the [this repo](https://github.com/vmware-tanzu/pinniped). + +## Reporting security vulnerabilities + +Please follow the procedure described in [SECURITY.md](https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md). + +## Creating a release + +When the team is preparing to ship a release, a maintainer will create a new +GitHub [Issue](https://github.com/vmware-tanzu/pinniped/issues/new/choose) in this repo to +collaboratively track progress on the release checklist. As tasks are completed, +the team will check them off. When all the tasks are completed, the issue is closed. + +The release checklist is committed to this repo as an [issue template](https://github.com/vmware-tanzu/pinniped/tree/main/.github/ISSUE_TEMPLATE/release_checklist.md). + +## Pipelines + +Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD. +Our Concourse can be found at [ci.pinniped.dev](https://ci.pinniped.dev). + +The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible. + +- `main` + + This is the main pipeline that runs on merges to `main`. It builds, tests, and (when manually triggered) releases from main. 
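+
+  For reference, after running `./hack/setup-fly.sh` a maintainer can inspect this pipeline from the
+  command line with the `fly` CLI. A minimal sketch (the target name `pinniped` matches
+  `CONCOURSE_TARGET` in `hack/fly-helpers.sh`):
+
+  ```bash
+  # List all pipelines on our Concourse, then list the jobs in the main pipeline.
+  fly --target pinniped pipelines
+  fly --target pinniped jobs --pipeline main
+  ```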
+
+- `pull-requests`
+
+  This is a pipeline that triggers for each open pull request. It runs a smaller subset of the integration tests and validations than the `main` pipeline.
+
+- `dockerfile-builders`
+
+  This pipeline builds a bunch of custom utility container images that are used in our CI and testing.
+
+  - `build-gh-cli` (a container image that includes the GitHub CLI)
+  - `build-github-pr-resource` (a [fork](https://github.com/pinniped-ci-bot/github-pr-resource) of `github-pr-resource` with support for gating PRs from untrusted users)
+  - `build-code-coverage-uploader` (uploading code coverage during unit tests)
+  - `build-eks-deployer-dockerfile` (deploying our app to EKS clusters)
+  - `build-k8s-app-deployer-dockerfile` (deploying our app to clusters)
+  - `build-pool-trigger-resource-dockerfile` (an updated implementation of the [pool-trigger-resource](https://github.com/cfmobile/pool-trigger-resource) for use in our CI)
+  - `build-integration-test-runner-dockerfile` (running our integration tests)
+  - `build-integration-test-runner-beta-dockerfile` (running our integration tests with the latest Chrome beta version)
+  - `build-deployment-yaml-formatter-dockerfile` (templating our deployment YAML during a release)
+  - `build-crane` (copying and tagging container images during a release)
+  - `build-k8s-code-generator-*` (running our Kubernetes code generation under different Kubernetes dependency versions)
+  - `build-test-dex` (a Dex used during tests)
+  - `build-test-cfssl` (a cfssl used during tests)
+  - `build-test-kubectl` (a kubectl used during tests)
+  - `build-test-forward-proxy` (a Squid forward proxy used during tests)
+  - `build-test-bitnami-ldap` (an OpenLDAP used during tests)
+
+- `cleanup-aws`
+
+  This pipeline runs [aws-nuke](https://github.com/rebuy-de/aws-nuke) against our test AWS account.
+  It was occasionally needed because [eksctl](https://eksctl.io/) sometimes fails and leaks AWS resources. These leaked resources cost money and use up our AWS quota.
+  However, we seem to have worked around these issues and this pipeline has not been used for some time.
+
+  These jobs are only triggered manually. They are destructive and should be used with care.
+
+- `concourse-workers`
+
+  Deploys worker replicas on a long-lived GKE cluster that runs the Concourse workers, and can scale them up or down.
+
+- `go-compatibility`
+
+  This pipeline runs nightly jobs that validate the compatibility of our code as a Go module in various contexts. We have jobs that test that our code compiles under older Go versions and that our CLI can be installed using `go install`.
+
+- `security-scan`
+
+  This pipeline has nightly jobs that run security scans on our current main branch and our most recently released artifacts.
+
+  The tools we use are:
+  - [sonatype-nexus-community/nancy](https://github.com/sonatype-nexus-community/nancy), which scans Go module versions.
+  - [aquasecurity/trivy](https://github.com/aquasecurity/trivy), which scans container images and Go binaries.
+  - [govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck), which scans Go code to find calls to known-vulnerable dependencies.
+
+  This pipeline also has a job called `all-golang-deps-updated` which automatically submits PRs to update all
+  direct dependencies in Pinniped's go.mod file, and to update the Golang and distroless container images used in
+  Pinniped's Dockerfiles.
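+
+  For local debugging, roughly equivalent scans can be run by hand against a checkout of Pinniped.
+  A minimal sketch (the container image reference is illustrative, not a real release image name):
+
+  ```bash
+  # Scan Go source for calls into known-vulnerable dependencies.
+  govulncheck ./...
+
+  # Scan the resolved Go module versions with Sonatype Nancy.
+  go list -json -deps ./... | nancy sleuth
+
+  # Scan a container image with Trivy.
+  trivy image ghcr.io/example/pinniped-server:v0.0.0
+  ```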
+
+- `kind-node-builder`
+
+  A nightly build job which uses the latest version of kind to build the HEAD of Kubernetes' master branch as a container
+  image that can be used to deploy kind clusters. Other pipelines use this container image to install Pinniped and run
+  integration tests. This gives us insight into any compatibility problems with the next release of Kubernetes.
+
+## Deploying pipeline changes
+
+After any shared tasks (`./pipelines/shared-tasks`) or helpers (`./pipelines/shared-helpers`) are edited,
+the commits must be pushed to the `ci` branch of this repository to take effect.
+
+After editing any CI secrets or pipeline definitions, a maintainer must run the corresponding
+`./pipelines/$PIPELINE_NAME/update-pipeline.sh` script to apply the changes to Concourse.
+To deploy _all_ pipelines, a maintainer can run `./pipelines/update-all-pipelines.sh`.
+Don't forget to commit and push your changes after applying them!
+
+## GitHub webhooks for pipelines
+
+Some pipelines use GitHub [webhooks to trigger resource checks](https://concourse-ci.org/resources.html#schema.resource.webhook_token),
+rather than the default of polling every minute, to make these pipelines more responsive and to use fewer compute resources
+for running checks. Refer to the places where `webhook_token` is configured in the various `pipeline.yml` files.
+
+To make these webhooks work, they must be defined in the [GitHub repo's settings](https://github.com/vmware-tanzu/pinniped/settings/hooks).
+
+## Installing and operating Concourse
+
+See [infra/README.md](./infra/README.md) for details about how Concourse was installed and how it can be operated.
+
+## Acceptance environments
+
+In addition to the many ephemeral Kubernetes clusters we use for testing, we also deploy a long-running acceptance environment.
+
+It runs on Google Kubernetes Engine (GKE) as the `gke-acceptance-cluster` cluster in our GCP project in the `us-central1-c` availability zone.
+
+To access this cluster, download the kubeconfig to `gke-acceptance.yaml` by running:
+
+```bash
+KUBECONFIG=gke-acceptance.yaml gcloud container clusters get-credentials gke-acceptance-cluster --project "$PINNIPED_GCP_PROJECT" --zone us-central1-c
+```
+
+The above command assumes that you have already set `PINNIPED_GCP_PROJECT` to the name of the GCP project.
+
+## CI secrets
+
+We use [Google Secret Manager](https://cloud.google.com/secret-manager) on GCP to store build/test/release secrets.
+These secrets are only available to the maintainers.
+
+Using the `gcloud secrets list` command or the [web console](https://console.cloud.google.com/security/secret-manager),
+you can list the available secrets. The content of each secret is a YAML file with secret key/value pairs.
+You can also use the `./hack/edit-gcloud-secret.sh SECRET_NAME` script to edit or inspect each secret.
+
+## Setting up the Active Directory test environment
+
+To test the `ActiveDirectoryIdentityProvider` functionality, we have a long-running Active Directory Domain Controller
+server instance in our GCP account. See [AD-SETUP.md](AD-SETUP.md) for details.
+
+## Running integration tests on your laptop using AD
+
+The relevant environment variables can be pulled from the secret manager via the `hack/get-aws-ad-env-vars.sh` script.
+This can be used by maintainers with Pinniped's `hack/prepare-for-integration-tests.sh` script in the following way:
+
+ ```bash
+ # Must authenticate to gcloud to access the secret manager.
+ gcloud auth login
+ # In the pinniped repo's main branch or in your PR branch:
+ hack/prepare-for-integration-tests.sh --get-active-directory-vars "$HOME/path/to/pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"
+ ```
+
+## Running integration tests on your laptop using GitHub
+
+The relevant environment variables can be pulled from the secret manager via the `hack/get-github-env-vars.sh` script.
+This can be used by maintainers with Pinniped's `hack/prepare-for-integration-tests.sh` script in the following way:
+
+ ```bash
+ # Must authenticate to gcloud to access the secret manager.
+ gcloud auth login
+ # In the pinniped repo's main branch or in your PR branch:
+ hack/prepare-for-integration-tests.sh --get-github-vars "$HOME/path/to/pinniped-ci-branch/hack/get-github-env-vars.sh"
+ ```
+
+## License
+
+Pinniped is open source and licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE).
+
+Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..ac0e30d9a
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1 @@
+Please see https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md
diff --git a/dockerfiles/code-coverage-uploader/Dockerfile b/dockerfiles/code-coverage-uploader/Dockerfile
new file mode 100644
index 000000000..af36f9cc3
--- /dev/null
+++ b/dockerfiles/code-coverage-uploader/Dockerfile
@@ -0,0 +1,14 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# For downloading the codecov uploader, which is used to upload code coverage during unit tests.
+FROM debian:12.7-slim AS builder
+
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+RUN curl -sfLo /tmp/codecov https://uploader.codecov.io/latest/linux/codecov
+RUN chmod +x /tmp/codecov
+
+FROM golang:1.23.2
+RUN apt-get update -y && apt-get dist-upgrade -y
+COPY --from=builder /tmp/codecov /usr/local/bin/codecov
diff --git a/dockerfiles/crane/Dockerfile b/dockerfiles/crane/Dockerfile
new file mode 100644
index 000000000..cf848964e
--- /dev/null
+++ b/dockerfiles/crane/Dockerfile
@@ -0,0 +1,10 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+FROM gcr.io/go-containerregistry/crane as crane
+FROM mikefarah/yq:4.44.3 AS yq
+
+FROM golang:1.23
+COPY --from=yq /usr/bin/yq /usr/local/bin
+COPY --from=crane /ko-app/crane /usr/local/bin
+ENTRYPOINT ["bash"]
diff --git a/dockerfiles/deployment-yaml-formatter/Dockerfile b/dockerfiles/deployment-yaml-formatter/Dockerfile
new file mode 100644
index 000000000..db07b8df3
--- /dev/null
+++ b/dockerfiles/deployment-yaml-formatter/Dockerfile
@@ -0,0 +1,16 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+FROM mikefarah/yq:4.44.3 AS yq
+
+FROM debian:12.7-slim
+
+# Note: libdigest-sha-perl is to get shasum, which is used when installing Carvel tools below.
+RUN apt-get update && apt-get install -y ca-certificates jq curl libdigest-sha-perl && rm -rf /var/lib/apt/lists/*
+
+# Install Carvel tools.
+RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
+  ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
+
+# Install yq.
+COPY --from=yq /usr/bin/yq /usr/local/bin/yq diff --git a/dockerfiles/eks-deployer/Dockerfile b/dockerfiles/eks-deployer/Dockerfile new file mode 100644 index 000000000..062d142ce --- /dev/null +++ b/dockerfiles/eks-deployer/Dockerfile @@ -0,0 +1,25 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# For deploying an EKS cluster and setting it up to run our tests. + +FROM weaveworks/eksctl:v0.193.0 AS eksctl +FROM mikefarah/yq:4.44.3 AS yq +FROM amazon/aws-cli:2.18.15 +RUN yum update -y && yum install -y jq && yum install -y perl-Digest-SHA && yum clean all +COPY --from=eksctl /usr/local/bin /usr/local/bin +COPY --from=yq /usr/bin/yq /usr/local/bin/yq + +# Install Carvel tools. +RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \ + ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version + +# Install aws-iam-authenticator. +# This gets installed automatically via eksctl, but currently it downloads v0.5.2, +# which will give us a v1alpha1 execcredential rather than a v1beta1 which we want. +# When this has changed, we can delete this: +# https://github.com/weaveworks/eksctl/blob/main/build/docker/Dockerfile#L49 +RUN curl -sfL \ + https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \ + -o /usr/local/bin/aws-iam-authenticator \ + && chmod u+x /usr/local/bin/aws-iam-authenticator diff --git a/dockerfiles/gh-cli/Dockerfile b/dockerfiles/gh-cli/Dockerfile new file mode 100644 index 000000000..9e75310b0 --- /dev/null +++ b/dockerfiles/gh-cli/Dockerfile @@ -0,0 +1,15 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# For running the GitHub CLI. +FROM debian:12.7-slim AS builder + +RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* + +RUN curl \ + -sfLo /tmp/gh.tar.gz \ + https://github.com/cli/cli/releases/download/v2.40.0/gh_2.40.0_linux_amd64.tar.gz \ + && tar -C /tmp --strip-components=1 -xzvf /tmp/gh.tar.gz + +FROM golang:1.23.2 +COPY --from=builder /tmp/bin/gh /usr/local/bin/gh diff --git a/dockerfiles/integration-test-runner-beta/Dockerfile b/dockerfiles/integration-test-runner-beta/Dockerfile new file mode 100644 index 000000000..3d1c52a52 --- /dev/null +++ b/dockerfiles/integration-test-runner-beta/Dockerfile @@ -0,0 +1,80 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# For running the integration tests as a client to a k8s cluster + +FROM mikefarah/yq:4.44.3 AS yq + +# We need gcloud for running integration tests against GKE +# because the kubeconfig uses gcloud as an `auth-provider`. +# Use FROM gcloud-sdk instead of FROM golang because its +# a lot easier to install Go than to install gcloud in the +# subsequent commands below. +FROM google/cloud-sdk:498.0.0-slim + +# Install apache2-utils (for htpasswd to bcrypt passwords for the +# local-user-authenticator) and jq. +RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/* + +# Print version of gke-gcloud-auth-plugin +RUN gke-gcloud-auth-plugin --version + +# Create a non-root user account that can be used to run the tests. +RUN useradd --create-home testrunner + +# Install latest beta chrome. 
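+# The RUN below resets /tmp to its standard ownership and sticky-bit permissions, adds Google's Linux
+# package signing key and Chrome apt repository, and then installs the google-chrome-beta package.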
+RUN \ + chown root:root /tmp && \ + chmod 1777 /tmp && \ + curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \ + echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \ + apt-get -y update && \ + apt-get -y install google-chrome-beta + +# Output Chrome version used +RUN google-chrome --version + +# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/ +ENV PATH /usr/local/go/bin:$PATH +RUN curl -fsSL https://go.dev/dl/go1.23.2.linux-amd64.tar.gz -o /tmp/go.tar.gz && \ + tar -C /usr/local -xzf /tmp/go.tar.gz && \ + rm /tmp/go.tar.gz && \ + go version +ENV GOPATH /go +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" +WORKDIR $GOPATH + +# Install go tools gotestsum and test2json to record the test output in a nice format. +RUN go install gotest.tools/gotestsum@latest +RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \ + mv test2json /usr/local/bin/test2json + +# Install Carvel tools. +RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \ + ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version + +# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ +RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \ + -o /bin/kubectl && chmod 0755 /bin/kubectl + +# Install aws-iam-authenticator +RUN curl -fsSL \ + https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \ + -o /bin/aws-iam-authenticator \ + && chmod 0755 /bin/aws-iam-authenticator + +# Install TMC CLI. +# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now. +#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \ +# | jq -r .versions[].linuxX64 \ +# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \ +# tmc version + +# Install yq. +COPY --from=yq /usr/bin/yq /usr/local/bin/yq + +# install latest nmap +RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\ + alien nmap-7.92-1.x86_64.rpm &&\ + dpkg -i nmap_7.92-2_amd64.deb diff --git a/dockerfiles/integration-test-runner/Dockerfile b/dockerfiles/integration-test-runner/Dockerfile new file mode 100644 index 000000000..d7120a771 --- /dev/null +++ b/dockerfiles/integration-test-runner/Dockerfile @@ -0,0 +1,80 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# For running the integration tests as a client to a k8s cluster + +FROM mikefarah/yq:4.44.3 AS yq + +# We need gcloud for running integration tests against GKE +# because the kubeconfig uses gcloud as an `auth-provider`. +# Use FROM gcloud-sdk instead of FROM golang because its +# a lot easier to install Go than to install gcloud in the +# subsequent commands below. +FROM google/cloud-sdk:498.0.0-slim + +# Install apache2-utils (for htpasswd to bcrypt passwords for the +# local-user-authenticator) and jq. 
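+# Also installs wget, zip, procps, alien (used further below to convert the nmap rpm into a deb),
+# and the gke-gcloud-auth-plugin used by GKE kubeconfigs.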
+RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/* + +# Print version of gke-gcloud-auth-plugin +RUN gke-gcloud-auth-plugin --version + +# Create a non-root user account that can be used to run the tests. +RUN useradd --create-home testrunner + +# Install latest stable chrome. +RUN \ + chown root:root /tmp && \ + chmod 1777 /tmp && \ + curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \ + echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \ + apt-get -y update && \ + apt-get -y install google-chrome-stable + +# Output Chrome version used +RUN google-chrome --version + +# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/ +ENV PATH /usr/local/go/bin:$PATH +RUN curl -fsSL https://go.dev/dl/go1.23.2.linux-amd64.tar.gz -o /tmp/go.tar.gz && \ + tar -C /usr/local -xzf /tmp/go.tar.gz && \ + rm /tmp/go.tar.gz && \ + go version +ENV GOPATH /go +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" +WORKDIR $GOPATH + +# Install go tools gotestsum and test2json to record the test output in a nice format. +RUN go install gotest.tools/gotestsum@latest +RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \ + mv test2json /usr/local/bin/test2json + +# Install Carvel tools. +RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \ + ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version + +# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ +RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \ + -o /bin/kubectl && chmod 0755 /bin/kubectl + +# Install aws-iam-authenticator +RUN curl -fsSL \ + https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \ + -o /bin/aws-iam-authenticator \ + && chmod 0755 /bin/aws-iam-authenticator + +# Install TMC CLI. +# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now. +#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \ +# | jq -r .versions[].linuxX64 \ +# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \ +# tmc version + +# Install yq. +COPY --from=yq /usr/bin/yq /usr/local/bin/yq + +# install latest nmap +RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\ + alien nmap-7.92-1.x86_64.rpm &&\ + dpkg -i nmap_7.92-2_amd64.deb diff --git a/dockerfiles/k8s-app-deployer/Dockerfile b/dockerfiles/k8s-app-deployer/Dockerfile new file mode 100644 index 000000000..830d1a4b8 --- /dev/null +++ b/dockerfiles/k8s-app-deployer/Dockerfile @@ -0,0 +1,34 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# For deploying apps onto Kubernetes clusters (including GKE) + +FROM google/cloud-sdk:498.0.0-slim + +# Install apache2-utils (for htpasswd to bcrypt passwords for the +# local-user-authenticator) and jq. 
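+# Also installs wget, zip, procps, dnsutils (for dig/nslookup), and the gke-gcloud-auth-plugin used by GKE kubeconfigs.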
+RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps dnsutils google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/* + +# Print version of gke-gcloud-auth-plugin +RUN gke-gcloud-auth-plugin --version + +# Install Carvel tools. +RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \ + ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version + +# Install latest kubectl. +RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + -o /bin/kubectl && chmod u+x /bin/kubectl + +# Install aws-iam-authenticator +RUN curl -sfL \ + https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \ + -o /bin/aws-iam-authenticator \ + && chmod u+x /bin/aws-iam-authenticator + +# Install TMC CLI. +# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now. +#RUN curl -sfL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \ +# | jq -r .versions[].linuxX64 \ +# | xargs curl -sfL -o /bin/tmc && chmod +x /bin/tmc && \ +# tmc version diff --git a/dockerfiles/k8s-code-generator/Dockerfile b/dockerfiles/k8s-code-generator/Dockerfile new file mode 100644 index 000000000..f3dc186b5 --- /dev/null +++ b/dockerfiles/k8s-code-generator/Dockerfile @@ -0,0 +1,20 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +ARG GO_VERSION + +FROM golang:${GO_VERSION} + +ARG GO_VERSION +ARG K8S_PKG_VERSION +ARG CONTROLLER_GEN_VERSION +ARG CRD_REF_DOCS_COMMIT_SHA + +ENV GO_VERSION=$GO_VERSION +ENV K8S_PKG_VERSION=$K8S_PKG_VERSION +ENV CONTROLLER_GEN_VERSION=$CONTROLLER_GEN_VERSION +ENV CRD_REF_DOCS_COMMIT_SHA=$CRD_REF_DOCS_COMMIT_SHA + +COPY setup.sh /codegen/ + +RUN /codegen/setup.sh diff --git a/dockerfiles/k8s-code-generator/setup.sh b/dockerfiles/k8s-code-generator/setup.sh new file mode 100755 index 000000000..391b13aa2 --- /dev/null +++ b/dockerfiles/k8s-code-generator/setup.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [ -z "$GO_VERSION" ]; then + echo "missing GO_VERSION" + exit 1 +fi +if [ -z "$K8S_PKG_VERSION" ]; then + echo "missing K8S_PKG_VERSION" + exit 1 +fi +if [ -z "$CONTROLLER_GEN_VERSION" ]; then + echo "missing CONTROLLER_GEN_VERSION" + exit 1 +fi + +# Debugging output for CI... +echo "GO_VERSION: $GO_VERSION" +echo "K8S_PKG_VERSION: $K8S_PKG_VERSION" +echo "CONTROLLER_GEN_VERSION: $CONTROLLER_GEN_VERSION" +echo "CRD_REF_DOCS_COMMIT_SHA: $CRD_REF_DOCS_COMMIT_SHA" + +apt-get update -y && apt-get dist-upgrade -y + +cd /codegen/ + +cat <tools.go +package tools + +import ( + _ "k8s.io/apimachinery/pkg/apis/meta/v1" + _ "k8s.io/api/core/v1" + _ "k8s.io/code-generator" +) +EOF + +cat <go.mod +module codegen + +go 1.21 + +require ( + k8s.io/apimachinery v$K8S_PKG_VERSION + k8s.io/code-generator v$K8S_PKG_VERSION + k8s.io/api v$K8S_PKG_VERSION +) +EOF + +# Resolve dependencies and download the modules. +go mod tidy +go mod download + +# Copy the downloaded source code of k8s.io/code-generator so we can "go install" all its commands. 
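+# (The copy is placed under GOPATH/src because Pinniped's update-codegen.sh later invokes the generator's
+# shell scripts at that path; see the note below about the sed edits to those scripts.)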
+rm -rf "$(go env GOPATH)/src" +mkdir -p "$(go env GOPATH)/src/k8s.io" +cp -pr "$(go env GOMODCACHE)/k8s.io/code-generator@v$K8S_PKG_VERSION" "$(go env GOPATH)/src/k8s.io/code-generator" + +# Install the commands to $GOPATH/bin. Also sed the related shell scripts, but leave those in the src dir. +# Note that update-codegen.sh invokes these shell scripts at this src path. +# The sed is a dirty hack to avoid having the code-generator shell scripts run go install again. +# In version 0.23.0 the line inside the shell script that previously said "go install ..." started +# to instead say "GO111MODULE=on go install ..." so this sed is a little wrong, but still seems to work. +(cd "$(go env GOPATH)/src/k8s.io/code-generator" && + go install -v ./cmd/... && + sed -i -E -e 's/(go install.*)/# \1/g' ./*.sh) + +if [[ ! -f "$(go env GOPATH)/bin/openapi-gen" ]]; then + # Starting in Kube 1.30, openapi-gen moved from k8s.io/code-generator to k8s.io/kube-openapi. + # Assuming that we are still in the /codegen directory, get the specific version of kube-openapi + # that is selected as an indirect dependency by the go.mod. + kube_openapi_version=$(go list -m k8s.io/kube-openapi | cut -f2 -d' ') + # Install that version of its openapi-gen command. + go install -v "k8s.io/kube-openapi/cmd/openapi-gen@$kube_openapi_version" +fi + +go install -v sigs.k8s.io/controller-tools/cmd/controller-gen@v$CONTROLLER_GEN_VERSION + +# We use a commit sha instead of a release semver because this project does not create +# releases very often. They seem to only release 1-2 times per year, but commit to +# main more often. +go install -v github.com/elastic/crd-ref-docs@$CRD_REF_DOCS_COMMIT_SHA + +# List all the commands that we just installed. +echo "Installed the following commands to $(go env GOPATH)/bin:" +ls "$(go env GOPATH)/bin" diff --git a/dockerfiles/pool-trigger-resource/Dockerfile b/dockerfiles/pool-trigger-resource/Dockerfile new file mode 100644 index 000000000..b2d0efdb0 --- /dev/null +++ b/dockerfiles/pool-trigger-resource/Dockerfile @@ -0,0 +1,17 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# We would like to use https://github.com/cfmobile/pool-trigger-resource for our pool recycle jobs. +# Unfortuntely, the pool-trigger-resource repo seems like it is not maintained by anyone. The most recent +# commit was six years ago. On the other hand, its implementation is a shell script which basically +# just calls some git commands, so it shouldn't need much maintaince if it works. +# This is an updated version of https://github.com/cfmobile/pool-trigger-resource/blob/master/Dockerfile +# to use newer versions of linux, jq, and git. The "assets" directory's source code is copied from +# https://github.com/cfmobile/pool-trigger-resource/tree/master/assets as of commit efefe018c88e937. + +FROM debian:12.7-slim + +RUN apt-get update && apt-get install -y ca-certificates jq git && rm -rf /var/lib/apt/lists/* + +ADD assets/ /opt/resource/ +RUN chmod +rx /opt/resource/* diff --git a/dockerfiles/pool-trigger-resource/assets/check b/dockerfiles/pool-trigger-resource/assets/check new file mode 100755 index 000000000..057d0af06 --- /dev/null +++ b/dockerfiles/pool-trigger-resource/assets/check @@ -0,0 +1,219 @@ +#!/bin/sh +# vim: set ft=sh + +set -e + +exec 3>&1 # make stdout available as fd 3 for the result +exec 1>&2 # redirect all output to stderr for logging + +# shellcheck source=./common.sh +. 
"$(dirname "$0")"/common.sh + +# for jq +PATH=/usr/local/bin:$PATH + +payload=$TMPDIR/git-resource-request + +cat > "$payload" <&0 + + +uri=$(jq -r '.source.uri // ""' < "$payload") +branch=$(jq -r '.source.branch // ""' < "$payload") +pool_name=$(jq -r '.source.pool // ""' < "$payload") +ref=$(jq -r '.version.ref // ""' < "$payload") + +if [ -z "$uri" ]; then + config_errors="${config_errors}invalid payload (missing uri) +" +fi + +if [ -z "$branch" ]; then + config_errors="${config_errors}invalid payload (missing branch) +" +fi + +if [ -z "$pool_name" ]; then + config_errors="${config_errors}invalid payload (missing pool) +" +fi + +if [ -n "$config_errors" ]; then + echo "$config_errors" + exit 1 +fi + +########### +# +# end processing inputs +# +########### + +########### +# +# start git setup +# +########### + +load_pubkey "$payload" + +destination=$TMPDIR/git-resource-repo-cache + +if [ -d "$destination" ]; then + cd "$destination" + git fetch + git reset --hard FETCH_HEAD +else + branchflag="" + if [ -n "$branch" ]; then + branchflag="--branch $branch" + fi + + git clone "$uri" $branchflag "$destination" + cd "$destination" +fi + + +git config user.name "CI Pool Trigger Resource" +git config user.email "ci-pool-trigger@localhost" + +########### +# +# end git setup +# +########### + + +########### +# +# start calculating pending triggers +# +########### + +if [ -n "$ref" ] && git cat-file -e "$ref"; then + ref_exists_and_is_valid=yes +fi + +if [ -e "$pool_name/.pending-triggers" ] && [ -e "$pool_name/.pending-removals" ]; then + tally_files_exist=yes + + #check validity of tally files +fi + +if [ -n "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then + files_changed=$(git show --pretty="format:" --name-status -r "$ref"..HEAD -- "$pool_name"/unclaimed/) + + set +e + added_items=$(echo "$files_changed" | grep "^A") + removed_items=$(echo "$files_changed" | grep "^D") + set -e + + if [ -n "$added_items" ]; then + num_added_items=$(echo "$added_items" | wc -l) + else + num_added_items=0 + fi + + if [ -n "$removed_items" ]; then + num_removed_items=$(echo "$removed_items" | wc -l) + else + num_removed_items=0 + fi + + old_pending_triggers=$(cat "$pool_name"/.pending-triggers) + old_pending_removals=$(cat "$pool_name"/.pending-removals) + + pending_triggers=$(( old_pending_triggers + num_added_items )) + + if [ "$num_removed_items" -gt "$old_pending_removals" ]; then + extra_removals=$(( num_removed_items - old_pending_removals )) + pending_removals=0 + pending_triggers=$(( pending_triggers - extra_removals )) + else + pending_removals=$(( old_pending_removals - num_removed_items )) + fi +else + pending_triggers=$(find "$pool_name"/unclaimed -not -path "*/\.*" -path "$pool_name/unclaimed/*"| wc -l) + pending_removals=0 +fi +########### +# +# end calculating pending triggers +# +########### + + +########### +# +# start handling results +# +########### + +if [ "$pending_triggers" -gt 0 ]; then + last_commit=$(git log -1 --pretty='format:%H') + result=$(echo "$last_commit" | jq -R '.' 
| jq -s "map({ref: .})") +else + result="[]" +fi + +########### +# +# end handling results +# +########### + + + +########### +# +# start updating triggers +# +########### + +if [ "$pending_triggers" -gt 0 ]; then + new_pending_triggers=$(( pending_triggers - 1 )) + new_pending_removals=$(( pending_removals + 1 )) + echo "$new_pending_triggers" > "$pool_name"/.pending-triggers + echo "$new_pending_removals" > "$pool_name"/.pending-removals + git add "$pool_name"/.pending* + + commit_message="triggering build with pending triggers: $new_pending_triggers; pending removals: $new_pending_removals" + + if [ -n "$ref_exists_and_is_valid" ] && [ -z "$tally_files_exist" ]; then + commit_message="$commit_message + + .pending-triggers and/or .pending-removals are missing - re-initializing resource" + elif [ -z "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then + commit_message="$commit_message + + resource initialized with pre-existing .pending-triggers and .pending-removals - ignoring" + elif [ -z "$ref_exists_and_is_valid" ]; then + commit_message="$commit_message + + initializing tally files" + fi + + if [ -n "$added_items" ]; then + commit_message="$commit_message + + additions: + $added_items" + fi + + if [ -n "$removed_items" ]; then + commit_message="$commit_message + + removals: + $removed_items" + fi + + git commit --allow-empty -m "$commit_message" + git push +fi + +########### +# +# end updating triggers +# +########### + +echo "$result" >&3 \ No newline at end of file diff --git a/dockerfiles/pool-trigger-resource/assets/common.sh b/dockerfiles/pool-trigger-resource/assets/common.sh new file mode 100755 index 000000000..21be36fee --- /dev/null +++ b/dockerfiles/pool-trigger-resource/assets/common.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +export TMPDIR=${TMPDIR:-/tmp} + +load_pubkey() { + local private_key_path=$TMPDIR/git-resource-private-key + + (jq -r '.source.private_key // empty' < "$1") > "$private_key_path" + + if [ -s "$private_key_path" ]; then + chmod 0600 "$private_key_path" + + eval "$(ssh-agent)" >/dev/null 2>&1 + trap 'kill $SSH_AGENT_PID' 0 + + ssh-add "$private_key_path" >/dev/null 2>&1 + + mkdir -p ~/.ssh + cat > ~/.ssh/config <&1 diff --git a/dockerfiles/test-forward-proxy/squid.conf b/dockerfiles/test-forward-proxy/squid.conf new file mode 100644 index 000000000..a08154311 --- /dev/null +++ b/dockerfiles/test-forward-proxy/squid.conf @@ -0,0 +1,56 @@ +## listen on TCP 3128 +http_port 3128 + +## Prevent caching anything (pass through only) +cache deny all + +## Allow all connections. +http_access allow all + +## Where does Squid log to? +cache_store_log none +cache_log /dev/null +access_log daemon:/var/log/squid/access.log squid +access_log syslog:user.info squid + +## When logging, web auditors want to see the full uri, even with the query terms +strip_query_terms off + +## Keep 7 days of logs +logfile_rotate 7 + +## How much RAM, in MB, to use for cache? 
Default since squid 3.1 is 256 MB +cache_mem 8 MB + +## Maximum size of individual objects to store in cache +maximum_object_size 1 MB + +## Amount of data to buffer from server to client +read_ahead_gap 64 KB + +## Number of file descriptors to support (default is 2**20 which takes up ~408 MB of memory) +max_filedescriptors 65536 + +## Drop X-Forwarded-For headers +forwarded_for delete + +## Suppress sending squid version information +httpd_suppress_version_string on + +## How long to wait when shutting down squid +shutdown_lifetime 10 seconds + +## What hostname to display? (defaults to system hostname) +visible_hostname proxy + +## Drop some response headers that Squid normally adds (just being paranoid here) +reply_header_access Server deny all +reply_header_access Via deny all +reply_header_access X-Cache deny all +reply_header_access X-Cache-Lookup deny all +reply_header_access X-Squid-Error deny all + +## Drop denied connections with just a TCP reset (no error page that might leak info) +deny_info TCP_RESET all + +dns_v4_first off diff --git a/dockerfiles/test-kubectl/Dockerfile b/dockerfiles/test-kubectl/Dockerfile new file mode 100644 index 000000000..0b21010cd --- /dev/null +++ b/dockerfiles/test-kubectl/Dockerfile @@ -0,0 +1,4 @@ +# Copyright 2021 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +FROM bitnami/kubectl:latest diff --git a/hack/approve-and-merge.sh b/hack/approve-and-merge.sh new file mode 100755 index 000000000..03e4cc59d --- /dev/null +++ b/hack/approve-and-merge.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +repo=vmware-tanzu/pinniped +current_branch_name=$(git rev-parse --abbrev-ref HEAD) + +if [[ "$current_branch_name" != "ci" ]]; then + echo "error: this script should only be used on the ci branch" + exit 1 +fi + +# Print the list of PRs to the screen. +PAGER='' gh pr list --base ci --repo $repo --limit 1000 + +# Exit if there are no PRs found. +count_prs=$(gh pr list --base ci --repo $repo --jq ". | length" --json "number") +if [[ "${count_prs}" == "0" ]]; then + exit 0 +fi + +read -p "Do you wish to approve and merge these PRs for the ci branch? y/n: " yn +case $yn in + [Yy]* );; + * ) exit 0;; +esac + +gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \ + | xargs -I{} gh pr review {} --approve + +gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \ + | xargs -I{} gh pr merge {} --merge --delete-branch + +echo "now pulling the merged commits" +git pull --rebase --autostash diff --git a/hack/create-gke-acceptance-env.sh b/hack/create-gke-acceptance-env.sh new file mode 100755 index 000000000..5e21d17db --- /dev/null +++ b/hack/create-gke-acceptance-env.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if ! [ -x "$(command -v gcloud)" ]; then + echo 'Error: Google Cloud SDK (gcloud) is not installed (see https://cloud.google.com/sdk/docs/quickstarts).' >&2 + exit 1 +fi + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# Create (or recreate) a GKE acceptance cluster. +# Pro tip: The GCP Console UI can help you build this command. 
+# The following fields were customized, and all of the others are left as the GCP Console's defaults: +# - Cluster name +# - Cluster version - newest at the time +# - Num nodes - sized smaller to be cheaper +# - Maintenance window start and recurrence - to avoid downtime during business hours +# - Issue client certificate - to make it possible to use an admin kubeconfig without the GKE auth plugin +gcloud container --project "$PINNIPED_GCP_PROJECT" clusters create "gke-acceptance-cluster" \ + --zone "us-central1-c" --no-enable-basic-auth --cluster-version "1.30.4-gke.1348000" --release-channel "regular" \ + --machine-type "e2-medium" \ + --image-type "COS_CONTAINERD" --disk-type "pd-balanced" --disk-size "100" --metadata disable-legacy-endpoints=true \ + --scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \ + --num-nodes "1" \ + --logging=SYSTEM,WORKLOAD --monitoring=SYSTEM,STORAGE,POD,DEPLOYMENT,STATEFULSET,DAEMONSET,HPA,CADVISOR,KUBELET \ + --enable-ip-alias \ + --network "projects/$PINNIPED_GCP_PROJECT/global/networks/default" \ + --subnetwork "projects/$PINNIPED_GCP_PROJECT/regions/us-central1/subnetworks/default" \ + --no-enable-intra-node-visibility \ + --default-max-pods-per-node "110" \ + --security-posture=standard --workload-vulnerability-scanning=disabled --no-enable-master-authorized-networks \ + --addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver \ + --enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0 \ + --binauthz-evaluation-mode=DISABLED --enable-managed-prometheus --enable-shielded-nodes --node-locations "us-central1-c" \ + --maintenance-window-start "2020-07-01T03:00:00Z" --maintenance-window-end "2020-07-01T11:00:00Z" \ + --maintenance-window-recurrence "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA,SU" \ + --issue-client-certificate diff --git a/hack/edit-gcloud-secret.sh b/hack/edit-gcloud-secret.sh new file mode 100755 index 000000000..3b85e0c81 --- /dev/null +++ b/hack/edit-gcloud-secret.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -e + +if [ -z "$1" ]; then + echo "usage: $0 SECRET_NAME" + exit 1 +fi + +set -u +if ! command -v yq &> /dev/null; then + echo "Please install the yq CLI" + exit 1 +fi +if ! command -v delta &> /dev/null; then + echo "Please install the delta CLI (brew install git-delta)" + exit 1 +fi +if ! command -v gcloud &> /dev/null; then + echo "Please install the gcloud CLI" + exit 1 +fi +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# Create a temporary directory for secrets, cleaned up at the end of this script. +trap 'rm -rf "$TEMP_DIR"' EXIT +TEMP_DIR=$(mktemp -d) || exit 1 + +# Grab the current version. +echo "Downloading the latest version of '$1'..." +gcloud secrets versions access latest --secret="$1" --project "$PINNIPED_GCP_PROJECT" > "$TEMP_DIR/$1.yaml" + +# Use yq to format the YAML into a consistent style. +# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used. 
+# For now, we'll skip the pretty-printing. +# yq eval -i -P '.' "$TEMP_DIR/$1.yaml" +yq eval -i '.' "$TEMP_DIR/$1.yaml" +cp "$TEMP_DIR/$1.yaml" "$TEMP_DIR/$1-original.yaml" + +# Invoke $EDITOR to modify the file. +${EDITOR:-vim} "$TEMP_DIR/$1.yaml" + +# Format the output from the editor just as we did before the edit. + +# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used. +# For now, we'll skip the pretty-printing. +# yq eval -i -P '.' "$TEMP_DIR/$1.yaml" +yq eval -i '.' "$TEMP_DIR/$1.yaml" + +# Dump the diff using git-delta. +( cd "$TEMP_DIR" && delta "$1-original.yaml" "$1.yaml" || true ) + +read -p "Save as new version of '$1' [yN]: " -r +echo +if [[ $REPLY =~ ^[Yy]$ ]] +then + gcloud secrets versions add "$1" --data-file "$TEMP_DIR/$1.yaml" --project "$PINNIPED_GCP_PROJECT" +fi diff --git a/hack/fly-helpers.sh b/hack/fly-helpers.sh new file mode 100644 index 000000000..1739a4579 --- /dev/null +++ b/hack/fly-helpers.sh @@ -0,0 +1,58 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# +# Some global fly config. +# +export FLY_CLI=/usr/local/bin/fly +export CONCOURSE_URL=https://ci.pinniped.dev +export CONCOURSE_TEAM=main +export CONCOURSE_TARGET=pinniped +export ROOT_DIR +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." + +# +# Some helper functions for the update-pipeline scripts to use. +# +function set_pipeline() { + # Ensure that fly is installed/upgraded/configured. + "$ROOT_DIR"/hack/setup-fly.sh + + # Ensure that the user is authenticated with gcloud. + if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\` and try again." + exit 1 + fi + + if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 + fi + + # Local vars. + local pipeline_name=$1 + local pipeline_file=$2 + local gcloud_project="$PINNIPED_GCP_PROJECT" + local gcloud_secret_name=concourse-secrets + + # Create/update the pipeline. + $FLY_CLI --target "$CONCOURSE_TARGET" set-pipeline \ + --pipeline "$pipeline_name" \ + --config "$pipeline_file" \ + --load-vars-from <(gcloud secrets versions access latest \ + --secret="$gcloud_secret_name" \ + --project "$gcloud_project") +} + +function ensure_time_resource_has_at_least_one_version() { + local pipeline_name=$1 + local resource_name=$2 + + # Force the specified time resource to have at least one version. Idempotent. + # For a new pipeline, a time resource will have no versions until the specified time has occurred. + # For example, a once-per-night time resource will have no versions until that time + # has passed on the first night. + $FLY_CLI --target "$CONCOURSE_TARGET" check-resource \ + --resource "$pipeline_name/$resource_name" \ + --from "time:2000-01-01T00:00:00Z" >/dev/null +} diff --git a/hack/get-aws-ad-env-vars.sh b/hack/get-aws-ad-env-vars.sh new file mode 100755 index 000000000..9689d42c4 --- /dev/null +++ b/hack/get-aws-ad-env-vars.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# To be run before local integration tests. 
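+# Requires the gcloud and yq CLIs, and an authenticated gcloud session (checked below).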
+# From the pinniped repo: +# hack/prepare-for-integration-tests.sh --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh" +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +function _get_concourse_secret { + gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1" +} + +export PINNIPED_TEST_AD_HOST="$(_get_concourse_secret '.aws-ad-host')" +export PINNIPED_TEST_AD_DOMAIN="$(_get_concourse_secret '.aws-ad-domain')" +export PINNIPED_TEST_AD_BIND_ACCOUNT_USERNAME="$(_get_concourse_secret '.aws-ad-bind-account-username')" +export PINNIPED_TEST_AD_BIND_ACCOUNT_PASSWORD="$(_get_concourse_secret '.aws-ad-bind-account-password')" +export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME="objectGUID" +export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-unique-id-attribute-value')" +export PINNIPED_TEST_AD_USER_USER_PRINCIPAL_NAME="$(_get_concourse_secret '.aws-ad-user-userprincipalname')" +export PINNIPED_TEST_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-user-password')" +export PINNIPED_TEST_AD_LDAPS_CA_BUNDLE="$(_get_concourse_secret '.aws-ad-ca-data')" +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_DN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-dn')" +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_CN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-cn')" +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountnames')" +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountname-domainnames')" +export PINNIPED_TEST_DEACTIVATED_AD_USER_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-deactivated-user-samaccountname')" +export PINNIPED_TEST_DEACTIVATED_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-deactivated-user-password')" +export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_NAME="mail" +export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-email-attribute-value')" +export PINNIPED_TEST_AD_DEFAULTNAMINGCONTEXT_DN="$(_get_concourse_secret '.aws-ad-defaultnamingcontext')" +export PINNIPED_TEST_AD_USERS_DN="$(_get_concourse_secret '.aws-ad-users-dn')" + +unset -f _get_concourse_secret diff --git a/hack/get-github-env-vars.sh b/hack/get-github-env-vars.sh new file mode 100755 index 000000000..3ba143808 --- /dev/null +++ b/hack/get-github-env-vars.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# To be run before local integration tests. 
+# From the pinniped repo: +# hack/prepare-for-integration-tests.sh --get-github-vars "../pinniped-ci-branch/hack/get-github-env-vars.sh" +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +function _get_concourse_secret { + gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1" +} + +export PINNIPED_TEST_GITHUB_APP_CLIENT_ID="$(_get_concourse_secret '.github-app-client-id')" +export PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-app-client-secret')" + +export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID="$(_get_concourse_secret '.github-oauth-app-client-id')" +export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-oauth-app-client-secret')" +export PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL="$(_get_concourse_secret '.github-oauth-app-allowed-callback-url')" + +export PINNIPED_TEST_GITHUB_USER_USERNAME="$(_get_concourse_secret '.github-username')" +export PINNIPED_TEST_GITHUB_USER_PASSWORD="$(_get_concourse_secret '.github-password')" +export PINNIPED_TEST_GITHUB_USER_OTP_SECRET="$(_get_concourse_secret '.github-user-otp-secret')" + +export PINNIPED_TEST_GITHUB_USERID="$(_get_concourse_secret '.github-userid')" +export PINNIPED_TEST_GITHUB_ORG="$(_get_concourse_secret '.github-org')" +export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES="$(_get_concourse_secret '.github-expected-team-names')" +export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS="$(_get_concourse_secret '.github-expected-team-slugs')" + +unset -f _get_concourse_secret diff --git a/hack/list-all-running-jobs.sh b/hack/list-all-running-jobs.sh new file mode 100755 index 000000000..c3b05da00 --- /dev/null +++ b/hack/list-all-running-jobs.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Define some env vars +source "$script_dir/fly-helpers.sh" + +# Setup and login if needed +"$ROOT_DIR"/hack/setup-fly.sh + +# List all jobs that are currently running in CI. +# An empty result means that there are no jobs running. +for p in $($FLY_CLI --target "$CONCOURSE_TARGET" pipelines --json | jq -r ".[].name"); do + $FLY_CLI --target "$CONCOURSE_TARGET" jobs -p "$p" --json | jq -r ".[] | select(.next_build.status == \"started\") | (\"$p/\" + .name)" +done diff --git a/hack/pinniped-pre-commit.sh b/hack/pinniped-pre-commit.sh new file mode 100755 index 000000000..e2aea6f19 --- /dev/null +++ b/hack/pinniped-pre-commit.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +pinniped_ci_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )" +pinniped_path="${1-$PWD}" +pinniped_ci_path="${2-$pinniped_ci_root}" + +cd "$pinniped_path" || exit 1 + +if [[ ! -f "./hack/module.sh" ]]; then + echo "$pinniped_path does not appear to be the path to the source code repo directory" + exit 1 +fi + +if [[ ! -f "$pinniped_ci_path/hack/run-integration-tests.sh" ]]; then + echo "$pinniped_ci_path does not appear to be the path to the ci repo directory" + exit 1 +fi + +echo +echo "Running linters..." 
+./hack/module.sh lint + +echo +echo "Running units..." +./hack/module.sh unittest + +echo +echo "Running integrations..." +"$pinniped_ci_path"/hack/run-integration-tests.sh --from-clean-cluster + +echo +echo "ALL TESTS PASSED" diff --git a/hack/prepare-for-uninstall-test.sh b/hack/prepare-for-uninstall-test.sh new file mode 100755 index 000000000..d4b533daa --- /dev/null +++ b/hack/prepare-for-uninstall-test.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This script can be used to prepare a kind cluster and deploy the app +# in preparation for running the uninstall test. +# It will also output instructions on how to run the uninstall test. + +set -euo pipefail + +help=no +skip_build=no +pinniped_ci_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +PARAMS="" +while (("$#")); do + case "$1" in + -h | --help) + help=yes + shift + ;; + -s | --skip-build) + skip_build=yes + shift + ;; + -*) + echo "Error: Unsupported flag $1" >&2 + exit 1 + ;; + *) + PARAMS="$PARAMS $1" + shift + ;; + esac +done +eval set -- "$PARAMS" + +if [[ "$help" == "yes" ]]; then + me="$(basename "${BASH_SOURCE[0]}")" + echo "Usage:" + echo " $me [flags] [path/to/pinniped] [path/to/pinniped-ci-branch]" + echo + echo " path/to/pinniped default: \$PWD ($PWD)" + echo " path/to/pinniped-ci-branch default: the parent directory of this script ($pinniped_ci_root)" + echo + echo "Flags:" + echo " -h, --help: print this usage" + echo " -s, --skip-build: reuse the most recently built image of the app instead of building" + exit 1 +fi + +pinniped_path="${1-$PWD}" +pinniped_ci_path="${2-$pinniped_ci_root}" + +if ! command -v kind >/dev/null; then + echo "Please install kind. e.g. 'brew install kind' for MacOS" + exit 1 +fi + +if ! command -v ytt >/dev/null; then + log_error "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS" + exit 1 +fi + +if ! command -v kapp >/dev/null; then + log_error "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS" + exit 1 +fi + +if ! command -v kubectl >/dev/null; then + log_error "Please install kubectl. e.g. 'brew install kubectl' for MacOS" + exit 1 +fi + +cd "$pinniped_path" || exit 1 + +if [[ ! -f Dockerfile || ! -d deploy ]]; then + echo "$pinniped_path does not appear to be the path to the source code repo directory" + exit 1 +fi + +if [[ ! -d "$pinniped_ci_path/pipelines/shared-helpers" ]]; then + echo "$pinniped_ci_path does not appear to be the path to the ci repo directory" + exit 1 +fi + +echo "Deleting running kind clusters to prepare a clean slate for the install+uninstall test..." +kind delete cluster --name pinniped + +echo "Creating a kind cluster..." +kind create cluster --name pinniped + +registry="docker.io" +repo="test/build" +registry_repo="$registry/$repo" +tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy + +if [[ "$skip_build" == "yes" ]]; then + most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1) + if [[ -n "$most_recent_tag" ]]; then + tag="$most_recent_tag" + do_build=no + else + # Oops, there was no previous build. Need to build anyway. + do_build=yes + fi +else + do_build=yes +fi + +registry_repo_tag="${registry_repo}:${tag}" + +if [[ "$do_build" == "yes" ]]; then + # Rebuild the code + echo "Docker building the app..." + docker build . 
--tag "$registry_repo_tag" +fi + +# Load it into the cluster +echo "Loading the app's container image into the kind cluster..." +kind load docker-image "$registry_repo_tag" --name pinniped + +cat </tmp/uninstall-test-env +# The following env vars should be set before running $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh +export IMAGE_REPO="$registry_repo" +export IMAGE_TAG="$tag" +EOF + +echo "Done!" +echo +echo "Ready to run an uninstall test." +echo " cd $pinniped_path" +echo "Then either" +echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh" +echo "or" +echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh" +echo +echo "When you're finished, use 'kind delete cluster --name pinniped to tear down the cluster." diff --git a/hack/prepare-remote-cluster-for-integration-tests.sh b/hack/prepare-remote-cluster-for-integration-tests.sh new file mode 100755 index 000000000..e2051176e --- /dev/null +++ b/hack/prepare-remote-cluster-for-integration-tests.sh @@ -0,0 +1,248 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Assuming that you have somehow got your hands on a remote GKE or kind cluster, +# and that you have an admin kubeconfig file for it, +# and that you have already built/pushed the Pinniped container image that you would like to test, +# then you can use this script to deploy in preparation for integration or manual testing. + +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +function log_note() { + GREEN='\033[0;32m' + NC='\033[0m' + if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then + echo -e "${GREEN}$*${NC}" + else + echo "$*" + fi +} + +function log_error() { + RED='\033[0;31m' + NC='\033[0m' + if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then + echo -e "🙁${RED} Error: $* ${NC}" + else + echo ":( Error: $*" + fi +} + +function check_dependency() { + if ! command -v "$1" >/dev/null; then + log_error "Missing dependency..." 
+ log_error "$2" + exit 1 + fi +} + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# +# Handle argument parsing and help message +# +help=no +kubeconfig="" +image_tag="" +image_repo="" +pinniped_repo="" +cluster_type="" +image_digest="" + +while (("$#")); do + case "$1" in + -h | --help) + help=yes + shift + ;; + -k | --kubeconfig) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "-k|--kubeconfig requires a kubeconfig path to be specified" + exit 1 + fi + kubeconfig=$1 + shift + ;; + -t | --image-tag) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "-t|--image-tag requires a tag to be specified" + exit 1 + fi + image_tag=$1 + shift + ;; + -d | --image-digest) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "--d|--image-digest requires a digest to be specified" + exit 1 + fi + image_digest=$1 + shift + ;; + -r | --image-repo) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "-r|--image-repo requires an image repo to be specified" + exit 1 + fi + image_repo=$1 + shift + ;; + -p | --pinniped-repo) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "-p|--pinniped-repo requires a path to the pinniped repo to be specified" + exit 1 + fi + pinniped_repo=$1 + shift + ;; + -c | --cluster-type) + shift + # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error + if [[ "$#" == "0" || "$1" == -* ]]; then + log_error "-c|--cluster-type requires the type of the cluster to be specified" + exit 1 + fi + cluster_type=$1 + shift + ;; + -*) + log_error "Unsupported flag $1" >&2 + exit 1 + ;; + *) + log_error "Unsupported positional arg $1" >&2 + exit 1 + ;; + esac +done + +# Note that if you are using a remote kind cluster then it might be more convenient to use this public repo: +# ghcr.io/pinniped-ci-bot/manual-test-pinniped-images +# You can give yourself permission to push to that repo at: +# https://github.com/users/pinniped-ci-bot/packages/container/manual-test-pinniped-images/settings +default_image_repo="gcr.io/$PINNIPED_GCP_PROJECT/manual-test-pinniped-images" +default_image_tag="latest" + +if [[ "$help" == "yes" ]]; then + me="$(basename "${BASH_SOURCE[0]}")" + log_note "Usage:" + log_note " $me [flags]" + log_note + log_note "Flags:" + log_note " -h, --help: print this usage" + log_note " -k, --kubeconfig: path to the kubeconfig for your cluster (required)" + log_note " -c, --cluster-type: the type of cluster targeted by the kubeconfig, either 'gke' or 'kind' (required)" + log_note " -r, --image-repo: image registry/repository for Pinniped server container image to deploy (default: $default_image_repo)" + log_note " -t, --image-tag: image tag for Pinniped server container image to deploy (default: $default_image_tag)" + log_note " -d, --image-digest: image digest 
for Pinniped server container image to deploy. Takes precedence over --image-tag." + log_note " -p, --pinniped-repo: path to pinniped git repo (default: a sibling directory called pinniped)" + exit 1 +fi + +if [[ "$kubeconfig" == "" ]]; then + log_error "no kubeconfig set. -k|--kubeconfig is a required option." + exit 1 +fi + +if [[ "$kubeconfig" != "/"* ]]; then + # If it looks like a relative path then make an an absolute path because we are going to pushd below. + kubeconfig="$(pwd)/$kubeconfig" +fi + +if [[ ! -f "$kubeconfig" ]]; then + log_error "specified kubeconfig file does not exist: $kubeconfig" + exit 1 +fi + +if [[ "$cluster_type" != "gke" && "$cluster_type" != "kind" && "$cluster_type" != "aks" && "$cluster_type" != "eks" ]]; then + log_error "specified cluster type must be 'kind', 'eks', 'aks', or 'gke'. -c|--cluster-type is a required option." + exit 1 +fi + +if [[ "$pinniped_repo" == "" ]]; then + pinniped_repo="$ROOT/../pinniped" + log_note "no pinniped repo path set, defaulting to $pinniped_repo" +fi + +if [[ ! (-d "$pinniped_repo" && -d "$pinniped_repo/deploy" && -d "$pinniped_repo/test/cluster_capabilities") ]]; then + log_error "$pinniped_repo does not appear to contain the pinniped source code repo" +fi + +if [[ "$image_repo" == "" ]]; then + image_repo="$default_image_repo" + log_note "no image repo set, defaulting to $image_repo" +fi + +if [[ "$image_tag" == "" ]]; then + image_tag="$default_image_tag" + log_note "no image tag set, defaulting to $image_tag" +fi + +cluster_capabilities_path="$pinniped_repo/test/cluster_capabilities/$cluster_type.yaml" +if [[ ! -f "$cluster_capabilities_path" ]]; then + log_error "cluster type capabilities file does not exist: $cluster_capabilities_path" + exit 1 +fi + +check_dependency ytt "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS" +check_dependency kapp "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS" +check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' for MacOS" +check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux." +check_dependency openssl "Please install openssl. Should be pre-installed on MacOS." +check_dependency nmap "Please install nmap. e.g. 'brew install nmap' for MacOS" + +# +# Finished checking arguments and dependencies. Now actually do the work... +# +export KUBECONFIG="$kubeconfig" +export IMAGE_TAG="$image_tag" +export IMAGE_REPO="$image_repo" +if [[ "$image_digest" != "" ]]; then + export IMAGE_DIGEST="$image_digest" +fi + +pushd "$pinniped_repo" >/dev/null + +PINNIPED_TEST_CLUSTER_CAPABILITY_FILE="${cluster_capabilities_path}" \ + DEPLOY_LOCAL_USER_AUTHENTICATOR=yes \ + DEPLOY_TEST_TOOLS=yes \ + CONCIERGE_APP_NAME="concierge" \ + CONCIERGE_NAMESPACE="concierge" \ + SUPERVISOR_APP_NAME="supervisor" \ + SUPERVISOR_NAMESPACE="supervisor" \ + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR="yes" \ + "$ROOT/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh" + +popd >/dev/null + +log_note +log_note "🚀 Ready to run integration tests! For example..." 
+ +case "$cluster_type" in +gke | aks | eks) + log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh" + ;; +kind) + log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' START_GCLOUD_PROXY=yes GCP_PROJECT=$PINNIPED_GCP_PROJECT GCP_ZONE=us-central1-b $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh" + ;; +*) + log_error "Huh? Should never get here." + ;; +esac diff --git a/hack/remote-workstation/create.sh b/hack/remote-workstation/create.sh new file mode 100755 index 000000000..6d44f7dc2 --- /dev/null +++ b/hack/remote-workstation/create.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Create a VM called $instance_name with some reasonable compute power and disk. +echo "Creating VM with name $instance_name..." +gcloud compute instances create "$instance_name" \ + --project="$project" --zone="$zone" \ + --machine-type="e2-standard-8" \ + --boot-disk-size="40GB" --boot-disk-type="pd-ssd" --boot-disk-device-name="$instance_name" + +# Give a little time for the server to be ready. +while true; do + sleep 5 + if ! "$here"/ssh.sh ls; then + echo "Waiting for VM to be accessible via ssh..." + else + echo "VM ready!" + break + fi +done + +# Copy the deps script to the new VM. +echo "Copying deps.sh to $instance_name..." +gcloud compute scp "$here"/lib/deps.sh "$instance_user@$instance_name":/tmp \ + --project="$project" --zone="$zone" + +# Run the deps script on the new VM. +"$here"/ssh.sh /tmp/deps.sh diff --git a/hack/remote-workstation/delete.sh b/hack/remote-workstation/delete.sh new file mode 100755 index 000000000..1bb281626 --- /dev/null +++ b/hack/remote-workstation/delete.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" + +# Delete the instance forever. Will prompt for confirmation. +echo "Destroying VM $instance_name..." +gcloud compute instances delete "$instance_name" \ + --delete-disks="all" \ + --project="$project" --zone="$zone" diff --git a/hack/remote-workstation/lib/deps.sh b/hack/remote-workstation/lib/deps.sh new file mode 100755 index 000000000..eca697015 --- /dev/null +++ b/hack/remote-workstation/lib/deps.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exuo pipefail + +# Start in the user's home directory. +cd + +# Install brew pre-reqs documented at https://docs.brew.sh/Homebrew-on-Linux#requirements +sudo apt-get update && sudo sudo apt-get install build-essential procps curl file git -y +# Brew installer command from https://brew.sh. Note that CI=1 turns off an interactive prompt. 
+CI=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# The installer prints more instructions. It advises you to add brew to profile and install gcc. +echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >>$HOME/.profile +eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" +brew install gcc + +# Install go. +brew install go +# On linux go really wants gcc5 to also be installed for some reason. +brew install gcc@5 +# Get the Go linter. +go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.1 + +# Install and configure zsh and plugins. +brew install zsh zsh-history-substring-search +brew install fasd fzf +/home/linuxbrew/.linuxbrew/opt/fzf/install --all --no-bash --no-fish +# Install https://ohmyz.sh +export PATH=$PATH:/home/linuxbrew/.linuxbrew/bin +CHSH=no RUNZSH=no KEEP_ZSHRC=yes sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" +# Install some plugins. +git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$HOME"/.oh-my-zsh/custom/themes/powerlevel10k +git clone https://github.com/zsh-users/zsh-autosuggestions "$HOME"/.oh-my-zsh/custom/plugins/zsh-autosuggestions +git clone https://github.com/TamCore/autoupdate-oh-my-zsh-plugins "$HOME"/.oh-my-zsh/plugins/autoupdate +git clone https://github.com/zdharma-continuum/fast-syntax-highlighting.git "$HOME"/.oh-my-zsh/custom/plugins/fast-syntax-highlighting +# Get decent .zshrc and .p10k.zsh files. +curl -fsSL https://gist.githubusercontent.com/cfryanr/c84ca9e3fe519b5a7f07426ecc7e3a7c/raw >"$HOME"/.zshrc +curl -fsSL https://gist.githubusercontent.com/cfryanr/3e55b770b9be485bd8671377ce04a3f1/raw >"$HOME"/.p10k.zsh +# Change the user's default shell. +sudo chsh -s /home/linuxbrew/.linuxbrew/bin/zsh "$USER" + +# Get some other useful config files. +curl -fsSL https://gist.githubusercontent.com/cfryanr/153e167a1f2c20934fbc4dc32bbec8f2/raw >"$HOME"/.gitconfig +curl -fsSL https://gist.githubusercontent.com/cfryanr/80ada8af9a78f08b368327401ea80b6c/raw >"$HOME"/.git-authors + +# Install other useful packages. 
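Before moving on to the extra packages that follow, it can be worth a quick sanity check that the Go toolchain installed above actually landed on the PATH. A minimal sketch, assuming Homebrew's default linuxbrew prefix as configured earlier in this script:

```bash
# Put brew-installed tools on the PATH for this shell, then confirm the toolchain.
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
go version
# golangci-lint was installed with "go install", so it lives under GOPATH's bin directory.
"$(go env GOPATH)/bin/golangci-lint" --version
```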
+brew tap homebrew/command-not-found +brew tap vmware-tanzu/carvel +brew install ytt kbld kapp imgpkg kwt vendir +brew install git git-duet/tap/git-duet pre-commit gh +brew install k9s kind kubectl kubectx stern +brew install exa acarl005/homebrew-formulas/ls-go ripgrep procs bat tokei git-delta dust fd httpie chroma +brew install watch htop wget +brew install jesseduffield/lazydocker/lazydocker ctop dive +brew install jq yq +brew install grip +brew install aws-iam-authenticator +brew install step cfssl +brew install nmap +sudo apt-get install apache2-utils rsync -y + +# Install Chrome +wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb +sudo apt install ./google-chrome-stable_current_amd64.deb -y +rm ./google-chrome-stable_current_amd64.deb +google-chrome --version +mkdir "$HOME"/bin + +# Install docker according to procedure from https://docs.docker.com/engine/install/debian/ +sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y +curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null +sudo apt-get update +sudo apt-get install docker-ce docker-ce-cli containerd.io -y +sudo usermod -aG docker "$USER" +sudo systemctl enable docker.service +sudo systemctl enable containerd.service + +# Set up the Pinniped repo +mkdir workspace +pushd workspace +ssh-keyscan -H github.com >> $HOME/.ssh/known_hosts +# This assumes that you used `--ssh-flag=-A` when using `gcloud compute ssh` to log in to the host, +# which will forward your ssh identities. +git clone git@github.com:vmware-tanzu/pinniped.git +pushd pinniped +pre-commit install +popd +popd + +set +x +echo +echo "Successfully installed deps!" diff --git a/hack/remote-workstation/rsync-to-local.sh b/hack/remote-workstation/rsync-to-local.sh new file mode 100755 index 000000000..96d8902a9 --- /dev/null +++ b/hack/remote-workstation/rsync-to-local.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright 2022-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This is similar to rsync.sh, but with the src and dest flipped at the end. +# It will copy all changes from the remote workstation back to your local machine (overwriting your local changes). + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"} +src_dir_parent=$(dirname "$SRC_DIR") +dest_dir="./workspace/pinniped" +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" +config_file="/tmp/gcp-ssh-config" +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if [[ ! -d "$SRC_DIR" ]]; then + echo "ERROR: $SRC_DIR does not exist" + exit 1 +fi + +# Get the ssh fingerprints of all the GCP VMs. 
+gcloud compute config-ssh --ssh-config-file="$config_file" \ + --project="$project" >/dev/null + +cd "$SRC_DIR" +local_commit=$(git rev-parse --short HEAD) +remote_commit=$("$here"/ssh.sh "cd $dest_dir; git rev-parse --short HEAD" 2>/dev/null | tr -dc '[:print:]') + +if [[ -z "$local_commit" || -z "$remote_commit" ]]; then + echo "ERROR: Could not determine currently checked out git commit sha" + exit 1 +fi + +if [[ "$local_commit" != "$remote_commit" ]]; then + echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake." + echo "Local was $SRC_DIR at *${local_commit}*" + echo "Remote was ${instance_name}:${dest_dir} at *${remote_commit}*" + exit 1 +fi + +# Skip large files because they are probably compiled binaries. +# Also skip other common filenames that we wouldn't need to sync. +echo "Starting rsync from remote to local for $SRC_DIR..." +rsync \ + --progress --delete --archive --compress --human-readable \ + --max-size 200K \ + --exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \ + --rsh "ssh -F $config_file" \ + "${instance_user}@${instance_name}.${zone}.${project}:$dest_dir" "$src_dir_parent" diff --git a/hack/remote-workstation/rsync.sh b/hack/remote-workstation/rsync.sh new file mode 100755 index 000000000..76cad1d87 --- /dev/null +++ b/hack/remote-workstation/rsync.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Copyright 2021 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"} +dest_dir="./workspace" +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" +config_file="/tmp/gcp-ssh-config" +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if [[ ! -d "$SRC_DIR" ]]; then + echo "ERROR: $SRC_DIR does not exist" + exit 1 +fi + +# Get the ssh fingerprints of all the GCP VMs. +gcloud compute config-ssh --ssh-config-file="$config_file" \ + --project="$project" >/dev/null + +cd "$SRC_DIR" +local_commit=$(git rev-parse --short HEAD) +remote_commit=$("$here"/ssh.sh "cd $dest_dir/pinniped; git rev-parse --short HEAD" 2>/dev/null | tr -dc '[:print:]') + +if [[ -z "$local_commit" || -z "$remote_commit" ]]; then + echo "ERROR: Could not determine currently checked out git commit sha" + exit 1 +fi + +if [[ "$local_commit" != "$remote_commit" ]]; then + echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake." + echo "Local was $SRC_DIR at *${local_commit}*" + echo "Remote was ${instance_name}:${dest_dir}/pinniped at *${remote_commit}*" + exit 1 +fi + +# Skip large files because they are probably compiled binaries. +# Also skip other common filenames that we wouldn't need to sync. +echo "Starting rsync for $SRC_DIR..." 
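Taken together, the remote-workstation helpers form a small lifecycle driven by the PINNIPED_GCP_PROJECT, REMOTE_INSTANCE_NAME, and REMOTE_INSTANCE_USERNAME environment variables (the latter two default to $USER). A hypothetical session, assuming a made-up project name and the default workspace layout:

```bash
export PINNIPED_GCP_PROJECT=my-gcp-project   # hypothetical project name

# Create the VM, install its dependencies, and open a shell on it.
./hack/remote-workstation/create.sh
./hack/remote-workstation/ssh.sh

# Push local edits up to the VM, or pull the VM's copy back down (overwriting local changes).
SRC_DIR="$HOME/workspace/pinniped" ./hack/remote-workstation/rsync.sh
./hack/remote-workstation/rsync-to-local.sh

# Stop the VM to save money, and delete it entirely when finished.
./hack/remote-workstation/stop.sh
./hack/remote-workstation/delete.sh
```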
+rsync \ + --progress --delete --archive --compress --human-readable \ + --max-size 200K \ + --exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \ + --rsh "ssh -F $config_file" \ + "$SRC_DIR" "${instance_user}@${instance_name}.${zone}.${project}:$dest_dir" diff --git a/hack/remote-workstation/ssh.sh b/hack/remote-workstation/ssh.sh new file mode 100755 index 000000000..ab55e09a2 --- /dev/null +++ b/hack/remote-workstation/ssh.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" + +# Run ssh with identities forwarded so you can use them with git on the remote host. +# Optionally run an arbitrary command on the remote host. +# By default, start an interactive session. +gcloud compute ssh --ssh-flag=-A "$instance_user@$instance_name" \ + --project="$project" --zone="$zone" -- "$@" diff --git a/hack/remote-workstation/start.sh b/hack/remote-workstation/start.sh new file mode 100755 index 000000000..27a9e8ad9 --- /dev/null +++ b/hack/remote-workstation/start.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" + +# Start an instance which was previously stopped to save money. +echo "Starting VM $instance_name..." +gcloud compute instances start "$instance_name" \ + --project="$project" --zone="$zone" diff --git a/hack/remote-workstation/stop.sh b/hack/remote-workstation/stop.sh new file mode 100755 index 000000000..e2a7412cb --- /dev/null +++ b/hack/remote-workstation/stop.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +instance_name="${REMOTE_INSTANCE_NAME:-${USER}}" +project="$PINNIPED_GCP_PROJECT" +zone="us-central1-b" + +# Stop the instance, to save money, in a way that it can be restarted. +echo "Stopping VM $instance_name..." +gcloud compute instances stop "$instance_name" \ + --project="$project" --zone="$zone" diff --git a/hack/run-integration-tests.sh b/hack/run-integration-tests.sh new file mode 100755 index 000000000..5a0dffa4e --- /dev/null +++ b/hack/run-integration-tests.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This script will prepare to run the integration tests and then run them. +# Is is a wrapper for prepare-for-integration-tests.sh to make it convenient +# to run the integration tests, potentially running them repeatedly. 
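A usage sketch for this wrapper (the checkout locations are hypothetical; the flags are the ones parsed below):

```bash
# Full cycle: delete and recreate the kind cluster, rebuild the image, then run every integration test.
cd ~/workspace/pinniped
~/workspace/pinniped-ci-branch/hack/run-integration-tests.sh --from-clean-cluster

# Faster re-run: keep the existing cluster and reuse the most recently built image.
~/workspace/pinniped-ci-branch/hack/run-integration-tests.sh --skip-build
```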
+ +set -euo pipefail + +help=no +skip_build=no +delete_kind_cluster=no + +PARAMS="" +while (("$#")); do + case "$1" in + -h | --help) + help=yes + shift + ;; + -s | --skip-build) + skip_build=yes + shift + ;; + -c | --from-clean-cluster) + delete_kind_cluster=yes + shift + ;; + -*) + echo "Error: Unsupported flag $1" >&2 + exit 1 + ;; + *) + PARAMS="$PARAMS $1" + shift + ;; + esac +done +eval set -- "$PARAMS" + +if [[ "$help" == "yes" ]]; then + me="$(basename "${BASH_SOURCE[0]}")" + echo "Usage:" + echo " $me [flags] [path/to/pinniped]" + echo + echo " path/to/pinniped default: \$PWD ($PWD)" + echo + echo "Flags:" + echo " -h, --help: print this usage" + echo " -s, --skip-build: reuse the most recently built image of the app instead of building" + echo " -c, --from-clean-cluster: delete and rebuild the kind cluster before running tests" + exit 1 +fi + +pinniped_path="${1-$PWD}" +cd "$pinniped_path" || exit 1 + +if [[ ! -f Dockerfile || ! -d deploy ]]; then + echo "$pinniped_path does not appear to be the path to the source code repo directory" + exit 1 +fi + +if ! command -v kind >/dev/null; then + echo "Please install kind. e.g. 'brew install kind' for MacOS" + exit 1 +fi +if [[ "$delete_kind_cluster" == "yes" ]]; then + echo "Deleting running kind clusters to prepare a clean slate..." + "$pinniped_path"/hack/kind-down.sh +fi + +if [[ "$skip_build" == "yes" ]]; then + "$pinniped_path"/hack/prepare-for-integration-tests.sh --skip-build +else + "$pinniped_path"/hack/prepare-for-integration-tests.sh +fi + +source /tmp/integration-test-env + +ulimit -n 512 + +echo +echo "Running integration tests..." +go test -race -v -count 1 -timeout 0 ./test/integration +echo "ALL INTEGRATION TESTS PASSED" diff --git a/hack/setup-fly.sh b/hack/setup-fly.sh new file mode 100755 index 000000000..3eb267472 --- /dev/null +++ b/hack/setup-fly.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Define some env vars +source "$script_dir/fly-helpers.sh" + +# Install the fly cli if needed +if [[ ! -f "$FLY_CLI" ]]; then + curl -fL "$CONCOURSE_URL/api/v1/cli?arch=amd64&platform=darwin" -o "$FLY_CLI" + chmod 755 "$FLY_CLI" +fi + +if ! $FLY_CLI targets | tr -s ' ' | cut -f1 -d ' ' | grep -q "$CONCOURSE_TARGET"; then + # Create the target if needed + $FLY_CLI --target "$CONCOURSE_TARGET" login \ + --team-name "$CONCOURSE_TEAM" --concourse-url "$CONCOURSE_URL" +else + # Login if needed + if ! $FLY_CLI --target "$CONCOURSE_TARGET" status; then + $FLY_CLI --target "$CONCOURSE_TARGET" login + fi +fi + +# Upgrade fly if needed +$FLY_CLI --target "$CONCOURSE_TARGET" sync diff --git a/infra/README.md b/infra/README.md new file mode 100644 index 000000000..6cb66fc07 --- /dev/null +++ b/infra/README.md @@ -0,0 +1,60 @@ +# Installing and operating Concourse + +Concourse is made up of a web deployment a worker deployment. + +## Terraform + +We use Terraform to create and update the IaaS infrastructure on which we run all the Concourse components. +This infrastructure must be created before deploying the corresponding Concourse components. + +### Infrastructure Providers + +We use Google Cloud for the infrastructure. + +### Running Terraform + +See [infra/terraform/gcloud/README.md](./terraform/gcloud/README.md) for details of using Terraform +to create or update the Google Cloud infrastructure for Concourse. 
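For orientation, the day-to-day cycle is the standard Terraform one, run from that directory; the linked README is authoritative and this is only a sketch:

```bash
cd infra/terraform/gcloud
terraform init    # install the providers pinned in .terraform.lock.hcl
terraform plan    # review the proposed infrastructure changes
terraform apply   # create or update the Concourse infrastructure
```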
This infrastructure will be used +to run the web and internal workers. + +## Bootstrapping Secrets (after Terraform) + +Before deploying Concourse for the first time, the +[infra/concourse-install/bootstrap-secrets.sh](./concourse-install/bootstrap-secrets.sh) +script must be used to auto-generate some values and store them in a new secret in the Secrets Manager. +This script only needs to be run once. + +1. Create a GitHub OAuth client as described in https://concourse-ci.org/github-auth.html. + The callback URI should be set to `https://ci.pinniped.dev/sky/issuer/callback`. + Take note of the client ID and client secret for use in the next step. +2. Run `GITHUB_CLIENT_ID= GITHUB_CLIENT_SECRET= ./bootstrap-secrets.sh`. + This will create a secret in the GCP Secrets Manager which includes the GitHub client info + along with some auto-generated secrets. + +## Web Deployment + +The "brains" of Concourse is its web deployment. It can be created and updated by running the +[infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh) +script on your laptop. + +## Worker Deployments + +We run our workers on the same GKE cluster where we run the web component. + +See [infra/concourse-install/*-internal-workers.sh](./concourse-install) for scripts to deploy/update the workers, +scale the workers, and view the workers. + +These workers can also be scaled by the jobs in the `concourse-workers` pipeline. + +## Upgrading Concourse + +To upgrade each deployment to a new version of Concourse: + +1. If any infrastructure updates are needed, follow the Terraform instructions again. +2. Change the version of the Helm Chart in the source code of the script used to create each deployment, + and then run each script to upgrade the deployment. Note that this will scale the internal workers deployment + back to its default number of replicas. + 1. [infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh) + 2. [infra/concourse-install/deploy-concourse-internal-workers.sh](./concourse-install/deploy-concourse-internal-workers.sh) +3. Commit and push those script changes. +4. Trigger the CI jobs to scale the internal workers back to the desired number as needed. diff --git a/infra/concourse-install/bootstrap-secrets.sh b/infra/concourse-install/bootstrap-secrets.sh new file mode 100755 index 000000000..bf685fee4 --- /dev/null +++ b/infra/concourse-install/bootstrap-secrets.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# Require two env vars. +if [[ -z "${GITHUB_CLIENT_ID:-}" ]]; then + echo "GITHUB_CLIENT_ID env var must be set" + exit 1 +fi +if [[ -z "${GITHUB_CLIENT_SECRET:-}" ]]; then + echo "GITHUB_CLIENT_SECRET env var must be set" + exit 1 +fi + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# Check pre-reqs. +if ! command -v gcloud &>/dev/null; then + echo "Please install the gcloud CLI" + exit +fi +if ! command -v yq &>/dev/null; then + echo "Please install the yq CLI" + exit +fi +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +# Create a temporary directory for secrets, cleaned up at the end of this script. +trap 'rm -rf "$TEMP_DIR"' EXIT +TEMP_DIR=$(mktemp -d) || exit 1 + +# Create the three keys required to install the Concourse web component.
+# See https://github.com/concourse/concourse-chart/tree/master#secrets +docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t rsa -f /keys/session-signing-key +docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/worker-key +docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/host-key +# Create an extra keypair for our external workers so they can use a different private key +# to avoid sharing the private key of the internal workers to other Kubernetes clusters. +docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/external-worker-key + +# Create an encryption key for DB encryption at rest. +printf "%s" "$(openssl rand -base64 24)" >"$TEMP_DIR/encryption-key" + +# Write a tmp yaml file which bundles together all of the secrets from above. +# The structure of the keys in this file matches the concourse helm chart's values.yaml inputs, +# except for .secrets.externalWorkerKey which is our own custom key. +SECRETS_FILE="$TEMP_DIR/secrets.yaml" +echo "# This secret is auto-generated by infra/concourse-install/bootstrap-secrets.sh" >"$SECRETS_FILE" +yq -i e ".secrets.hostKey = \"$(cat "$TEMP_DIR/host-key")\"" "$SECRETS_FILE" # TSA host key +yq -i e ".secrets.hostKeyPub = \"$(cat "$TEMP_DIR/host-key.pub")\"" "$SECRETS_FILE" # TSA host key pub +yq -i e ".secrets.sessionSigningKey = \"$(cat "$TEMP_DIR/session-signing-key")\"" "$SECRETS_FILE" +yq -i e ".secrets.workerKey = \"$(cat "$TEMP_DIR/worker-key")\"" "$SECRETS_FILE" +yq -i e ".secrets.externalWorkerKey = \"$(cat "$TEMP_DIR/external-worker-key")\"" "$SECRETS_FILE" +# Put both public keys into the workerKeyPub secret, one on each line. +yq -i e ".secrets.workerKeyPub = \"$(cat "$TEMP_DIR/worker-key.pub" "$TEMP_DIR/external-worker-key.pub")\"" "$SECRETS_FILE" +yq -i e ".secrets.encryptionKey = \"$(cat "$TEMP_DIR/encryption-key")\"" "$SECRETS_FILE" +yq -i e ".secrets.githubClientId = \"$GITHUB_CLIENT_ID\"" "$SECRETS_FILE" +yq -i e ".secrets.githubClientSecret = \"$GITHUB_CLIENT_SECRET\"" "$SECRETS_FILE" + +# Save the tmp yaml file into the GCP Secrets Manager for later use. +gcloud secrets create concourse-install-bootstrap \ + --data-file "$SECRETS_FILE" \ + --project "$PINNIPED_GCP_PROJECT" diff --git a/infra/concourse-install/delete-concourse-internal-workers.sh b/infra/concourse-install/delete-concourse-internal-workers.sh new file mode 100755 index 000000000..2a29dcebb --- /dev/null +++ b/infra/concourse-install/delete-concourse-internal-workers.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script deletes the concourse worker from our GKE environment using Helm. + +HELM_RELEASE_NAME="concourse-workers" + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if ! command -v gcloud &>/dev/null; then + echo "Please install the gcloud CLI" + exit +fi +if ! command -v yq &>/dev/null; then + echo "Please install the yq CLI" + exit +fi +if ! command -v kubectl &>/dev/null; then + echo "Please install the kubectl CLI" + exit +fi +if ! command -v helm &>/dev/null; then + echo "Please install the helm CLI" + exit +fi +if ! 
command -v terraform &>/dev/null; then + echo "Please install the terraform CLI" + exit +fi +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +# Create a temporary directory for secrets, cleaned up at the end of this script. +trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT +DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1 + +TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml" + +# Get the output values from terraform. +pushd "$script_dir/../terraform/gcloud" >/dev/null +terraform output --json >"$TERRAFORM_OUTPUT_FILE" +popd >/dev/null + +CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE") +PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE") +ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE") + +# Download the admin kubeconfig for the cluster. +export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE" +chmod 0600 "$KUBECONFIG" + +# Dump out the cluster info for diagnostic purposes. +kubectl cluster-info + +# Delete the helm chart. +helm uninstall -n concourse-worker "$HELM_RELEASE_NAME" \ + --debug \ + --wait diff --git a/infra/concourse-install/deploy-concourse-internal-workers.sh b/infra/concourse-install/deploy-concourse-internal-workers.sh new file mode 100755 index 000000000..2599bc69b --- /dev/null +++ b/infra/concourse-install/deploy-concourse-internal-workers.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script deploys the concourse worker component into our GKE environment using Helm +# and secrets from GCP and Terraform. + +HELM_RELEASE_NAME="concourse-workers" + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if ! command -v gcloud &>/dev/null; then + echo "Please install the gcloud CLI" + exit +fi +if ! command -v yq &>/dev/null; then + echo "Please install the yq CLI" + exit +fi +if ! command -v kubectl &>/dev/null; then + echo "Please install the kubectl CLI" + exit +fi +if ! command -v helm &>/dev/null; then + echo "Please install the helm CLI" + exit +fi +if ! command -v ytt &>/dev/null; then + echo "Please install the ytt CLI" + exit +fi +if ! command -v terraform &>/dev/null; then + echo "Please install the terraform CLI" + exit +fi +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +# Add/update the concourse helm repository. +helm repo add concourse https://concourse-charts.storage.googleapis.com/ +helm repo update concourse + +# Create a temporary directory for secrets, cleaned up at the end of this script. +trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT +DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1 + +TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml" + +# Get the output values from terraform. +pushd "$script_dir/../terraform/gcloud" >/dev/null +terraform output --json >"$TERRAFORM_OUTPUT_FILE" +popd >/dev/null + +CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE") +PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE") +ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE") + +# Download the admin kubeconfig for the cluster. 
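As background for the `yq` lookups above: `terraform output --json` emits one object per Terraform output, and each setting is read out of that object's `.value` field. A minimal sketch with an illustrative value:

```bash
# "terraform output --json" produces entries shaped roughly like:
#   { "cluster-name": { "sensitive": false, "type": "string", "value": "pinniped-concourse" }, ... }
# so each setting is extracted from its ".value" field:
yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE"   # -> pinniped-concourse
```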
+export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE" +chmod 0600 "$KUBECONFIG" + +# Download some secrets. These were created once by bootstrap-secrets.sh. +BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml" +gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE" + +TSA_HOST_KEY_PUB=$(yq eval '.secrets.hostKeyPub' "$BOOTSTRAP_SECRETS_FILE") +WORKER_PRIVATE_KEY=$(yq eval '.secrets.workerKey' "$BOOTSTRAP_SECRETS_FILE") + +# Dump out the cluster info for diagnostic purposes. +kubectl cluster-info + +# Some of the configuration options used below were inspired by how HushHouse runs on GKE. +# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/workers/values.yaml + +# Install/upgrade the helm chart. +# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml +# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart +# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file. +helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \ + --version 17.3.1 \ + --debug \ + --install \ + --wait \ + --create-namespace \ + --namespace concourse-worker \ + --values "$script_dir/internal-workers/values-workers.yaml" \ + --set concourse.worker.tsa.publicKey="$TSA_HOST_KEY_PUB" \ + --set concourse.worker.tsa.workerPrivateKey="$WORKER_PRIVATE_KEY" \ + --set secrets.workerKey="$WORKER_PRIVATE_KEY" \ + --set secrets.hostKeyPub="$TSA_HOST_KEY_PUB" \ + --post-renderer "$script_dir/internal-workers/ytt-helm-postrender-workers.sh" + +# By default, it will not be possible for the autoscaler to scale down to one node. +# The autoscaler logs will show that the kube-dns pod cannot be moved. See +# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios +# for how to view and interpret the autoscaler logs. +# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error +# that we were getting for the kube-dns pod in the logs. +kubectl create poddisruptionbudget kube-dns-pdb \ + --namespace=kube-system \ + --selector k8s-app=kube-dns \ + --max-unavailable 1 \ + --dry-run=client -o yaml | kubectl apply -f - diff --git a/infra/concourse-install/deploy-concourse-web.sh b/infra/concourse-install/deploy-concourse-web.sh new file mode 100755 index 000000000..bfbd79c17 --- /dev/null +++ b/infra/concourse-install/deploy-concourse-web.sh @@ -0,0 +1,120 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script deploys the concourse web component into our GKE environment using Helm +# and secrets from GCP and Terraform. + +HELM_RELEASE_NAME="concourse-web" + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if ! command -v gcloud &>/dev/null; then + echo "Please install the gcloud CLI" + exit +fi +if ! command -v yq &>/dev/null; then + echo "Please install the yq CLI" + exit +fi +if ! command -v kubectl &>/dev/null; then + echo "Please install the kubectl CLI" + exit +fi +if ! command -v helm &>/dev/null; then + echo "Please install the helm CLI" + exit +fi +if ! command -v ytt &>/dev/null; then + echo "Please install the ytt CLI" + exit +fi +if ! 
command -v terraform &>/dev/null; then + echo "Please install the terraform CLI" + exit +fi +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + echo "Please run \`gcloud auth login\`" + exit 1 +fi + +# Add/update the concourse helm repository. +helm repo add concourse https://concourse-charts.storage.googleapis.com/ +helm repo update concourse + +# Create a temporary directory for secrets, cleaned up at the end of this script. +trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT +DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1 + +TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml" + +# Get the output values from terraform. +pushd "$script_dir/../terraform/gcloud" >/dev/null +terraform output --json >"$TERRAFORM_OUTPUT_FILE" +popd >/dev/null + +CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE") +PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE") +ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE") +WEB_IP_ADDRESS=$(yq eval '.web-ip.value' "$TERRAFORM_OUTPUT_FILE") +WEB_HOSTNAME=$(yq eval '.web-hostname.value' "$TERRAFORM_OUTPUT_FILE") +DB_IP_ADDRESS=$(yq eval '.database-ip.value' "$TERRAFORM_OUTPUT_FILE") +DB_USERNAME=$(yq eval '.database-username.value' "$TERRAFORM_OUTPUT_FILE") +DB_PASSWORD=$(yq eval '.database-password.value' "$TERRAFORM_OUTPUT_FILE") +DB_CA_CERT=$(yq eval '.database-ca-cert.value' "$TERRAFORM_OUTPUT_FILE") +DB_CLIENT_CERT=$(yq eval '.database-cert.value' "$TERRAFORM_OUTPUT_FILE") +DB_CLIENT_KEY=$(yq eval '.database-private-key.value' "$TERRAFORM_OUTPUT_FILE") + +# Download the admin kubeconfig for the cluster. +export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE" +chmod 0600 "$KUBECONFIG" + +# Download some secrets. These were created once by bootstrap-secrets.sh. +BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml" +gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE" + +# Dump out the cluster info for diagnostic purposes. +kubectl cluster-info + +# Some of the configuration options used below were inspired by how HushHouse runs on GKE. +# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/hush-house/values.yaml + +# Install/upgrade the helm chart. +# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml +# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart +# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file. 
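Because the chart version (rather than an explicit image tag) decides which Concourse release gets installed, it can help to check what a chart version maps to before upgrading. A sketch using standard Helm commands, with the chart version pinned below:

```bash
# Show the default imageTag baked into the pinned chart version.
helm repo update concourse
helm show values concourse/concourse --version 17.3.1 | grep imageTag
```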
+helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \ + --version 17.3.1 \ + --debug \ + --install \ + --wait \ + --create-namespace \ + --namespace concourse-web \ + --values "$script_dir/web/values-web.yaml" \ + --values "$BOOTSTRAP_SECRETS_FILE" \ + --set web.service.api.loadBalancerIP="$WEB_IP_ADDRESS" \ + --set web.service.workerGateway.loadBalancerIP="$WEB_IP_ADDRESS" \ + --set concourse.web.externalUrl="https://$WEB_HOSTNAME" \ + --set concourse.web.postgres.host="$DB_IP_ADDRESS" \ + --set secrets.postgresUser="$DB_USERNAME" \ + --set secrets.postgresPassword="$DB_PASSWORD" \ + --set secrets.postgresCaCert="$DB_CA_CERT" \ + --set secrets.postgresClientCert="$DB_CLIENT_CERT" \ + --set secrets.postgresClientKey="$DB_CLIENT_KEY" \ + --post-renderer "$script_dir/web/ytt-helm-postrender-web.sh" + +# By default, it will not be possible for the autoscaler to scale down to one node. +# The autoscaler logs will show that the kube-dns pod cannot be moved. See +# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios +# for how to view and interpret the autoscaler logs. +# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error +# that we were getting for the kube-dns pod in the logs. +kubectl create poddisruptionbudget kube-dns-pdb \ + --namespace=kube-system \ + --selector k8s-app=kube-dns \ + --max-unavailable 1 \ + --dry-run=client -o yaml | kubectl apply -f - diff --git a/infra/concourse-install/internal-workers/init-container-overlay-workers.yaml b/infra/concourse-install/internal-workers/init-container-overlay-workers.yaml new file mode 100644 index 000000000..817d696d7 --- /dev/null +++ b/infra/concourse-install/internal-workers/init-container-overlay-workers.yaml @@ -0,0 +1,24 @@ +#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:overlay", "overlay") + +#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS. +#! All containers must have requests equal to limits, including the initContainers. + +#@overlay/match by=overlay.subset({"kind": "StatefulSet", "metadata":{"name":"concourse-worker"}}), expects=1 +--- +spec: + template: + spec: + initContainers: + - #@overlay/match by="name" + name: concourse-worker-init-rm + #@overlay/match missing_ok=True + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 1000m + memory: 1Gi diff --git a/infra/concourse-install/internal-workers/values-workers.yaml b/infra/concourse-install/internal-workers/values-workers.yaml new file mode 100644 index 000000000..f1953b956 --- /dev/null +++ b/infra/concourse-install/internal-workers/values-workers.yaml @@ -0,0 +1,79 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-worker". +fullnameOverride: concourse + +web: + enabled: false + +postgresql: + enabled: false + +worker: + # In an effort to save money, default to 1 worker. 
+ replicas: 1 + nodeSelector: { cloud.google.com/gke-nodepool: workers-2 } # the name of the nodepool from terraform + hardAntiAffinity: true + minAvailable: 0 + terminationGracePeriodSeconds: 3600 + livenessProbe: + periodSeconds: 60 + failureThreshold: 10 + timeoutSeconds: 45 + resources: + # Inspired by https://github.com/concourse/hush-house/blob/16f52e57c273282ebace68051b0fe9133dc3a04e/deployments/with-creds/workers/values.yaml#L30-L32 + # + # Note that Kubernetes uses Ki (Kibibytes) and Gi (Gibibytes). You can do conversions by doing google + # searches using the more commonly used names for those units, e.g. searching "29061248 KiB to GiB". + # + # Limit to using all available CPUs and most of the available memory in our e2-standard-8 VM nodes. + # According to the "Allocatable" section of the "kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-2" output, + # each node has 29061248 Ki, which is equal to 27.7149658203 Gi of memory allocatable, + # and each node has 7910m cpu allocatable. + # + # By making our requests equal to our limits, we should be assigned "Guaranteed" QoS. + # But we need to leave enough space for all other pods' requests too, because GKE runs several pods on each node automatically. + # The first node in the node pool has the most pods scheduled on it, so we will choose our values based on the first node + # by looking at its "Allocated resources" section of the describe output. + # CPU: + # - On the first node, the other pods' CPU requests total 1324m (16%). + # - The available CPU for our pod is 7910m allocatable - 1324m allocated = 6586m remaining. + # Memory: + # - On the first node, the other pods' memory requests total 1394740096 (bytes) (4%) = 1.298952937126 Gi. + # - The available memory for our pod is 27.7149658203 Gi - 1.298952937126 Gi = 26.4160128832 Gi. + # However, Google can change these values over time, so we need to leave a little extra room + # in case Google's pods take a little more later. + # + # In order for the pod to be assigned "Guaranteed" QoS, all the containers need to + # have requests equal to limits, so the initContainer also has similar settings applied + # by the init-container-overlay.yaml overlay. + limits: + cpu: 6480m + memory: 26Gi + requests: + cpu: 6480m + memory: 26Gi + +persistence: + worker: + size: 375Gi + storageClass: premium-rwo + +concourse: + worker: + # rebalanceInterval: 2h + baggageclaim: + driver: overlay + healthcheckTimeout: 40s + runtime: containerd + containerd: + # networkPool: "10.254.0.0/16" + # maxContainers is usually set to 250, but increasing it to see if we can squeeze more from each worker. + maxContainers: 300 + restrictedNetworks: + - 169.254.169.254/32 + tsa: + hosts: + # This service name must match the name decided by the web deployment + - concourse-web-worker-gateway.concourse-web.svc.cluster.local:2222 diff --git a/infra/concourse-install/internal-workers/ytt-helm-postrender-workers.sh b/infra/concourse-install/internal-workers/ytt-helm-postrender-workers.sh new file mode 100755 index 000000000..3c3918ca3 --- /dev/null +++ b/infra/concourse-install/internal-workers/ytt-helm-postrender-workers.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +ytt -f "$script_dir/init-container-overlay-workers.yaml" -f- diff --git a/infra/concourse-install/scale-down-concourse-internal-workers.sh b/infra/concourse-install/scale-down-concourse-internal-workers.sh new file mode 100755 index 000000000..c11ebea41 --- /dev/null +++ b/infra/concourse-install/scale-down-concourse-internal-workers.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see +# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios +# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node. + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +CLUSTER="pinniped-concourse" +PROJECT="$PINNIPED_GCP_PROJECT" +ZONE="us-central1-c" +STATEFULSET="concourse-worker" +NAMESPACE="concourse-worker" +NODEPOOL="workers-2" + +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$PROJECT" +fi + +trap 'rm -rf "$TEMP_DIR"' EXIT +TEMP_DIR=$(mktemp -d) || exit 1 + +# Download the admin kubeconfig for the GKE cluster created by terraform. +export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" + +current=$(kubectl get statefulset "$STATEFULSET" \ + --namespace "$NAMESPACE" \ + --output=jsonpath="{.spec.replicas}" \ + --kubeconfig="${KUBECONFIG}") + +desired=$((current - 1)) + +echo "current scale=$current" +echo "desired scale=$desired" + +minNodes=$(gcloud container clusters describe "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" \ + --format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount") + +if [[ $desired -lt $minNodes ]]; then + echo "ERROR: will not scale below the cluster autoscaler limit of $minNodes for the node pool" + exit 1 +fi + +kubectl scale \ + --current-replicas=$current \ + --replicas=$desired \ + --kubeconfig="${KUBECONFIG}" \ + --namespace "$NAMESPACE" \ + "statefulset/$STATEFULSET" diff --git a/infra/concourse-install/scale-print-concourse-internal-workers.sh b/infra/concourse-install/scale-print-concourse-internal-workers.sh new file mode 100755 index 000000000..b47228d64 --- /dev/null +++ b/infra/concourse-install/scale-print-concourse-internal-workers.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see +# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios +# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node. 
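This script bundles exactly these checks; for a quick manual spot-check the rough equivalents are below, assuming your kubeconfig already points at the pinniped-concourse cluster and you are logged in to fly:

```bash
# Desired replica count of the worker StatefulSet:
kubectl get statefulset concourse-worker -n concourse-worker -o jsonpath='{.spec.replicas}'
# Nodes currently backing the worker node pool:
kubectl get nodes -l cloud.google.com/gke-nodepool=workers-2
# Workers actually registered with Concourse:
fly --target pinniped workers
```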
+ +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +CLUSTER="pinniped-concourse" +PROJECT="$PINNIPED_GCP_PROJECT" +ZONE="us-central1-c" +STATEFULSET="concourse-worker" +NAMESPACE="concourse-worker" +NODEPOOL="workers-2" +TARGET="pinniped" + +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$PINNIPED_GCP_PROJECT" +fi + +trap 'rm -rf "$TEMP_DIR"' EXIT +TEMP_DIR=$(mktemp -d) || exit 1 + +# Download the admin kubeconfig for the GKE cluster created by terraform. +export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" + +current=$(kubectl get statefulset "$STATEFULSET" \ + --namespace "$NAMESPACE" \ + --output=jsonpath="{.spec.replicas}" \ + --kubeconfig="${KUBECONFIG}") + +minNodes=$(gcloud container clusters describe "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" \ + --format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount") + +maxNodes=$(gcloud container clusters describe "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" \ + --format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount") + +echo +echo "current scale=$current, min=$minNodes, max=$maxNodes" + +echo +echo "Current pods..." +kubectl get pods \ + --output wide \ + --namespace "$NAMESPACE" \ + --kubeconfig="${KUBECONFIG}" + +echo +echo "Volumes usage for current pods..." +kubectl get pods \ + --namespace "${NAMESPACE}" \ + --kubeconfig="${KUBECONFIG}" \ + --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' \ + | xargs -n1 -I {} bash -c "echo \"{}: \" && kubectl exec {} -n ${NAMESPACE} -c concourse-worker --kubeconfig ${KUBECONFIG} -- df -ah /concourse-work-dir | sed \"s|^| |\"" \ + +echo +echo "Current nodes in nodepool $NODEPOOL..." +kubectl get nodes \ + -l cloud.google.com/gke-nodepool=$NODEPOOL \ + --kubeconfig="${KUBECONFIG}" + +echo +echo "Current fly workers..." +if ! fly --target "$TARGET" status >/dev/null; then + fly --target "$TARGET" login +fi +fly --target "$TARGET" workers + +echo "" +echo "Note: If the number of pods, nodes, and fly workers are not all the same," +echo "and some time has passed since you have changed the scale, then something may be wrong." diff --git a/infra/concourse-install/scale-up-concourse-internal-workers.sh b/infra/concourse-install/scale-up-concourse-internal-workers.sh new file mode 100755 index 000000000..7b8250226 --- /dev/null +++ b/infra/concourse-install/scale-up-concourse-internal-workers.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see +# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios +# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node. 
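Like the print helper above, this scale-up script can also be run by hand from a laptop. A hypothetical invocation, assuming you are already logged in to gcloud with access to the project:

```bash
# Add one worker replica (the script refuses to exceed the node pool's autoscaler maximum).
PINNIPED_GCP_PROJECT=my-gcp-project ./infra/concourse-install/scale-up-concourse-internal-workers.sh

# Afterwards, confirm that pods, nodes, and fly workers all agree.
PINNIPED_GCP_PROJECT=my-gcp-project ./infra/concourse-install/scale-print-concourse-internal-workers.sh
```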
+ +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +CLUSTER="pinniped-concourse" +PROJECT="$PINNIPED_GCP_PROJECT" +ZONE="us-central1-c" +STATEFULSET="concourse-worker" +NAMESPACE="concourse-worker" +NODEPOOL="workers-2" + +if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then + gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$PROJECT" +fi + +trap 'rm -rf "$TEMP_DIR"' EXIT +TEMP_DIR=$(mktemp -d) || exit 1 + +# Download the admin kubeconfig for the GKE cluster created by terraform. +export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml" +gcloud container clusters get-credentials "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" + +current=$(kubectl get statefulset "$STATEFULSET" \ + --namespace "$NAMESPACE" \ + --output=jsonpath="{.spec.replicas}" \ + --kubeconfig="${KUBECONFIG}") + +desired=$((current + 1)) + +echo "current scale=$current" +echo "desired scale=$desired" + +maxNodes=$(gcloud container clusters describe "$CLUSTER" \ + --project "$PROJECT" \ + --zone "$ZONE" \ + --format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount") + +if [[ $desired -gt $maxNodes ]]; then + echo "ERROR: will not scale above the cluster autoscaler limit of $maxNodes for the node pool" + exit 1 +fi + +kubectl scale \ + --current-replicas=$current \ + --replicas=$desired \ + --kubeconfig="${KUBECONFIG}" \ + --namespace "$NAMESPACE" \ + "statefulset/$STATEFULSET" diff --git a/infra/concourse-install/web/init-container-overlay-web.yaml b/infra/concourse-install/web/init-container-overlay-web.yaml new file mode 100644 index 000000000..482d25f58 --- /dev/null +++ b/infra/concourse-install/web/init-container-overlay-web.yaml @@ -0,0 +1,24 @@ +#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:overlay", "overlay") + +#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS. +#! All containers must have requests equal to limits, including the initContainers. + +#@overlay/match by=overlay.subset({"kind": "Deployment", "metadata":{"name":"concourse-web"}}), expects=1 +--- +spec: + template: + spec: + initContainers: + - #@overlay/match by="name" + name: concourse-migration + #@overlay/match missing_ok=True + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 1000m + memory: 1Gi diff --git a/infra/concourse-install/web/values-web.yaml b/infra/concourse-install/web/values-web.yaml new file mode 100644 index 000000000..3791914be --- /dev/null +++ b/infra/concourse-install/web/values-web.yaml @@ -0,0 +1,119 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-web". +fullnameOverride: concourse + +worker: + enabled: false + +postgresql: + enabled: false + +web: + # In an effort to save money, default to 1 web server. 
+ replicas: 1 + nodeSelector: { cloud.google.com/gke-nodepool: generic-1 } # the name of the nodepool from terraform + additionalAffinities: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: concourse-web # see comment on fullnameOverride above + release: concourse-web # this must be the same name as the helm release in deploy-concourse-web.sh + service: + api: + type: LoadBalancer + workerGateway: + type: LoadBalancer + # The first node in the generic-1 nodepool (using e2-highcpu-8 VM) has lots of GKE and Kubernetes pods running on it. + # According to the "allocatable" section of the "kubectl get node -o yaml" output, the first node has + # 7910m cpu and 6179084 Ki memory (which is about 5.893 Gi). + # The total requests from the GKE/Kube pods is 1017m cpu and 1046766976 (bytes) memory (which is about 0.975 Gi). + # The difference between the allocatable memory and the requested memory is 4.918 Gi, so we will request slightly + # less than that to leave a little headroom on the cluster in case some of these pods get upgraded and decide + # to request more in the future. Similarly, the cpu difference is 6893m. + resources: + requests: + cpu: 6400m + memory: 4.7Gi + limits: + cpu: 6400m + memory: 4.7Gi + strategy: + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + +concourse: + web: + localAuth: + enabled: false + auth: + mainTeam: + localUser: "" + github: + # From https://concourse-ci.org/github-auth.html... + # "Note that the client must be created under an organization if you want to authorize users based on + # organization/team membership. In addition, the GitHub application must have at least read access on + # the organization's members. If the client is created under a personal account, only individual users + # can be authorized." + # We requested that the owner of the vmware-tanzu org create an OIDC client for us. + # Because it was created in the org, it should have permissions to read team memberships during a login. + # The client ID and client secret are stored in the bootstrap secret in the Secrets Manager + # (see infra/README.md for more info about the bootstrap secret). + team: vmware-tanzu:pinniped-owners + github: + enabled: true + bindPort: 80 + clusterName: pinniped-ci + # containerPlacementStrategy: random + defaultDaysToRetainBuildLogs: 60 + # enableAcrossStep: true + # enablePipelineInstances: true + # enableBuildAuditing: true + # enableContainerAuditing: true + # enableGlobalResources: true + # enableJobAuditing: true + # enablePipelineAuditing: true + # enableResourceAuditing: true + # enableSystemAuditing: true + # enableTeamAuditing: true + # enableVolumeAuditing: true + # enableWorkerAuditing: true + enableCacheStreamedVolumes: true + enableResourceCausality: true + enableRedactSecrets: true + baggageclaimResponseHeaderTimeout: 10m + encryption: + enabled: true + kubernetes: + keepNamespaces: true + letsEncrypt: + enabled: true + acmeURL: "https://acme-v02.api.letsencrypt.org/directory" + tls: + enabled: true + bindPort: 443 + postgres: + database: atc + sslmode: verify-ca + gc: + # See https://concourse-ci.org/performance-tuning.html#concourse_gc_failed_grace_period. + # Defaults to 5 days. 
This means that when lots of jobs in a pipeline fail, all of those + # containers will stick around for 5 days, causing you to quickly reach the max containers + # per worker and start seeing orange jobs complaining that they cannot start containers. + # Its nice for debugging when you can hijack a container of a job that failed a long time + # ago, but it comes at the cost of needing more workers to hold on to those containers. + failedGracePeriod: 10m + # logLevel: debug + tsa: + # logLevel: debug + +secrets: + localUsers: "" diff --git a/infra/concourse-install/web/ytt-helm-postrender-web.sh b/infra/concourse-install/web/ytt-helm-postrender-web.sh new file mode 100755 index 000000000..8699dab11 --- /dev/null +++ b/infra/concourse-install/web/ytt-helm-postrender-web.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +ytt -f "$script_dir/init-container-overlay-web.yaml" -f- diff --git a/infra/terraform/gcloud/.terraform.lock.hcl b/infra/terraform/gcloud/.terraform.lock.hcl new file mode 100644 index 000000000..148f9ac2e --- /dev/null +++ b/infra/terraform/gcloud/.terraform.lock.hcl @@ -0,0 +1,64 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "5.11.0" + constraints = "~> 5.0" + hashes = [ + "h1:Ezg3fsY84CB/2P00ZwQEECuIfJd6UUYs5tIptN2kzsE=", + "h1:FV7t+G3+rJD3aN5Yr+FY8/cDG+FKhFCt8XvLJkqCcY8=", + "zh:444815a900947de3cb4e3aac48bf8cd98009130c110e3cee1e72698536046fee", + "zh:45ca22a2f44fe67f9ff71528dcd93493281e34bff7791f5eb24c86e76f32956d", + "zh:53e2e33824743e9e620454438de803de10572bd79ce16034abfc91ab1877be7a", + "zh:5eb699830a07320f896a3da7cdee169ab5fa356a6d38858b8b9337f1e4e30904", + "zh:6837cd8d9d63503e138ec3ebf52f850ca786824a3b0d5b9dfecec303f1656ca6", + "zh:7adde1fe2fc8966812bcbfeb24580cbb53f2f5301bd793eaa70ad753ba6b2d3c", + "zh:92052fd7ec776cd221f19db4624ae4ed1550c95c2984c9f3b6c54cea8896812b", + "zh:b0305aab81220b7d5711225224f5baad8fc6f5dd3a8199073966af8a151e2932", + "zh:e7b5aa624d89664803dd545f261261806b7f6607c19f6ceaf61f9011b0e02e63", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fbc04244e1f666ce0320b4eb0efb9cae460a5d688fc039637c8fe745665c19e5", + "zh:ff3553298929629ae2ad77000b3e050394e2f00c04e90a24268e3dfe6a6342c4", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "5.11.0" + constraints = "~> 5.0" + hashes = [ + "h1:izjzT8NnaePEXKbLQa+D4gw7HUYvK7NgIL3TJ23rjZk=", + "h1:teaW5i4Za+IHUuYSg3mRwJwVdLwKbND9UdCwG4MBvkY=", + "zh:0efa82e6fe2c83bd5280c3009db1c3acc9cdad3c9419b6ec721fbefc9f832449", + "zh:371df01e4f38b828195d115c9a8bebddebec4d34e9ef74cf3a79161da08e44b2", + "zh:5089967c420c5e4a4ba0d4c8c6ca344c7bb2476ec928f8319856260eacded369", + "zh:798a65c79386d356d6a097de680f4ece8982daae1cb0e10d6c53b383efef45f0", + "zh:90178911ac0e624c69a54a992fb3425ef09fdfb3e34b496ad7b6e168e80d4e0c", + "zh:b59c60f8479b8f0c8e91a93a4e707ce6d17c8e50e2f5afaf1d9a03c03cfedbf8", + "zh:c7f946282d80223ab3a6b284c22e4b53ffcd7b1a02449bb95a350007f30c87dc", + "zh:cd60e76987c2fdce2c84219eaff9390cd135f88aa9a27bc4d79a8fd4a8d09622", + "zh:de06bfa0393206c0253ebdea70821cb3b08ef87d5d4844be3ae463abfb4e1884", + "zh:de494bad600cca78986ce63d1018f5dbc1a1fcc2d4c41c94c15d5346f2b0dd1e", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + 
"zh:f97a8b6e83e0083dcb42a87e8e418ab33f12d641f9cdfdc92d154ba7fd7398fb", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.0" + hashes = [ + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", + ] +} diff --git a/infra/terraform/gcloud/README.md b/infra/terraform/gcloud/README.md new file mode 100644 index 000000000..f17399714 --- /dev/null +++ b/infra/terraform/gcloud/README.md @@ -0,0 +1,28 @@ +# Terraform for Google Cloud Concourse Infrastructure + +We used Terraform to create the infra needed for running our own Concourse. +This includes things like a GKE cluster, a static IP, a DNS entry, and a Postgres database. + +NOTE: Do not manually edit these resources using the Google Cloud UI, API, or CLI. +Instead, please update the `.tf` files and follow the below steps again. + +To run Terraform to create or update the infrastructure: +1. Install the `gcloud` CLI and authenticate as yourself, if you haven't already. +2. Use `gcloud auth application-default login` if you haven't already. This is not optional. +3. Install terraform if you haven't already. Use brew or brew install tfenv and then use tfenv. + At the time of writing this README, we were using Terraform v1.6.6. +4. cd into this directory: `cd infra/terraform/gcloud` +5. Run `terraform init`, if you haven't already for this directory. +6. Run `terraform fmt`. +7. Run `terraform validate`. +8. Run `TF_VAR_project=$PINNIPED_GCP_PROJECT terraform apply`. + This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT` + whose value is the name of the GCP project. + +If you do not need to run `terraform apply` because someone else has already done that, +then you still need to follow the above directions up to and including running `terraform init` +to set up terraform on your computer. + +To delete the entire Concourse deployment and all its related cloud infrastructure, +use `terraform destroy`. There is no way to undo this action. This will also delete the Cloud SQL +database which contains all CI job history. diff --git a/infra/terraform/gcloud/address/main.tf b/infra/terraform/gcloud/address/main.tf new file mode 100644 index 000000000..b7f3feeac --- /dev/null +++ b/infra/terraform/gcloud/address/main.tf @@ -0,0 +1,26 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Use our pre-existing DNS zone. +data "google_dns_managed_zone" "main" { + name = var.dns-zone +} + +# Reserved external static IPv4 address for the `web` instances. +# This is needed so that we can have a static IP for `ci.pinniped.dev`. 
+resource "google_compute_address" "main" { + name = "${var.subdomain}-${var.dns-zone}" +} + +# Make a DNS A record for our subdomain to point at our new static IP. +resource "google_dns_record_set" "main" { + name = "${var.subdomain}.${data.google_dns_managed_zone.main.dns_name}" + type = "A" + ttl = 300 + + managed_zone = data.google_dns_managed_zone.main.name + + rrdatas = [ + google_compute_address.main.address, + ] +} diff --git a/infra/terraform/gcloud/address/outputs.tf b/infra/terraform/gcloud/address/outputs.tf new file mode 100644 index 000000000..5f0222659 --- /dev/null +++ b/infra/terraform/gcloud/address/outputs.tf @@ -0,0 +1,10 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +output "ip" { + value = google_compute_address.main.address +} + +output "hostname" { + value = trimsuffix(google_dns_record_set.main.name, ".") +} diff --git a/infra/terraform/gcloud/address/variables.tf b/infra/terraform/gcloud/address/variables.tf new file mode 100644 index 000000000..067910e03 --- /dev/null +++ b/infra/terraform/gcloud/address/variables.tf @@ -0,0 +1,12 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +variable "dns-zone" { + description = "Name of the DNS zone" + type = string +} + +variable "subdomain" { + description = "Subdomain under the DNS zone to register" + type = string +} diff --git a/infra/terraform/gcloud/cluster/main.tf b/infra/terraform/gcloud/cluster/main.tf new file mode 100644 index 000000000..c0397f073 --- /dev/null +++ b/infra/terraform/gcloud/cluster/main.tf @@ -0,0 +1,124 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +module "vpc" { + source = "./vpc" + + name = var.name + region = var.region + + vms-cidr = "10.10.0.0/16" + pods-cidr = "10.11.0.0/16" + services-cidr = "10.12.0.0/16" +} + +resource "google_service_account" "default" { + account_id = "${var.name}-sa" + display_name = "GKE Node SA for ${var.name}" +} + +# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster +resource "google_container_cluster" "main" { + # Allow "terraform destroy" for this cluster. + deletion_protection = false + + name = var.name + location = var.zone + + network = module.vpc.name + subnetwork = module.vpc.subnet-name + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. This allows node pools to be added and removed without recreating the cluster. + # So we create the smallest possible default node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + min_master_version = "1.30.4-gke.1348000" + + ip_allocation_policy { + cluster_secondary_range_name = module.vpc.pods-range-name + services_secondary_range_name = module.vpc.services-range-name + } + + addons_config { + http_load_balancing { + disabled = false + } + + horizontal_pod_autoscaling { + disabled = false + } + + network_policy_config { + disabled = false + } + } + + maintenance_policy { + daily_maintenance_window { + start_time = "03:00" + } + } + + network_policy { + provider = "CALICO" + enabled = true + } + + workload_identity_config { + workload_pool = "${var.project}.svc.id.goog" + } + + cluster_autoscaling { + autoscaling_profile = "OPTIMIZE_UTILIZATION" + } +} + +# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_node_pool +resource "google_container_node_pool" "main" { + provider = google-beta + for_each = var.node-pools + + location = var.zone + cluster = google_container_cluster.main.name + name = each.key + + autoscaling { + min_node_count = each.value.min + max_node_count = each.value.max + } + + management { + auto_repair = true + auto_upgrade = each.value.auto-upgrade + } + + node_config { + preemptible = each.value.preemptible + machine_type = each.value.machine-type + local_ssd_count = each.value.local-ssds + disk_size_gb = each.value.disk-size + disk_type = each.value.disk-type + image_type = each.value.image + + workload_metadata_config { + mode = "GKE_METADATA" + } + + metadata = { + disable-legacy-endpoints = "true" + } + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + timeouts { + create = "30m" + delete = "30m" + } +} diff --git a/infra/terraform/gcloud/cluster/outputs.tf b/infra/terraform/gcloud/cluster/outputs.tf new file mode 100644 index 000000000..b6fdd9b0c --- /dev/null +++ b/infra/terraform/gcloud/cluster/outputs.tf @@ -0,0 +1,10 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +output "vpc-uri" { + value = module.vpc.uri +} + +output "cluster-name" { + value = google_container_cluster.main.name +} diff --git a/infra/terraform/gcloud/cluster/variables.tf b/infra/terraform/gcloud/cluster/variables.tf new file mode 100644 index 000000000..7f724df11 --- /dev/null +++ b/infra/terraform/gcloud/cluster/variables.tf @@ -0,0 +1,25 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +variable "name" { + default = "" + description = "The name of the GKE cluster to be created." +} + +variable "zone" { + default = "" + description = "The zone where the cluster should live." +} + +variable "region" { + default = "" + description = "The region in which the cluster should be located at." +} + +variable "project" { + description = "The Google GCP project to host the resources." +} + +variable "node-pools" { + description = "A list of node pool configurations to create and assign to the cluster." +} diff --git a/infra/terraform/gcloud/cluster/vpc/main.tf b/infra/terraform/gcloud/cluster/vpc/main.tf new file mode 100644 index 000000000..7cc0cf5b5 --- /dev/null +++ b/infra/terraform/gcloud/cluster/vpc/main.tf @@ -0,0 +1,69 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +resource "google_compute_network" "main" { + name = var.name + + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "main" { + name = "${var.name}-sn-1" + + ip_cidr_range = var.vms-cidr + network = google_compute_network.main.name + region = var.region + + secondary_ip_range = [ + { + range_name = var.pods-range-name + ip_cidr_range = var.pods-cidr + }, + { + range_name = var.services-range-name + ip_cidr_range = var.services-cidr + } + ] +} + +resource "google_compute_firewall" "internal-ingress" { + name = "${var.name}-internal" + + network = google_compute_network.main.name + direction = "INGRESS" + + source_ranges = [ + var.vms-cidr, + var.pods-cidr, + var.services-cidr, + ] + + allow { + protocol = "icmp" + } + + allow { + protocol = "tcp" + } + + allow { + protocol = "udp" + } +} + +resource "google_compute_firewall" "external-ingress" { + name = "${var.name}-external" + network = google_compute_network.main.name + direction = "INGRESS" + + allow { + protocol = "icmp" + } + + allow { + protocol = "tcp" + ports = ["22"] + } + + source_ranges = ["0.0.0.0/0"] +} diff --git a/infra/terraform/gcloud/cluster/vpc/outputs.tf b/infra/terraform/gcloud/cluster/vpc/outputs.tf new file mode 100644 index 000000000..acbfb0579 --- /dev/null +++ b/infra/terraform/gcloud/cluster/vpc/outputs.tf @@ -0,0 +1,22 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +output "name" { + value = google_compute_network.main.name +} + +output "subnet-name" { + value = google_compute_subnetwork.main.name +} + +output "pods-range-name" { + value = var.pods-range-name +} + +output "services-range-name" { + value = var.services-range-name +} + +output "uri" { + value = google_compute_network.main.self_link +} diff --git a/infra/terraform/gcloud/cluster/vpc/variables.tf b/infra/terraform/gcloud/cluster/vpc/variables.tf new file mode 100644 index 000000000..bc17a84c5 --- /dev/null +++ b/infra/terraform/gcloud/cluster/vpc/variables.tf @@ -0,0 +1,32 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +variable "name" { + description = "TODO" +} + +variable "region" { + description = "TODO" +} + +variable "vms-cidr" { + description = "TODO" +} + +variable "pods-cidr" { + description = "TODO" +} + +variable "pods-range-name" { + default = "pods-range" + description = "TODO" +} + +variable "services-cidr" { + description = "TODO" +} + +variable "services-range-name" { + default = "services-range" + description = "TODO" +} diff --git a/infra/terraform/gcloud/database/main.tf b/infra/terraform/gcloud/database/main.tf new file mode 100644 index 000000000..d73639b3e --- /dev/null +++ b/infra/terraform/gcloud/database/main.tf @@ -0,0 +1,78 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# A piece of randomization that gets consumed by the +# `google_sql_database_instance` resources. +# +# This is needed in order to facilitate creating and recreating instances +# without waiting for the whole period that GCP requires to reuse name. 
+resource "random_id" "instance-name" { + byte_length = 4 +} + +resource "google_sql_database_instance" "main" { + name = "${var.name}-${random_id.instance-name.hex}" + region = var.region + database_version = "POSTGRES_15" + + settings { + availability_type = "ZONAL" + disk_autoresize = true + disk_type = "PD_SSD" + tier = "db-custom-${var.cpus}-${var.memory_mb}" + + database_flags { + name = "log_min_duration_statement" + value = "-1" + } + + database_flags { + name = "max_connections" + value = var.max_connections + } + + ip_configuration { + ipv4_enabled = "true" + require_ssl = "true" + + authorized_networks { + name = "all" + value = "0.0.0.0/0" + } + } + + backup_configuration { + enabled = true + start_time = "23:00" + } + + location_preference { + zone = var.zone + } + } +} + +resource "google_sql_database" "atc" { + name = "atc" + + instance = google_sql_database_instance.main.name + charset = "UTF8" + collation = "en_US.UTF8" +} + +resource "random_string" "password" { + length = 32 + special = true +} + +resource "google_sql_user" "user" { + name = "atc" + + instance = google_sql_database_instance.main.name + password = random_string.password.result +} + +resource "google_sql_ssl_cert" "cert" { + common_name = "atc" + instance = google_sql_database_instance.main.name +} diff --git a/infra/terraform/gcloud/database/outputs.tf b/infra/terraform/gcloud/database/outputs.tf new file mode 100644 index 000000000..cbf2815e9 --- /dev/null +++ b/infra/terraform/gcloud/database/outputs.tf @@ -0,0 +1,30 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +output "username" { + value = google_sql_user.user.name +} + +output "password" { + sensitive = true + value = random_string.password.result +} + +output "ip" { + value = google_sql_database_instance.main.ip_address[0].ip_address +} + +output "ca-cert" { + sensitive = true + value = google_sql_database_instance.main.server_ca_cert[0].cert +} + +output "cert" { + sensitive = true + value = google_sql_ssl_cert.cert.cert +} + +output "private-key" { + sensitive = true + value = google_sql_ssl_cert.cert.private_key +} diff --git a/infra/terraform/gcloud/database/variables.tf b/infra/terraform/gcloud/database/variables.tf new file mode 100644 index 000000000..00019a6e3 --- /dev/null +++ b/infra/terraform/gcloud/database/variables.tf @@ -0,0 +1,37 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +variable "name" { + default = "" + description = "The name of the CloudSQL instance to create (ps.: a random ID is appended to this name)" +} + +variable "memory_mb" { + default = "" + description = "Number of MBs to assign to the CloudSQL instance." +} + +variable "cpus" { + default = "" + description = "Number of CPUs to assign to the CloudSQL instance." +} + +variable "zone" { + default = "" + description = "The zone where this instance is supposed to be created at (e.g., us-central1-a)" +} + +variable "region" { + default = "" + description = "The region where the instance is supposed to be created at (e.g., us-central1)" +} + +variable "disk_size_gb" { + default = "" + description = "The disk size in GB's (e.g. 
10)" +} + +variable "max_connections" { + default = "" + description = "The max number of connections allowed by postgres" +} diff --git a/infra/terraform/gcloud/gcp.tf b/infra/terraform/gcloud/gcp.tf new file mode 100644 index 000000000..dddc8e6f3 --- /dev/null +++ b/infra/terraform/gcloud/gcp.tf @@ -0,0 +1,33 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +terraform { + required_providers { + google = "~> 5" + google-beta = "~> 5" + } + + backend "gcs" { + # By not providing credentials, you will use your current identity from the gcloud CLI. + # credentials = "gcp.json" + bucket = "tanzu-user-authentication-terraform-state" + prefix = "pinniped-concourse-jan2024" + } +} + +provider "google" { + # By not providing credentials, you will use your current identity from the gcloud CLI. + # credentials = "gcp.json" + project = var.project + region = var.region + zone = var.zone +} + +# `google-beta` provides us access to GCP's beta APIs. +provider "google-beta" { + # By not providing credentials, you will use your current identity from the gcloud CLI. + # credentials = "gcp.json" + project = var.project + region = var.region + zone = var.zone +} diff --git a/infra/terraform/gcloud/main.tf b/infra/terraform/gcloud/main.tf new file mode 100644 index 000000000..584724310 --- /dev/null +++ b/infra/terraform/gcloud/main.tf @@ -0,0 +1,61 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# The static IP and related DNS entry. +module "address" { + source = "./address" + + dns-zone = var.dns-zone + subdomain = var.subdomain +} + +# Instantiates the GKE Kubernetes cluster. +module "cluster" { + source = "./cluster" + + name = "pinniped-concourse" + project = var.project + region = var.region + zone = var.zone + + node-pools = { + + "generic-1" = { + auto-upgrade = true + disk-size = "50" + disk-type = "pd-ssd" + image = "COS_CONTAINERD" + local-ssds = 0 + machine-type = "e2-highcpu-8" # 8 vCPU and 4 GB memory + max = 2 + min = 1 + preemptible = false + version = "1.30.4-gke.1348000" + }, + + "workers-2" = { + auto-upgrade = true + disk-size = "100" + disk-type = "pd-ssd" + image = "UBUNTU_CONTAINERD" + local-ssds = 0 + machine-type = "c3-standard-8" # 8 vCPU and 32 GB memory + max = 5 + min = 1 + preemptible = false + version = "1.30.4-gke.1348000" + }, + } +} + +# Creates the CloudSQL Postgres database to be used by the Concourse deployment. +module "database" { + source = "./database" + + name = "pinniped-concourse" + cpus = "4" + memory_mb = "7680" + region = var.region + zone = var.zone + max_connections = "300" +} diff --git a/infra/terraform/gcloud/outputs.tf b/infra/terraform/gcloud/outputs.tf new file mode 100644 index 000000000..56cca4dc9 --- /dev/null +++ b/infra/terraform/gcloud/outputs.tf @@ -0,0 +1,54 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +output "web-ip" { + value = module.address.ip +} + +output "web-hostname" { + value = module.address.hostname +} + +output "database-ip" { + value = module.database.ip +} + +output "database-ca-cert" { + sensitive = true + value = module.database.ca-cert +} + +output "database-username" { + value = module.database.username +} + +output "database-password" { + sensitive = true + value = module.database.password +} + +output "database-cert" { + sensitive = true + value = module.database.cert +} + +output "database-private-key" { + sensitive = true + value = module.database.private-key +} + +output "project" { + value = var.project +} + +output "region" { + value = var.region +} + +output "zone" { + value = var.zone +} + +output "cluster-name" { + value = module.cluster.cluster-name +} diff --git a/infra/terraform/gcloud/variables.tf b/infra/terraform/gcloud/variables.tf new file mode 100644 index 000000000..2c7637209 --- /dev/null +++ b/infra/terraform/gcloud/variables.tf @@ -0,0 +1,28 @@ +# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +variable "project" { + description = "The Google GCP project to host the resources" + type = string + # Please provide the value of this variable by setting the env var TF_VAR_project for all terraform commands. +} + +variable "region" { + description = "The cloud provider region where the resources created" + default = "us-central1" +} + +variable "zone" { + description = "The cloud provider zone where the resources are created" + default = "us-central1-c" +} + +variable "dns-zone" { + description = "The default DNS zone to use when creating subdomains" + default = "pinniped-dev" +} + +variable "subdomain" { + description = "Subdomain under the DNS zone to register" + default = "ci" +} diff --git a/pipelines/cleanup-aws/pipeline.yml b/pipelines/cleanup-aws/pipeline.yml new file mode 100644 index 000000000..68c52e72e --- /dev/null +++ b/pipelines/cleanup-aws/pipeline.yml @@ -0,0 +1,68 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/9/9d/Seal_cleaning_itself.jpg + +resources: + + - name: pinniped-ci + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + +jobs: + + # Here is a recommendation for how to use these tasks to clean up our AWS + # environment. + # + # 1. Run dryrun-cleanup-aws and look at the listed resources to make sure you aren't + # deleting anything that you don't want to. + # 2. Run danger-danger-cleanup-aws to actually delete resources. + # 3. Run list-all-aws-resources to view ALL resources left in our AWS account. + # Consider if we want to add any of those resources to our cleanup task's config. 
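+  #
+  # For example (assuming a fly target named "pinniped", as used by the helper scripts in
+  # this repo), any of these jobs can be triggered manually with the fly CLI:
+  #   fly --target pinniped trigger-job --job cleanup-aws/dryrun-cleanup-aws --watch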
+ + - name: danger-danger-cleanup-aws + public: false # hide logs + serial: true + plan: + - get: pinniped-ci + - task: cleanup-aws + file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml + params: + AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number)) + AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id)) + AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret)) + AWS_ROLE_ARN: ((aws-cleanup-role-arn)) + REALLY_CLEANUP: "yes" + + - name: dryrun-cleanup-aws + public: false # hide logs + serial: true + plan: + - get: pinniped-ci + - task: preview-cleanup-aws-without-actually-deleting-anything + file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml + params: + AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number)) + AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id)) + AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret)) + AWS_ROLE_ARN: ((aws-cleanup-role-arn)) + + - name: list-all-aws-resources + public: false # hide logs + serial: true + plan: + - get: pinniped-ci + - task: list-all-aws-resources + file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml + params: + AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number)) + AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id)) + AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret)) + AWS_ROLE_ARN: ((aws-cleanup-role-arn)) + ALL_RESOURCES: "yes" diff --git a/pipelines/cleanup-aws/update-pipeline.sh b/pipelines/cleanup-aws/update-pipeline.sh new file mode 100755 index 000000000..3948643b8 --- /dev/null +++ b/pipelines/cleanup-aws/update-pipeline.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" diff --git a/pipelines/concourse-workers/pipeline.yml b/pipelines/concourse-workers/pipeline.yml new file mode 100644 index 000000000..e8fd8f891 --- /dev/null +++ b/pipelines/concourse-workers/pipeline.yml @@ -0,0 +1,122 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://cdn.pixabay.com/photo/2020/09/16/22/09/pool-5577567_1280.jpg + +meta: + + # GCP account info and which zone the workers should be created in and deleted from. + gke_admin_params: &gke_admin_params + INSTANCE_ZONE: us-west1-b + PINNIPED_GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gke-cluster-developer-username)) + GCP_JSON_KEY: ((gke-cluster-developer-json-key)) + + # GCP account info and which zone the workers should be created in and deleted from. 
+ gcp_account_params: &gcp_account_params + INSTANCE_ZONE: us-central1-b + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-instance-admin-username)) + GCP_JSON_KEY: ((gcp-instance-admin-json-key)) + +resources: + + - name: pinniped-ci + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + + - name: k8s-app-deployer-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-app-deployer + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: gcloud-image + type: registry-image + icon: docker + source: + repository: google/cloud-sdk + tag: slim + + - name: hourly + type: time + icon: calendar-clock + source: + interval: 1h + +# In an effort to save money, no longer automatically scale our workers up and down on a schedule. + +# - name: end-of-business-day +# type: time +# icon: calendar-clock +# source: +# location: America/Los_Angeles +# start: 7:00 PM +# stop: 8:00 PM +# days: [ Monday, Tuesday, Wednesday, Thursday, Friday ] +# +# - name: start-of-business-day +# type: time +# icon: calendar-clock +# source: +# location: America/New_York +# start: 5:30 AM +# stop: 6:30 AM +# days: [ Monday, Tuesday, Wednesday, Thursday, Friday ] + +jobs: + + - name: scale-up-internal-workers + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-ci + - get: k8s-app-deployer-image +# - get: start-of-business-day +# trigger: true + - task: scale-up + timeout: 30m + file: pinniped-ci/pipelines/concourse-workers/scale-up-gke-replicas.yml + image: k8s-app-deployer-image + params: + <<: *gke_admin_params + + - name: scale-down-internal-workers + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-ci + - get: k8s-app-deployer-image +# - get: end-of-business-day +# trigger: true + - task: scale-down + timeout: 30m + file: pinniped-ci/pipelines/concourse-workers/scale-down-gke-replicas.yml + image: k8s-app-deployer-image + params: + <<: *gke_admin_params + + - name: remove-orphaned-vms + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-ci + - get: gcloud-image + - get: hourly + trigger: true + - task: remove-orphaned-kind-cluster-vms + attempts: 2 + timeout: 25m + file: pinniped-ci/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml + image: gcloud-image + params: + <<: *gcp_account_params diff --git a/pipelines/concourse-workers/scale-down-gke-replicas.yml b/pipelines/concourse-workers/scale-down-gke-replicas.yml new file mode 100644 index 000000000..edb0be9f4 --- /dev/null +++ b/pipelines/concourse-workers/scale-down-gke-replicas.yml @@ -0,0 +1,13 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: +- name: pinniped-ci +params: + PINNIPED_GCP_PROJECT: + GCP_SERVICE_ACCOUNT: + GCP_JSON_KEY: +run: + path: pinniped-ci/infra/concourse-install/scale-down-concourse-internal-workers.sh diff --git a/pipelines/concourse-workers/scale-up-gke-replicas.yml b/pipelines/concourse-workers/scale-up-gke-replicas.yml new file mode 100644 index 000000000..4c31df645 --- /dev/null +++ b/pipelines/concourse-workers/scale-up-gke-replicas.yml @@ -0,0 +1,13 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: +- name: pinniped-ci +params: + PINNIPED_GCP_PROJECT: + GCP_SERVICE_ACCOUNT: + GCP_JSON_KEY: +run: + path: pinniped-ci/infra/concourse-install/scale-up-concourse-internal-workers.sh diff --git a/pipelines/concourse-workers/update-pipeline.sh b/pipelines/concourse-workers/update-pipeline.sh new file mode 100755 index 000000000..3948643b8 --- /dev/null +++ b/pipelines/concourse-workers/update-pipeline.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" diff --git a/pipelines/dockerfile-builders/pipeline.yml b/pipelines/dockerfile-builders/pipeline.yml new file mode 100644 index 000000000..809d2fa9a --- /dev/null +++ b/pipelines/dockerfile-builders/pipeline.yml @@ -0,0 +1,1167 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/6/64/Lobo_marino_%28Zalophus_californianus_wollebaeki%29%2C_Punta_Pitt%2C_isla_de_San_Crist%C3%B3bal%2C_islas_Gal%C3%A1pagos%2C_Ecuador%2C_2015-07-24%2C_DD_11.JPG + +groups: + - name: other + jobs: + - build-k8s-app-deployer-dockerfile + - build-deployment-yaml-formatter-dockerfile + - build-integration-test-runner-dockerfile + - build-integration-test-runner-beta-dockerfile + - build-code-coverage-uploader + - build-eks-deployer-dockerfile + - build-pool-trigger-resource-dockerfile + - build-github-pr-resource + - build-gh-cli + - build-crane + - name: test + jobs: + - "build-test-*" + - name: k8s-codegen + jobs: + - "build-k8s-code-generator-*" + +meta: + + # This pipeline has a lot of resources, so it causes a lot of checks. + # The default interval is 1m. We will try checking less often to put less + # load on the workers. + check-every-for-dockerfile: &check-every-for-dockerfile + check_every: 3m + check-every-for-image: &check-every-for-image + check_every: 10m + + # These version numbers should be updated periodically. + codegen-versions: &codegen-versions + # Choose which version of Golang to use in the codegen container images. + BUILD_ARG_GO_VERSION: '1.23.2' + # Choose which version of sigs.k8s.io/controller-tools/cmd/controller-gen to install + # in the codegen container images. + BUILD_ARG_CONTROLLER_GEN_VERSION: 0.16.4 + # Choose which version of github.com/elastic/crd-ref-docs to install in the codegen + # container images. We use a commit sha instead of a release semver because this project + # does not create releases very often. They seem to only release 1-2 times per year, but + # commit to main more often. 
+ BUILD_ARG_CRD_REF_DOCS_COMMIT_SHA: 95ad38c + +resources: + + - name: k8s-app-deployer-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/k8s-app-deployer/Dockerfile ] + + - name: k8s-app-deployer-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-app-deployer + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: deployment-yaml-formatter-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/deployment-yaml-formatter/Dockerfile ] + + - name: deployment-yaml-formatter-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/deployment-yaml-formatter + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: integration-test-runner-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/integration-test-runner/Dockerfile ] + + - name: integration-test-runner-beta-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/integration-test-runner-beta/Dockerfile ] + + - name: integration-test-runner-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/integration-test-runner + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: integration-test-runner-beta-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/integration-test-runner-beta + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: code-coverage-uploader-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/code-coverage-uploader/Dockerfile ] + + - name: code-coverage-uploader-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/code-coverage-uploader + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: pool-trigger-resource-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: + - dockerfiles/pool-trigger-resource/Dockerfile + - "dockerfiles/pool-trigger-resource/assets/*" + + - name: pool-trigger-resource-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/pool-trigger-resource + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.25-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: 
((ci-ghcr-registry))/k8s-code-generator-1.25 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.26-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.26 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.27-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.27 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.28-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.28 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.29-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.29 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.30-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.30 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-1.31-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.31 + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: k8s-code-generator-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/k8s-code-generator/* ] + + - name: test-forward-proxy-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/test-forward-proxy + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: test-forward-proxy-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/test-forward-proxy/* ] + + - name: test-bitnami-ldap-image-ghcr + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/test-bitnami-ldap + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: test-bitnami-ldap-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/test-bitnami-ldap/Dockerfile ] + + - name: test-dex-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/test-dex + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: test-dex-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ 
dockerfiles/test-dex/Dockerfile ] + + - name: test-cfssl-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/test-cfssl + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: test-cfssl-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/test-cfssl/Dockerfile ] + + - name: test-kubectl-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/test-kubectl + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: test-kubectl-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/test-kubectl/Dockerfile ] + + - name: gh-cli-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/gh-cli + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: gh-cli-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/gh-cli/Dockerfile ] + + - name: crane-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/crane + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: crane-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/crane/Dockerfile ] + + - name: eks-deployer-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + paths: [ dockerfiles/eks-deployer/Dockerfile ] + + - name: eks-deployer-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/eks-deployer + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: github-pr-resource-dockerfile + type: git + icon: github + <<: *check-every-for-dockerfile + source: + uri: https://github.com/pinniped-ci-bot/github-pr-resource.git + branch: pinniped_owners + + - name: github-pr-resource-image + type: registry-image + icon: docker + <<: *check-every-for-image + source: + repository: ((ci-ghcr-registry))/github-pr-resource + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: daily + type: time + icon: calendar-clock + check_every: 10m + source: + location: America/Los_Angeles + start: 2:00 AM + stop: 3:00 AM + days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ] + +jobs: + + - name: build-k8s-app-deployer-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-app-deployer-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: 
concourse/oci-build-task + inputs: + - name: k8s-app-deployer-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-app-deployer-dockerfile/dockerfiles/k8s-app-deployer + - put: k8s-app-deployer-image + params: + image: image/image.tar + + - name: build-deployment-yaml-formatter-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: deployment-yaml-formatter-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: deployment-yaml-formatter-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: deployment-yaml-formatter-dockerfile/dockerfiles/deployment-yaml-formatter + - put: deployment-yaml-formatter-image + params: + image: image/image.tar + + - name: build-integration-test-runner-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: integration-test-runner-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: integration-test-runner-dockerfile + outputs: + - name: image + run: + path: build + # Do not cache so we get the latest version of Chrome during nightly runs. + #caches: + # - path: cache + params: + CONTEXT: integration-test-runner-dockerfile/dockerfiles/integration-test-runner + - put: integration-test-runner-image + params: + image: image/image.tar + + - name: build-integration-test-runner-beta-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: integration-test-runner-beta-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: integration-test-runner-beta-dockerfile + outputs: + - name: image + run: + path: build + # Do not cache so we get the latest version of Chrome during nightly runs. 
+ #caches: + # - path: cache + params: + CONTEXT: integration-test-runner-beta-dockerfile/dockerfiles/integration-test-runner-beta + - put: integration-test-runner-beta-image + params: + image: image/image.tar + + - name: build-code-coverage-uploader + public: true # all logs are publicly visible + serial: true + plan: + - get: code-coverage-uploader-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: code-coverage-uploader-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: code-coverage-uploader-dockerfile/dockerfiles/code-coverage-uploader + - put: code-coverage-uploader-image + params: + image: image/image.tar + + - name: build-pool-trigger-resource-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: pool-trigger-resource-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pool-trigger-resource-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pool-trigger-resource-dockerfile/dockerfiles/pool-trigger-resource + - put: pool-trigger-resource-image + params: + image: image/image.tar + + - name: build-k8s-code-generator-1.25 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.25.16 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.25-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.26 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.26.15 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.26-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.27 + public: true # all logs are publicly visible + 
serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.27.16 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.27-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.28 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.28.14 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.28-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.29 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.29.9 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.29-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.30 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.30.5 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a 
multi-arch images which includes these platforms + - put: k8s-code-generator-1.30-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-k8s-code-generator-1.31 + public: true # all logs are publicly visible + serial: true + plan: + - get: k8s-code-generator-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: k8s-code-generator-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: k8s-code-generator-dockerfile/dockerfiles/k8s-code-generator + BUILD_ARG_K8S_PKG_VERSION: 0.31.1 + <<: *codegen-versions + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: k8s-code-generator-1.31-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-test-forward-proxy + public: true # all logs are publicly visible + serial: true + plan: + - get: test-forward-proxy-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: test-forward-proxy-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: test-forward-proxy-dockerfile/dockerfiles/test-forward-proxy + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: test-forward-proxy-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-test-bitnami-ldap + public: true # all logs are publicly visible + serial: true + plan: + - get: test-bitnami-ldap-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: test-bitnami-ldap-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: test-bitnami-ldap-dockerfile/dockerfiles/test-bitnami-ldap + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: test-bitnami-ldap-image-ghcr + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-test-dex + public: true # all logs are publicly visible + serial: true + plan: + - get: test-dex-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: test-dex-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: test-dex-dockerfile/dockerfiles/test-dex + OUTPUT_OCI: true # needed for building multi-arch 
images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: test-dex-image + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-test-cfssl + public: true # all logs are publicly visible + serial: true + plan: + - get: test-cfssl-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: test-cfssl-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: test-cfssl-dockerfile/dockerfiles/test-cfssl + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: test-cfssl-image + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-test-kubectl + public: true # all logs are publicly visible + serial: true + plan: + - get: test-kubectl-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: test-kubectl-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: test-kubectl-dockerfile/dockerfiles/test-kubectl + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + - put: test-kubectl-image + get_params: + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: build-eks-deployer-dockerfile + public: true # all logs are publicly visible + serial: true + plan: + - get: eks-deployer-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: eks-deployer-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: eks-deployer-dockerfile/dockerfiles/eks-deployer + - put: eks-deployer-image + params: + image: image/image.tar + + - name: build-github-pr-resource + public: true # all logs are publicly visible + serial: true + plan: + - get: github-pr-resource-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: github-pr-resource-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: github-pr-resource-dockerfile + - put: github-pr-resource-image + params: + image: image/image.tar + + - name: build-crane + public: true # all logs are publicly visible + serial: true + plan: + - get: crane-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: crane-dockerfile + outputs: + - 
name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: crane-dockerfile/dockerfiles/crane + - put: crane-image + params: + image: image/image.tar + + - name: build-gh-cli + public: true # all logs are publicly visible + serial: true + plan: + - get: gh-cli-dockerfile + trigger: true + - get: daily + trigger: true + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: gh-cli-dockerfile + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: gh-cli-dockerfile/dockerfiles/gh-cli + - put: gh-cli-image + params: + image: image/image.tar diff --git a/pipelines/dockerfile-builders/update-pipeline.sh b/pipelines/dockerfile-builders/update-pipeline.sh new file mode 100755 index 000000000..aee2287e6 --- /dev/null +++ b/pipelines/dockerfile-builders/update-pipeline.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" +ensure_time_resource_has_at_least_one_version "$pipeline" daily + +# Make the pipeline visible to non-authenticated users in the web UI. +$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline" diff --git a/pipelines/go-compatibility/pipeline.yml b/pipelines/go-compatibility/pipeline.yml new file mode 100644 index 000000000..e22544212 --- /dev/null +++ b/pipelines/go-compatibility/pipeline.yml @@ -0,0 +1,146 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/6/68/Mirounga_leonina.jpg + +meta: + + build_pinniped: &build_pinniped + config: + platform: linux + inputs: + - name: pinniped-source + run: + path: bash + args: + - "-c" + - | + set -exuo pipefail + go version + cd pinniped-source/ + + # compile all of our code + go build -o /dev/null ./... + + # compile (but don't actually run) all of our tests + go test ./... -run=nothing + +resources: + + - name: daily + type: time + icon: calendar-clock + source: + location: America/Los_Angeles + start: 4:00 AM + stop: 5:00 AM + days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ] + + - name: pinniped-source + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: main + private_key: ((source-repo-deploy-key)) + + - name: go-1.22-image + type: registry-image + icon: docker + source: + repository: docker.io/golang + tag: "1.22" + +jobs: + + - name: go-install-cli + public: true # all logs are publicly visible + serial: true + plan: + - get: daily + trigger: true + - task: go-install + config: + platform: linux + image_resource: + type: registry-image + source: + repository: docker.io/golang + run: + path: bash + args: + - "-c" + - | + set -exuo pipefail + go install -v go.pinniped.dev/cmd/pinniped@latest + + # This job attempts to check whether it's possible to depend on our API client submodule. + # It creates a simple test application with go.mod and main.go files, then attempts to compile it. + # + # As of now, this is known to be broken so we've decided to disable this job. 
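+  # For reference, the go-N.NN-compatibility jobs in this file each pair a docker.io/golang
+  # image resource with a job that reuses the *build_pinniped anchor from the meta section
+  # above, for example:
+  #
+  #    - task: build
+  #      image: go-1.22-image
+  #      <<: *build_pinniped
+  #
+  # so checking Pinniped against a newer Go minor release amounts to adding one more image
+  # resource and one more job of this shape (go-1.22-image is the resource already defined above).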
+ # - name: go-get-submodule + # serial: true + # plan: + # - get: daily + # trigger: true + # - task: go-get + # config: + # platform: linux + # image_resource: + # type: registry-image + # source: + # repository: docker.io/golang + # run: + # path: bash + # args: + # - "-c" + # - | + # set -euo pipefail + # mkdir /work + # cd /work + + # cat << EOF > go.mod + # module testapp + + # go 1.14 + + # require ( + # go.pinniped.dev/generated/1.18/apis v0.0.0-00010101000000-000000000000 + # go.pinniped.dev/generated/1.18/client v0.0.0-20200918195624-2d4d7e588a18 + # ) + + # replace ( + # go.pinniped.dev/generated/1.18/apis v0.0.0-00010101000000-000000000000 => go.pinniped.dev/generated/1.18/apis v0.0.0-20200918195624-2d4d7e588a18 + # ) + # EOF + + # cat << EOF > main.go + # package main + + # import ( + # _ "go.pinniped.dev/generated/1.18/apis/idp/v1alpha1" + # _ "go.pinniped.dev/generated/1.18/client/clientset/versioned" + # ) + + # func main() {} + # EOF + + # head -100 go.mod main.go + # set -x + # go mod download + # go build -o testapp main.go + + - name: go-1.22-compatibility + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: daily + trigger: true + - get: pinniped-source + - get: go-1.22-image + - task: build + image: go-1.22-image + <<: *build_pinniped diff --git a/pipelines/go-compatibility/update-pipeline.sh b/pipelines/go-compatibility/update-pipeline.sh new file mode 100755 index 000000000..3114907b6 --- /dev/null +++ b/pipelines/go-compatibility/update-pipeline.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" +ensure_time_resource_has_at_least_one_version "$pipeline" daily diff --git a/pipelines/kind-node-builder/pipeline.yml b/pipelines/kind-node-builder/pipeline.yml new file mode 100644 index 000000000..64503481d --- /dev/null +++ b/pipelines/kind-node-builder/pipeline.yml @@ -0,0 +1,109 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/2/2b/Grey_seal_animal_halichoerus_grypus.jpg + +meta: + + notify_on_failure: ¬ify_on_failure + on_failure: + put: gchat + timeout: 5m + params: + text: | + Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :( + ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME} + + # GCP account info and which zone the workers should be created in and deleted from. + gcp_account_params: &gcp_account_params + INSTANCE_ZONE: us-central1-b + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-instance-admin-username)) + GCP_JSON_KEY: ((gcp-instance-admin-json-key)) + +resource_types: + + - name: google-chat-notify-resource + type: docker-image + source: + repository: springio/google-chat-notify-resource + tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags + # We are only doing pulls of this resource type, but add the username and password to avoid + # hitting a rate limit. Our free account is only allowed to have one access token, so we + # cannot make a read-only token for performing pulls. 
+ username: getpinniped + password: ((getpinniped-dockerhub-image-push-access-token)) + +resources: + + - name: gcloud-image + type: registry-image + icon: docker + source: + repository: google/cloud-sdk + tag: slim + + - name: pinniped-ci + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + + - name: daily + type: time + icon: calendar-clock + source: + location: America/Los_Angeles + start: 1:00 AM + stop: 2:00 AM + days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ] + + - name: gchat + type: google-chat-notify-resource + icon: chat-outline + source: + url: ((gchat-project-pinniped-bots-webhook-url)) + +jobs: + + - name: build-kind-node-image-kube-main-latest + public: true # all logs are publicly visible + <<: *notify_on_failure + plan: + - in_parallel: + - get: pinniped-ci + - get: gcloud-image + - get: daily + trigger: true + - task: create-kind-node-builder-vm + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml + image: gcloud-image + params: + <<: *gcp_account_params + - task: build-kind-node-image + timeout: 90m + file: pinniped-ci/pipelines/shared-tasks/build-kind-node-image/task.yml + image: gcloud-image + input_mapping: + instance: create-kind-node-builder-vm-output + params: + PUSH_TO_IMAGE_REGISTRY: "ghcr.io" + PUSH_TO_IMAGE_REPO: "pinniped-ci-bot/kind-node-image" + DOCKER_USERNAME: ((ci-ghcr-pusher-username)) + DOCKER_PASSWORD: ((ci-ghcr-pusher-token)) + <<: *gcp_account_params + ensure: + task: remove-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-gce-worker-vm/task.yml + image: gcloud-image + input_mapping: + concourse-worker-pool: create-kind-node-builder-vm-output + params: + <<: *gcp_account_params diff --git a/pipelines/kind-node-builder/update-pipeline.sh b/pipelines/kind-node-builder/update-pipeline.sh new file mode 100755 index 000000000..aee2287e6 --- /dev/null +++ b/pipelines/kind-node-builder/update-pipeline.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" +ensure_time_resource_has_at_least_one_version "$pipeline" daily + +# Make the pipeline visible to non-authenticated users in the web UI. +$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline" diff --git a/pipelines/main/pipeline.yml b/pipelines/main/pipeline.yml new file mode 100644 index 000000000..31a148407 --- /dev/null +++ b/pipelines/main/pipeline.yml @@ -0,0 +1,2788 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/2/27/Walruses_odobenus_rosmarus_largest_pinniped_marine_mammals.jpg + +meta: + + # GKE account info and which zone the clusters should be created in and deleted from. + gke_account_params: &gke_account_params + CLUSTER_ZONE: us-central1-c + GCP_PROJECT: ((gcp-project-name)) + GCP_SERVICE_ACCOUNT: ((gke-test-pool-manager-username)) + GCP_JSON_KEY: ((gke-test-pool-manager-json-key)) + + # GCP account info and which zone the kind workers should be created in and deleted from. 
+ gcp_account_params: &gcp_account_params + INSTANCE_ZONE: us-central1-b # which zone the kind worker VMs should be created in and deleted from + GCP_ZONE: us-central1-b + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-instance-admin-username)) + GCP_JSON_KEY: ((gcp-instance-admin-json-key)) + + # AWS account info and which zone the workers should be created in and deleted from. + aws_account_params: &aws_account_params + AWS_DEFAULT_REGION: us-west-2 + AWS_ACCESS_KEY_ID: ((aws-concourse-ci-iam-key-id)) + AWS_SECRET_ACCESS_KEY: ((aws-concourse-ci-iam-key-secret)) + AWS_ROLE_ARN: ((aws-concourse-ci-role-arn)) + + azure_account_params: &azure_account_params + AZURE_REGION: westus2 + AZURE_TENANT: ((azure-bot-tenant-id)) + AZURE_RESOURCE_GROUP: pinniped-ci + AZURE_USERNAME: ((azure-bot-app-id)) + AZURE_PASSWORD: ((azure-bot-password)) + + cluster_diagnostics_task: &cluster_diagnostics_task + file: pinniped-ci/pipelines/shared-tasks/export-cluster-diagnostics/task.yml + image: integration-test-runner-image + timeout: 15m + params: + GCS_BUCKET: pinniped-ci-archive + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-cluster-diagnostic-uploader-username)) + GCP_JSON_KEY: ((gcp-cluster-diagnostic-uploaded-json-key)) + + notify_on_failure: ¬ify_on_failure + on_failure: + put: gchat + timeout: 5m + params: + text: | + Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :( + ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME} + + notify_on_success: ¬ify_on_success + on_success: + put: gchat + timeout: 5m + params: + text: | + Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` succeeded + ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME} + + # Decides which specific patch versions of k8s we would like to deploy when creating kind cluster workers. + # It should be safe to update the patch version numbers here whenever new versions come out. + # As the old workers get recycled, they will be replaced with new workers which use the patch + # version specified here. The latest available versions can be found here: + # https://hub.docker.com/r/kindest/node/tags + # Note that the available versions of kind node images lag behind the available versions of Kubernetes itself, + # so always check the tags using the above link. + kube_version_v1-21-x: &kube_version_v1-21-x + KUBE_VERSION: v1.21.14 + kube_version_v1-22-x: &kube_version_v1-22-x + KUBE_VERSION: v1.22.17 + kube_version_v1-23-x: &kube_version_v1-23-x + KUBE_VERSION: v1.23.17 + kube_version_v1-24-x: &kube_version_v1-24-x + KUBE_VERSION: v1.24.17 + kube_version_v1-25-x: &kube_version_v1-25-x + KUBE_VERSION: v1.25.16 + kube_version_v1-26-x: &kube_version_v1-26-x + KUBE_VERSION: v1.26.15 + kube_version_v1-27-x: &kube_version_v1-27-x + KUBE_VERSION: v1.27.16 + kube_version_v1-28-x: &kube_version_v1-28-x + KUBE_VERSION: v1.28.13 + kube_version_v1-29-x: &kube_version_v1-29-x + KUBE_VERSION: v1.29.8 + kube_version_v1-30-x: &kube_version_v1-30-x + KUBE_VERSION: v1.30.4 + kube_version_v1-31-x: &kube_version_v1-31-x + KUBE_VERSION: v1.31.1 + kube_version_k8s-main: &kube_version_k8s-main + KUBE_VERSION: "k8s-main" + KIND_NODE_IMAGE: "ghcr.io/pinniped-ci-bot/kind-node-image:latest" + + # Whenever we add a new Kubernetes version for kind clusters, please remember update these + # two aliases to reference the oldest and latest Kubernetes versions currently in use. 
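+  # For reference, the integration-test jobs below pick their cluster version by merging one of
+  # these anchors (or one of the two aliases defined next) into the deploy task's params, e.g.:
+  #
+  #    params:
+  #      <<: *kube_version_v1-31-x
+  #      <<: *gcp_account_params
+  #
+  # so bumping a patch version in a single anchor here is picked up by every job that merges it.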
+ oldest_kind_kube_version: &oldest_kind_kube_version + <<: *kube_version_v1-21-x + latest_kind_kube_version: &latest_kind_kube_version + <<: *kube_version_v1-31-x + + okta_integration_env_vars: &okta_integration_env_vars + OKTA_CLI_CALLBACK: ((okta-cli-callback)) + OKTA_CLI_CLIENT_ID: ((okta-cli-client-id)) + OKTA_ADDITIONAL_SCOPES: ((okta-additional-scopes)) + OKTA_USERNAME_CLAIM: ((okta-username-claim)) + OKTA_GROUPS_CLAIM: ((okta-groups-claim)) + OKTA_ISSUER: ((okta-issuer)) + OKTA_PASSWORD: ((okta-password)) + OKTA_SUPERVISOR_CLIENT_ID: ((okta-supervisor-client-id)) + OKTA_SUPERVISOR_CLIENT_SECRET: ((okta-supervisor-client-secret)) + OKTA_USERNAME: ((okta-username)) + OKTA_GROUPS: ((okta-groups)) + + jumpcloud_integration_env_vars: &jumpcloud_integration_env_vars + JUMPCLOUD_LDAP_HOST: ((jumpcloud-ldap-host)) + JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST: ((jumpcloud-ldap-start-tls-only-host)) + JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME: ((jumpcloud-ldap-bind-account-username)) + JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD: ((jumpcloud-ldap-bind-account-password)) + JUMPCLOUD_LDAP_USERS_SEARCH_BASE: ((jumpcloud-ldap-users-search-base)) + JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE: ((jumpcloud-ldap-groups-search-base)) + JUMPCLOUD_LDAP_USER_DN: ((jumpcloud-ldap-user-dn)) + JUMPCLOUD_LDAP_USER_CN: ((jumpcloud-ldap-user-cn)) + JUMPCLOUD_LDAP_USER_PASSWORD: ((jumpcloud-ldap-user-password)) + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME: ((jumpcloud-ldap-user-unique-id-attribute-name)) + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE: ((jumpcloud-ldap-user-unique-id-attribute-value)) + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME: ((jumpcloud-ldap-user-email-attribute-name)) + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE: ((jumpcloud-ldap-user-email-attribute-value)) + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN: ((jumpcloud-ldap-expected-direct-groups-dn)) + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN: ((jumpcloud-ldap-expected-direct-groups-cn)) + JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN: ((jumpcloud-ldap-expected-direct-posix-groups-cn)) + + active_directory_integration_env_vars: &active_directory_integration_env_vars + TEST_ACTIVE_DIRECTORY: "yes" + AWS_AD_HOST: ((aws-ad-host)) + AWS_AD_DOMAIN: ((aws-ad-domain)) + AWS_AD_BIND_ACCOUNT_USERNAME: ((aws-ad-bind-account-username)) + AWS_AD_BIND_ACCOUNT_PASSWORD: ((aws-ad-bind-account-password)) + AWS_AD_USER_USER_PRINCIPAL_NAME: ((aws-ad-user-userprincipalname)) + AWS_AD_USER_PASSWORD: ((aws-ad-user-password)) + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME: ((aws-ad-user-unique-id-attribute-name)) + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE: ((aws-ad-user-unique-id-attribute-value)) + AWS_AD_USER_EXPECTED_GROUPS_DN: ((aws-ad-expected-direct-groups-dn)) + AWS_AD_USER_EXPECTED_GROUPS_CN: ((aws-ad-expected-direct-groups-cn)) + AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME: ((aws-ad-expected-direct-and-nested-groups-samaccountnames)) + AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES: ((aws-ad-expected-direct-and-nested-groups-samaccountname-domainnames)) + AWS_AD_LDAPS_CA_BUNDLE: ((aws-ad-ca-data)) + AWS_AD_DEACTIVATED_USER_SAMACCOUNTNAME: ((aws-ad-deactivated-user-samaccountname)) + AWS_AD_DEACTIVATED_USER_PASSWORD: ((aws-ad-deactivated-user-password)) + AWS_AD_USER_EMAIL_ATTRIBUTE_VALUE: ((aws-ad-user-email-attribute-value)) + AWS_AD_DEFAULTNAMINGCONTEXT_DN: ((aws-ad-defaultnamingcontext)) + AWS_AD_USERS_DN: ((aws-ad-users-dn)) + + github_integration_env_vars: &github_integration_env_vars + PINNIPED_TEST_GITHUB_APP_CLIENT_ID: ((github-app-client-id)) + 
PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET: ((github-app-client-secret)) + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID: ((github-oauth-app-client-id)) + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET: ((github-oauth-app-client-secret)) + PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL: ((github-oauth-app-allowed-callback-url)) + PINNIPED_TEST_GITHUB_USER_USERNAME: ((github-username)) + PINNIPED_TEST_GITHUB_USER_PASSWORD: ((github-password)) + PINNIPED_TEST_GITHUB_USER_OTP_SECRET: ((github-user-otp-secret)) + PINNIPED_TEST_GITHUB_USERID: ((github-userid)) + PINNIPED_TEST_GITHUB_ORG: ((github-org)) + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES: ((github-expected-team-names)) + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS: ((github-expected-team-slugs)) + +resource_types: + + - name: google-chat-notify-resource + type: docker-image + source: + repository: springio/google-chat-notify-resource + tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags + # We are only doing pulls of this resource type, but add the username and password to avoid + # hitting a rate limit. Our free account is only allowed to have one access token, so we + # cannot make a read-only token for performing pulls. + username: getpinniped + password: ((getpinniped-dockerhub-image-push-access-token)) + + # Use the latest version of the github-release resource because of this problem: + # https://github.com/concourse/github-release-resource/pull/107 + - name: github-release + type: registry-image + source: + repository: concourse/github-release-resource + # We are only doing pulls of this resource type, but add the username and password to avoid + # hitting a rate limit. Our free account is only allowed to have one access token, so we + # cannot make a read-only token for performing pulls. 
+ username: getpinniped + password: ((getpinniped-dockerhub-image-push-access-token)) + +resources: + + - name: weekdays + type: time + icon: calendar-clock + source: + location: America/Los_Angeles + start: 6:00 AM + stop: 7:00 AM + days: [ Monday, Tuesday, Wednesday, Thursday, Friday ] + + - name: gchat + type: google-chat-notify-resource + icon: chat-outline + source: + url: ((gchat-project-pinniped-bots-webhook-url)) + + - name: pinniped + type: git + icon: github + check_every: 10m + webhook_token: ((github-webhook-token)) + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: main + private_key: ((source-repo-deploy-key)) + + - name: pinniped-ci + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + + - name: homebrew-pinniped + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/homebrew-pinniped.git + branch: main + private_key: ((homebrew-repo-read-write-deploy-key)) + + - name: ci-build-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/ci-build + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: ci-fips-build-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/ci-fips-build + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: ci-test-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/test-ci-test-binaries + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: ci-fips-test-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/test-ci-fips-test-binaries + username: ((ci-ghcr-pusher-username)) + password: ((ci-ghcr-pusher-token)) + tag: latest + + - name: integration-test-runner-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/integration-test-runner + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: integration-test-runner-beta-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/integration-test-runner-beta + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: eks-deployer-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/eks-deployer + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: code-coverage-uploader-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/code-coverage-uploader + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.25-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.25 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.26-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.26 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.27-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.27 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.28-image + type: registry-image + icon: docker + source: + 
repository: ((ci-ghcr-registry))/k8s-code-generator-1.28 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.29-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.29 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.30-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.30 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.31-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.31 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: kind-release + type: github-release + source: + access_token: ((ci-bot-access-token-with-read-user-permission)) # needed to avoid rate limits on GitHub API requests + owner: kubernetes-sigs + repository: kind + pre_release: true + + - name: gcloud-image + type: registry-image + icon: docker + source: + repository: google/cloud-sdk + tag: slim + + - name: k8s-app-deployer-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-app-deployer + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: deployment-yaml-formatter-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/deployment-yaml-formatter + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: aks-deployer-image + type: registry-image + icon: docker + source: + repository: mcr.microsoft.com/azure-cli + + - name: crane-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/crane + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: release-semver + type: semver + icon: counter + source: + driver: gcs + bucket: tanzu-user-authentication-private-ci + key: semver/0.0.x-version.txt + json_key: ((gcr-image-pusher-json-key)) + initial_version: 0.0.0 + + - name: github-release + type: github-release + icon: github + source: + owner: vmware-tanzu + repository: pinniped + access_token: ((ci-bot-access-token-with-public-repo-write-permission)) + drafts: true + + - name: github-final-release + type: github-release + icon: github + source: + owner: vmware-tanzu + repository: pinniped + access_token: ((ci-bot-access-token-with-public-repo-write-permission)) + +jobs: + + - name: lint + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - in_parallel: + - task: lint + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/run-verify-lint/task.yml + + - name: verify-codegen + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - get: k8s-code-generator-1.25-image + - get: k8s-code-generator-1.26-image + - get: k8s-code-generator-1.27-image + - get: k8s-code-generator-1.28-image + - get: k8s-code-generator-1.29-image + - get: k8s-code-generator-1.30-image + - get: k8s-code-generator-1.31-image + - in_parallel: + - task: verify-go-mod-tidy + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml + - task: verify-go-generate + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-go-generate/task.yml + - task: codegen-1.25 + timeout: 20m + file: 
pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.25-image + params: + KUBE_MINOR_VERSION: "1.25" + - task: codegen-1.26 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.26-image + params: + KUBE_MINOR_VERSION: "1.26" + - task: codegen-1.27 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.27-image + params: + KUBE_MINOR_VERSION: "1.27" + - task: codegen-1.28 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.28-image + params: + KUBE_MINOR_VERSION: "1.28" + - task: codegen-1.29 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.29-image + params: + KUBE_MINOR_VERSION: "1.29" + - task: codegen-1.30 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.30-image + params: + KUBE_MINOR_VERSION: "1.30" + - task: codegen-1.31 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.31-image + params: + KUBE_MINOR_VERSION: "1.31" + + - name: unit-test + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - get: code-coverage-uploader-image + - task: run-unit-tests + timeout: 45m + file: pinniped-ci/pipelines/shared-tasks/run-unit-tests/task.yml + - task: upload-test-coverage + timeout: 10m + params: + CODECOV_TOKEN: ((codecov-token)) + file: pinniped-ci/pipelines/shared-tasks/upload-test-coverage/task.yml + image: code-coverage-uploader-image + + - name: run-go-vuln-scan + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - task: run-go-vuln-scan + file: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.yml + params: + BUILD_TAGS: + + - name: build-cli + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - task: compile + timeout: 45m + file: pinniped-ci/pipelines/shared-tasks/build-cli-binaries/task.yml + params: + DRY_RUN: "yes" + + - name: build-image + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - task: build-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped + OUTPUT_OCI: true # needed for building multi-arch images + IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch images which includes these platforms + # These are the labels supported by GitHub Container Registry. + # See https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#labelling-container-images + # Note that these labels appear in the config of each platform-specific image, not in the multi-arch image's manifest. + # Also note that the GitHub UI says: "To connect a repository to your container image, the namespace + # for the repository and container image on GitHub must be the same. For example, they should be owned by + # the same user or organization." 
So these would only show in the GitHub UI for a package owned by the + # vmware-tanzu org. + # It is not clear if dockerhub will pay any attention to these labels for its UI. It doesn't seem to. + LABEL_org.opencontainers.image.source: "https://github.com/vmware-tanzu/pinniped" + LABEL_org.opencontainers.image.licenses: "Apache-2.0" + LABEL_org.opencontainers.image.description: "The official container images of https://pinniped.dev" + - put: ci-build-image + get_params: + skip_download: true + format: oci # needed for multi-arch images + params: + image: image/image # this is a directory for OCI (multi-arch images) + + - name: check-image-version + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + passed: [ build-image ] + - get: ci-build-image + params: + format: rootfs + passed: [ build-image ] + - get: pinniped-ci + - task: confirm-version + file: pinniped-ci/pipelines/shared-tasks/confirm-version/task.yml + input_mapping: + image: ci-build-image + + # this job builds pinniped using a different dockerfile + # to test that it is fips compatible. + # it uses go-boringcrypto rather than base go. + - name: build-fips-image + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - task: build-fips-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped-ci + - name: pinniped + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped + UNPACK_ROOTFS: true + DOCKERFILE: pinniped/hack/Dockerfile_fips + - task: confirm-built-with-fips + file: pinniped-ci/pipelines/shared-tasks/confirm-built-with-fips/task.yml + - put: ci-fips-build-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: build-test-image + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - in_parallel: + - task: build-test-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped + - name: pinniped-ci + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped + DOCKERFILE: pinniped-ci/pipelines/shared-helpers/test-binaries-image/Dockerfile + - put: ci-test-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: build-test-fips-image + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + trigger: true + - get: pinniped-ci + - in_parallel: + - task: build-test-fips-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped + - name: pinniped-ci + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped + DOCKERFILE: pinniped-ci/pipelines/shared-helpers/test-binaries-image/Dockerfile_fips + - put: ci-fips-test-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: ready-for-int # fan-in to make pass constraints for the rest of the pipeline easier to reason about + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: + - lint + - unit-test + - verify-codegen + - 
check-image-version + - build-test-image + - build-fips-image + - build-test-fips-image + - build-cli + - run-go-vuln-scan + trigger: true + - get: ci-build-image + passed: [ check-image-version ] + params: + skip_download: true + - get: ci-fips-build-image + passed: [ build-fips-image ] + params: + skip_download: true + - get: ci-test-image + passed: [ build-test-image ] + params: + skip_download: true + - get: ci-fips-test-image + passed: [ build-test-fips-image ] + params: + skip_download: true + + - name: scan-image + public: false # hide security scan results + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + params: + depth: 1 + - get: ci-build-image + passed: [ ready-for-int ] + params: + format: oci + - get: pinniped-ci + - task: scan-image-trivy + input_mapping: + image: ci-build-image + file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml + params: + GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission)) + IGNORE_VULNERABILITY_IDS: | + # Trivy thinks this is a HIGH CVE in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. + # That is an indirect dependency of our project, which we inherit from our direct dep k8s.io/apiserver. + # Therefore, we will inherit a solution for this CVE from k8s.io/apiserver when they fix it. Ignore it for now. + CVE-2023-47108 + <<: *notify_on_failure + + - name: integration-test-1.21 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-21-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.22 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: 
gcloud-image + params: + <<: *kube_version_v1-22-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.23 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-23-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.24 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-24-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: 
pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.25 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-25-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.26 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-26-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: 
pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.27 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-27-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.28 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-28-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.29 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: 
gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-29-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.30 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-30-x + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-1.31 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_v1-31-x 
+ <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-arm64 + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + INSTANCE_ARCH: arm64 # deploy on an arm64 VM to have a Kind cluster with arm64 nodes + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-with-external-idps + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # The following Okta params will cause the integration tests to use Okta instead 
of Dex. + # We don't need to run these on every version of Kubernetes for Kind in this pipeline, so we choose to run + # them on one version to get some coverage. + <<: *okta_integration_env_vars + # The following Jumpcloud params will cause the integration tests to use Jumpcloud instead of OpenLDAP. + # We don't need to run these on every version of Kubernetes for Kind in this pipeline, so we choose to run + # them on one version to get some coverage. + <<: *jumpcloud_integration_env_vars + # The following AD params enable the ActiveDirectory integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, so we choose to run them on one version to get some coverage. + <<: *active_directory_integration_env_vars + # The following params enable the GitHub integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, so we choose to run them on one version to get some coverage. + <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-idps-firewalled + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # Prevent direct connections from the Concierge and Supervisor to each other and to Dex, + # local user authenticator, and GitHub. Also configures the Concierge and Supervisor to + # make https requests through the Squid web proxy server. Tests the HTTPS_PROXY feature + # for sending requests through a corporate proxy for OIDCIdentityProvider (Supervisor->Dex), + # GitHubIdentityProvider (Supervisor->GitHub), JWTAuthenticator (Concierge->Supervisor), + # and WebhookAuthenticator (Concierge->local user authenticator). + FIREWALL_IDPS: "yes" + # Enable GitHub integration tests, to test using GitHubIdentityProviders through a proxy. 
+ <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-fips + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-fips-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-fips-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + ci-build-image: ci-fips-build-image + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # The following AD params enable the ActiveDirectory integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, but it is useful to know if we can communicate with our + # AD server when using FIPS cipher suites. + <<: *active_directory_integration_env_vars + # The following params enable the GitHub integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, but it is useful to know if we can communicate with + # GitHub when using FIPS cipher suites. 
+ <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + input_mapping: + ci-test-image: ci-fips-test-image + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + ci-build-image: ci-fips-build-image + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-k8s-main + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_k8s-main + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: kubectl-apply-test + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: 
integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + - task: uninstall + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + file: pinniped-ci/pipelines/shared-tasks/run-kubectl-uninstall/task.yaml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + # Run the changed-api-group test on the oldest and the newest versions of Kubernetes that we support to give + # us confidence that the middleware code works for versions within that range, without needing to pay the + # cost of running it on every version within the range. + - name: integration-test-changed-api-group-oldest + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *oldest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + PINNIPED_API_GROUP_SUFFIX: walrus.tld + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + # Run the changed-api-group test on the oldest and the newest versions of Kubernetes that we support to give + # us confidence that the middleware code works for versions within that range, without needing to pay the + # cost of running it on every version within the range. 
+ - name: integration-test-changed-api-group-latest + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + PINNIPED_API_GROUP_SUFFIX: walrus.tld + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-multiple-pinnipeds + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - do: # deploy sequentially so when the second deploy starts, it can assume that dex and local-user-authenticator are already deployed + - task: generate-pinniped-password + file: pinniped-ci/pipelines/shared-tasks/generate-pinniped-password/task.yml + image: integration-test-runner-image + - task: deploy-pinniped-with-default-api-group + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + output_mapping: + integration-test-env-vars: integration-test-env-vars-with-default-api-group + kubeconfig: kubeconfig-with-default-api-group + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + SECONDARY_SUPERVISOR_APP_NAME: secondary-supervisor + SECONDARY_SUPERVISOR_NAMESPACE: secondary-supervisor + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: deploy-pinniped-with-custom-api-group + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + output_mapping: + integration-test-env-vars: integration-test-env-vars-with-custom-api-group + kubeconfig: kubeconfig-with-custom-api-group + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + PINNIPED_API_GROUP_SUFFIX: walrus.tld + 
SECONDARY_DEPLOY: "yes" # don't deploy dex and the local-user-authenticator a second time into the cluster + PINNIPED_CONCIERGE_APP_NAME: secondary-concierge + PINNIPED_SUPERVISOR_APP_NAME: secondary-supervisor + PINNIPED_SUPERVISOR_HTTP_NODEPORT: 30234 + PINNIPED_SUPERVISOR_HTTPS_NODEPORT: 30243 + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - do: # once we have proper locking inside our tests, we can convert this back to an "in_parallel:" block + - task: run-integration-tests-on-pinniped-with-default-api-group + input_mapping: + integration-test-env-vars: integration-test-env-vars-with-default-api-group + kubeconfig: kubeconfig-with-default-api-group + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + - task: run-integration-tests-on-pinniped-with-custom-api-group + input_mapping: + integration-test-env-vars: integration-test-env-vars-with-custom-api-group + kubeconfig: kubeconfig-with-custom-api-group + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + PINNIPED_SUPERVISOR_NAMESPACE: secondary-supervisor + PINNIPED_SUPERVISOR_NODEPORT_SERVICE: secondary-supervisor-nodeport + PINNIPED_SUPERVISOR_HTTPS_HOST_PORT: 11344 # see gce-init.sh for the meaning of this port + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: uninstall-test + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: test + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + TEST_SCRIPT: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh + file: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/task.yml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: uninstall-existing-ns-test + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: [ ready-for-int ] + trigger: true + - 
get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: test + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + TEST_SCRIPT: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh + file: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/task.yml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: ready-for-acceptance # fan-in for UI visualization purposes + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped + passed: + - integration-test-1.21 + - integration-test-1.22 + - integration-test-1.23 + - integration-test-1.24 + - integration-test-1.25 + - integration-test-1.26 + - integration-test-1.27 + - integration-test-1.28 + - integration-test-1.29 + - integration-test-1.30 + - integration-test-1.31 + - integration-test-latest-arm64 + - integration-test-latest-with-external-idps + - integration-test-idps-firewalled + - integration-test-latest-fips + - kubectl-apply-test + # integration-test-k8s-main is purposefully excluded. Failures are informational, not blocking. + - integration-test-changed-api-group-oldest + - integration-test-changed-api-group-latest + - integration-test-multiple-pinnipeds + - uninstall-test + - uninstall-existing-ns-test + trigger: true + - get: ci-build-image + passed: + - integration-test-1.21 + - integration-test-1.22 + - integration-test-1.23 + - integration-test-1.24 + - integration-test-1.25 + - integration-test-1.26 + - integration-test-1.27 + - integration-test-1.28 + - integration-test-1.29 + - integration-test-1.30 + - integration-test-1.31 + - integration-test-latest-arm64 + - integration-test-latest-with-external-idps + - integration-test-idps-firewalled + # integration-test-latest-fips uses a different build image, so it is skipped. + - kubectl-apply-test + # integration-test-k8s-main is purposefully excluded. Failures are informational, not blocking. 
+ - integration-test-changed-api-group-oldest + - integration-test-changed-api-group-latest + - integration-test-multiple-pinnipeds + - uninstall-test + - uninstall-existing-ns-test + params: + skip_download: true + - get: ci-test-image + passed: + - integration-test-1.21 + - integration-test-1.22 + - integration-test-1.23 + - integration-test-1.24 + - integration-test-1.25 + - integration-test-1.26 + - integration-test-1.27 + - integration-test-1.28 + - integration-test-1.29 + - integration-test-1.30 + - integration-test-1.31 + - integration-test-latest-arm64 + - integration-test-latest-with-external-idps + - integration-test-idps-firewalled + # integration-test-latest-fips uses a different build image, so it is skipped. + - kubectl-apply-test + # integration-test-k8s-main is purposefully excluded. Failures are informational, not blocking. + - integration-test-changed-api-group-oldest + - integration-test-changed-api-group-latest + - integration-test-multiple-pinnipeds + - uninstall-test + - uninstall-existing-ns-test + params: + skip_download: true + + - name: deploy-and-test-acceptance-gke + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + # Run weekdays to get constant feedback as test dependencies (e.g. Chrome) release new versions. + - get: weekdays + trigger: true + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: integration-test-runner-beta-image + - get: k8s-app-deployer-image + - task: deploy-to-acceptance-gke + file: pinniped-ci/pipelines/shared-tasks/deploy-to-acceptance-gke/task.yml + image: k8s-app-deployer-image + timeout: 45m + params: + PINNIPED_GCP_PROJECT: ((gcp-project-name)) + GKE_USERNAME: ((gke-cluster-developer-username)) + GKE_JSON_KEY: ((gke-cluster-developer-json-key)) + CI_BUILD_IMAGE_NAME: ((ci-ghcr-registry))/ci-build + CI_BUILD_IMAGE_SERVER: https://ghcr.io + CI_BUILD_IMAGE_USERNAME: ((ci-ghcr-puller-username)) + CI_BUILD_IMAGE_PASSWORD: ((ci-ghcr-puller-token)) + # The TMC CLI has been deprecated and replaced by the tanzu CLI. Also, the TMC agent consumes a lot of + # resources and previously caused us to require a larger GKE cluster with more nodes to be able to run + # this job. So for now, don't install any TMC stuff onto this cluster. + # Test using the local user authenticator instead. 
+ # TMC_API_TOKEN: ((tmc-bot-api-token)) + # TMC_CLUSTER_NAME: gke-acceptance-cluster + DEPLOY_LOCAL_USER_AUTHENTICATOR: "yes" + GKE_CLUSTER_NAME: gke-acceptance-cluster + SUPERVISOR_AND_CONCIERGE_NO_CPU_REQUEST: true + RESERVED_LOAD_BALANCER_STATIC_IP: 35.224.24.196 + LOAD_BALANCER_DNS_NAME: gke-acceptance-supervisor-lb.test.pinniped.dev + INGRESS_STATIC_IP_GCLOUD_NAME: gke-acceptance-test-supervisor-ingress-ip + INGRESS_DNS_ENTRY_GCLOUD_NAME: gke-acceptance-supervisor.test.pinniped.dev + <<: *okta_integration_env_vars + OKTA_SUPERVISOR_CALLBACK: ((okta-supervisor-callback)) + <<: *jumpcloud_integration_env_vars + <<: *active_directory_integration_env_vars + <<: *github_integration_env_vars + - task: install-and-configure-cert-manager + file: pinniped-ci/pipelines/shared-tasks/install-and-configure-cert-manager/task.yml + image: k8s-app-deployer-image + timeout: 15m + params: + PINNIPED_GCP_PROJECT: ((gcp-project-name)) + CERT_MANAGER_DNS_ADMIN_JSON_KEY: ((cert-manager-dns-admin-json-key)) + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + - task: run-integration-tests-beta + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-beta-image + params: + TEST_RUN_REGEX: "/_Browser" + <<: *notify_on_failure + <<: *notify_on_success + + # Fan-in just to make it easy to see in the UI which versions are ready to go. + - name: ready-to-release + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: ci-build-image + passed: + - deploy-and-test-acceptance-gke + - scan-image + params: + skip_download: true + - get: pinniped + passed: + - deploy-and-test-acceptance-gke + - scan-image + params: + depth: 1 + trigger: true + + - name: release + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-to-release ] + params: + format: oci + - get: pinniped + passed: [ ready-to-release ] + - get: pinniped-ci + - get: release-semver + params: + bump: minor + - get: previous-release-semver + resource: release-semver + - get: k8s-app-deployer-image + - get: deployment-yaml-formatter-image + - get: crane-image + - task: format-release + file: pinniped-ci/pipelines/shared-tasks/format-release/task.yml + params: + # Change this if you are creating a patch release. 
+ RELEASE_TYPE: minor + - task: build-cli-binaries + file: pinniped-ci/pipelines/shared-tasks/build-cli-binaries/task.yml + - task: copy-pinniped-server-image-to-dockerhub + file: pinniped-ci/pipelines/shared-tasks/copy-image/task.yml + image: crane-image + params: + SOURCE_REPOSITORY_USERNAME: ((ci-ghcr-puller-username)) + SOURCE_REPOSITORY_PASSWORD: ((ci-ghcr-puller-token)) + DESTINATION_REPOSITORY: docker.io/getpinniped/pinniped-server + DESTINATION_TAG: latest # note that we will also choose more tags based on the release-info output from the task above + DESTINATION_REPOSITORY_USERNAME: getpinniped + DESTINATION_REPOSITORY_PASSWORD: ((getpinniped-dockerhub-image-push-access-token)) + - task: copy-pinniped-server-image-to-ghcr + file: pinniped-ci/pipelines/shared-tasks/copy-image/task.yml + image: crane-image + params: + SOURCE_REPOSITORY_USERNAME: ((ci-ghcr-puller-username)) + SOURCE_REPOSITORY_PASSWORD: ((ci-ghcr-puller-token)) + DESTINATION_REPOSITORY: ghcr.io/vmware-tanzu/pinniped/pinniped-server + DESTINATION_TAG: latest # note that we will also choose more tags based on the release-info output from the task above + DESTINATION_REPOSITORY_USERNAME: ((ci-ghcr-pusher-username)) + DESTINATION_REPOSITORY_PASSWORD: ((ci-ghcr-pusher-token)) + - task: template-deployment-yamls + file: pinniped-ci/pipelines/shared-tasks/template-deployment-yamls/task.yml + image: deployment-yaml-formatter-image + params: + # Specify the repo to render into the YAML files. The task will ask the registry for the image digest using the release tag. + IMAGE_REPO: ghcr.io/vmware-tanzu/pinniped/pinniped-server + - put: github-release + inputs: + - release-semver + - release-info + - pinniped + - cli-binaries + - deployment-yamls + params: + name: release-info/version-with-v + tag: release-info/version-with-v + body: release-info/body + commitish: pinniped/.git/ref + globs: + - deployment-yamls/* + - cli-binaries/* + - put: release-semver + inputs: + - release-semver + params: + file: release-semver/version + + - name: update-version-and-cli-docs + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + # Getting release-semver with a pass constraint is only to make this job show up after the release job in the UI. + - get: release-semver + passed: [ release ] + - get: github-final-release + trigger: true + params: + globs: [ pinniped-cli-linux-amd64 ] + - get: pinniped-ci + - get: pinniped + - task: update-version-and-cli-docs + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/update-version-and-cli-docs/task.yml + input_mapping: { pinniped-in: pinniped } + - put: pinniped + params: + repository: pinniped-out + + - name: update-homebrew-formula + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + # Getting release-semver with a pass constraint is only to make this job show up after the release job in the UI. 
+ - get: release-semver + passed: [ release ] + - get: github-final-release + trigger: true + - get: homebrew-pinniped + - get: pinniped-ci + - task: update-homebrew-formula + file: pinniped-ci/pipelines/shared-tasks/update-homebrew-formula/task.yml + input_mapping: + github-release: github-final-release + homebrew-pinniped-in: homebrew-pinniped + - put: homebrew-pinniped + params: + repository: homebrew-pinniped-out + + - name: integration-test-gke-rapid + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-gke-cluster/task.yml + image: k8s-app-deployer-image + params: + GKE_CHANNEL: rapid + <<: *gke_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-gke-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-gke-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/gke.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-gke-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-gke-cluster-output + ensure: + task: remove-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/remove-gke-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + gke-cluster-pool: deploy-gke-cluster-output + params: + <<: *gke_account_params + + - name: integration-test-gke-stable + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-gke-cluster/task.yml + image: k8s-app-deployer-image + params: + GKE_CHANNEL: stable + <<: *gke_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-gke-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-gke-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/gke.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - 
task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-gke-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-gke-cluster-output + ensure: + task: remove-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/remove-gke-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + gke-cluster-pool: deploy-gke-cluster-output + params: + <<: *gke_account_params + + - name: integration-test-eks-oldest + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: eks-deployer-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-eks-cluster/task.yml + image: eks-deployer-image + params: + KUBE_VERSION: "1.25" # See https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html + <<: *aws_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-eks-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-eks-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/eks.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-eks-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-eks-cluster-output + ensure: + task: remove-cluster + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-eks-cluster/task.yml + image: eks-deployer-image + input_mapping: + eks-cluster-pool: deploy-eks-cluster-output + params: + <<: *aws_account_params + + - name: integration-test-eks-newest + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: eks-deployer-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-eks-cluster/task.yml + image: eks-deployer-image + params: + KUBE_VERSION: "1.28" # See https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html + <<: 
*aws_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-eks-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-eks-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/eks.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-eks-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-eks-cluster-output + ensure: + task: remove-cluster + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-eks-cluster/task.yml + image: eks-deployer-image + input_mapping: + eks-cluster-pool: deploy-eks-cluster-output + params: + <<: *aws_account_params + + - name: integration-test-aks-oldest + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: aks-deployer-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-aks-cluster/task.yml + image: aks-deployer-image + params: + KUBE_VERSION: "1.28" # See https://learn.microsoft.com/en-us/azure/aks/supported-kubernetes-versions + <<: *azure_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-aks-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-aks-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/aks.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-aks-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-aks-cluster-output + ensure: + task: remove-cluster + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-aks-cluster/task.yml + image: aks-deployer-image + input_mapping: + aks-cluster-pool: deploy-aks-cluster-output + params: + <<: *azure_account_params + + - name: integration-test-aks-latest + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + 
- get: ci-build-image + passed: [ ready-for-acceptance ] + - get: ci-test-image + passed: [ ready-for-acceptance ] + - get: pinniped + passed: [ ready-for-acceptance ] + trigger: true + - get: pinniped-ci + - get: integration-test-runner-image + - get: aks-deployer-image + - get: k8s-app-deployer-image + - do: + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-aks-cluster/task.yml + image: aks-deployer-image + params: + KUBE_VERSION: "1.29" # See https://learn.microsoft.com/en-us/azure/aks/supported-kubernetes-versions + <<: *azure_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-aks-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-aks-cluster-output + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/aks.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + <<: *notify_on_failure + <<: *notify_on_success + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-aks-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-aks-cluster-output + ensure: + task: remove-cluster + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-aks-cluster/task.yml + image: aks-deployer-image + input_mapping: + aks-cluster-pool: deploy-aks-cluster-output + params: + <<: *azure_account_params diff --git a/pipelines/main/update-pipeline.sh b/pipelines/main/update-pipeline.sh new file mode 100755 index 000000000..cf860a1e8 --- /dev/null +++ b/pipelines/main/update-pipeline.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" +ensure_time_resource_has_at_least_one_version "$pipeline" weekdays + +# Make the pipeline visible to non-authenticated users in the web UI. +$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline" diff --git a/pipelines/pull-requests/pipeline.yml b/pipelines/pull-requests/pipeline.yml new file mode 100644 index 000000000..2c4d89862 --- /dev/null +++ b/pipelines/pull-requests/pipeline.yml @@ -0,0 +1,1914 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/d/d0/Pinniped_underwater.jpg + +meta: + + # Save some work by skipping the full download in the implicit "get" after each of these "put" operations, + # as mentioned in the docs here: https://github.com/telia-oss/github-pr-resource#get + pr-status-handlers: + on_success: &pr-status-on-success + put: update-pull-request-status-success + resource: pinniped-pr + get_params: { skip_download: true } + timeout: 5m + inputs: [ pinniped-pr ] + params: &pr-status-on-success-params + path: pinniped-pr + status: success + on_failure: &pr-status-on-failure + put: update-pull-request-status-failed + resource: pinniped-pr + get_params: { skip_download: true } + timeout: 5m + inputs: [ pinniped-pr ] + params: &pr-status-on-failure-params + path: pinniped-pr + status: failure + on_error: &pr-status-on-error + put: update-pull-request-status-error + resource: pinniped-pr + get_params: { skip_download: true } + timeout: 5m + inputs: [ pinniped-pr ] + params: &pr-status-on-error-params + path: pinniped-pr + status: error + on_abort: &pr-status-on-abort + put: update-pull-request-status-aborted + resource: pinniped-pr + get_params: { skip_download: true } + timeout: 5m + inputs: [ pinniped-pr ] + params: &pr-status-on-abort-params + path: pinniped-pr + status: error + + # on_pending isn't a real handler, but we can reuse this *pr-status-on-pending block + # as a task at the beginning of each job. + on_pending: &pr-status-on-pending + put: update-pull-request-status-pending + resource: pinniped-pr + get_params: { skip_download: true } + timeout: 5m + inputs: [ pinniped-pr ] + params: &pr-status-on-pending-params + path: pinniped-pr + status: pending + + pinniped-pr-input-mapping: &pinniped-pr-input-mapping + input_mapping: + pinniped: pinniped-pr + + # GKE account info and which zone the clusters should be created in and deleted from. + gke_account_params: &gke_account_params + CLUSTER_ZONE: us-central1-c + GCP_PROJECT: ((gcp-project-name)) + GCP_SERVICE_ACCOUNT: ((gke-test-pool-manager-username)) + GCP_JSON_KEY: ((gke-test-pool-manager-json-key)) + + # GCP account info and which zone the workers should be created in and deleted from. + gcp_account_params: &gcp_account_params + INSTANCE_ZONE: us-central1-b # which zone the kind worker VMs should be created in and deleted from + GCP_ZONE: us-central1-b + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-instance-admin-username)) + GCP_JSON_KEY: ((gcp-instance-admin-json-key)) + + cluster_diagnostics_task: &cluster_diagnostics_task + file: pinniped-ci/pipelines/shared-tasks/export-cluster-diagnostics/task.yml + image: integration-test-runner-image + timeout: 15m + params: + GCS_BUCKET: pinniped-ci-archive + GCP_PROJECT: ((gcp-project-name)) + GCP_USERNAME: ((gcp-cluster-diagnostic-uploader-username)) + GCP_JSON_KEY: ((gcp-cluster-diagnostic-uploaded-json-key)) + + # Decides which specific patch versions of k8s we would like to deploy when creating kind cluster workers. + # It should be safe to update the patch version numbers here whenever new versions come out. + # As the old workers get recycled, they will be replaced with new workers which use the patch + # version specified here. The latest available versions can be found here: + # https://hub.docker.com/r/kindest/node/tags + # Note that the available versions of kind node images lag behind the available versions of Kubernetes itself, + # so always check the tags using the above link. 
+  kube_version_v1-21-x: &kube_version_v1-21-x
+    KUBE_VERSION: v1.21.14
+  kube_version_v1-31-x: &kube_version_v1-31-x
+    KUBE_VERSION: v1.31.1
+  kube_version_k8s-main: &kube_version_k8s-main
+    KUBE_VERSION: "k8s-main"
+    KIND_NODE_IMAGE: "ghcr.io/pinniped-ci-bot/kind-node-image:latest"
+  # Whenever we add a new Kubernetes version for kind clusters, please remember to update these
+  # two aliases to reference the oldest and latest Kubernetes versions currently in use.
+  oldest_kind_kube_version: &oldest_kind_kube_version
+    <<: *kube_version_v1-21-x
+  latest_kind_kube_version: &latest_kind_kube_version
+    <<: *kube_version_v1-31-x
+
+  okta_integration_env_vars: &okta_integration_env_vars
+    OKTA_CLI_CALLBACK: ((okta-cli-callback))
+    OKTA_CLI_CLIENT_ID: ((okta-cli-client-id))
+    OKTA_ADDITIONAL_SCOPES: ((okta-additional-scopes))
+    OKTA_USERNAME_CLAIM: ((okta-username-claim))
+    OKTA_GROUPS_CLAIM: ((okta-groups-claim))
+    OKTA_ISSUER: ((okta-issuer))
+    OKTA_PASSWORD: ((okta-password))
+    OKTA_SUPERVISOR_CLIENT_ID: ((okta-supervisor-client-id))
+    OKTA_SUPERVISOR_CLIENT_SECRET: ((okta-supervisor-client-secret))
+    OKTA_USERNAME: ((okta-username))
+    OKTA_GROUPS: ((okta-groups))
+
+  jumpcloud_integration_env_vars: &jumpcloud_integration_env_vars
+    JUMPCLOUD_LDAP_HOST: ((jumpcloud-ldap-host))
+    JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST: ((jumpcloud-ldap-start-tls-only-host))
+    JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME: ((jumpcloud-ldap-bind-account-username))
+    JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD: ((jumpcloud-ldap-bind-account-password))
+    JUMPCLOUD_LDAP_USERS_SEARCH_BASE: ((jumpcloud-ldap-users-search-base))
+    JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE: ((jumpcloud-ldap-groups-search-base))
+    JUMPCLOUD_LDAP_USER_DN: ((jumpcloud-ldap-user-dn))
+    JUMPCLOUD_LDAP_USER_CN: ((jumpcloud-ldap-user-cn))
+    JUMPCLOUD_LDAP_USER_PASSWORD: ((jumpcloud-ldap-user-password))
+    JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME: ((jumpcloud-ldap-user-unique-id-attribute-name))
+    JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE: ((jumpcloud-ldap-user-unique-id-attribute-value))
+    JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME: ((jumpcloud-ldap-user-email-attribute-name))
+    JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE: ((jumpcloud-ldap-user-email-attribute-value))
+    JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN: ((jumpcloud-ldap-expected-direct-groups-dn))
+    JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN: ((jumpcloud-ldap-expected-direct-groups-cn))
+    JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN: ((jumpcloud-ldap-expected-direct-posix-groups-cn))
+
+  active_directory_integration_env_vars: &active_directory_integration_env_vars
+    TEST_ACTIVE_DIRECTORY: "yes"
+    AWS_AD_HOST: ((aws-ad-host))
+    AWS_AD_DOMAIN: ((aws-ad-domain))
+    AWS_AD_BIND_ACCOUNT_USERNAME: ((aws-ad-bind-account-username))
+    AWS_AD_BIND_ACCOUNT_PASSWORD: ((aws-ad-bind-account-password))
+    AWS_AD_USER_USER_PRINCIPAL_NAME: ((aws-ad-user-userprincipalname))
+    AWS_AD_USER_PASSWORD: ((aws-ad-user-password))
+    AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME: ((aws-ad-user-unique-id-attribute-name))
+    AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE: ((aws-ad-user-unique-id-attribute-value))
+    AWS_AD_USER_EXPECTED_GROUPS_DN: ((aws-ad-expected-direct-groups-dn))
+    AWS_AD_USER_EXPECTED_GROUPS_CN: ((aws-ad-expected-direct-groups-cn))
+    AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME: ((aws-ad-expected-direct-and-nested-groups-samaccountnames))
+    AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES: ((aws-ad-expected-direct-and-nested-groups-samaccountname-domainnames))
+    AWS_AD_LDAPS_CA_BUNDLE: ((aws-ad-ca-data))
+    AWS_AD_DEACTIVATED_USER_SAMACCOUNTNAME: ((aws-ad-deactivated-user-samaccountname))
+    AWS_AD_DEACTIVATED_USER_PASSWORD: ((aws-ad-deactivated-user-password))
+    AWS_AD_USER_EMAIL_ATTRIBUTE_VALUE: ((aws-ad-user-email-attribute-value))
+    AWS_AD_DEFAULTNAMINGCONTEXT_DN: ((aws-ad-defaultnamingcontext))
+    AWS_AD_USERS_DN: ((aws-ad-users-dn))
+
+  github_integration_env_vars: &github_integration_env_vars
+    PINNIPED_TEST_GITHUB_APP_CLIENT_ID: ((github-app-client-id))
+    PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET: ((github-app-client-secret))
+    PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID: ((github-oauth-app-client-id))
+    PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET: ((github-oauth-app-client-secret))
+    PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL: ((github-oauth-app-allowed-callback-url))
+    PINNIPED_TEST_GITHUB_USER_USERNAME: ((github-username))
+    PINNIPED_TEST_GITHUB_USER_PASSWORD: ((github-password))
+    PINNIPED_TEST_GITHUB_USER_OTP_SECRET: ((github-user-otp-secret))
+    PINNIPED_TEST_GITHUB_USERID: ((github-userid))
+    PINNIPED_TEST_GITHUB_ORG: ((github-org))
+    PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES: ((github-expected-team-names))
+    PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS: ((github-expected-team-slugs))
+
+resource_types:
+
+  # Use a fork of github-pr-resource that has "trusted_orgs" and "trusted_users" parameters.
+  - name: pull-request
+    type: registry-image
+    source:
+      repository: ((ci-ghcr-registry))/github-pr-resource
+      username: ((ci-ghcr-puller-username))
+      password: ((ci-ghcr-puller-token))
+      tag: latest
+
+resources:
+
+  - name: pinniped-pr
+    type: pull-request
+    icon: source-pull
+    check_every: 10m
+    webhook_token: ((github-webhook-token))
+    source:
+      repository: vmware-tanzu/pinniped
+      access_token: ((ci-bot-access-token-with-repo-status-permission))
+      disable_forks: false
+      base_branch: main
+      disable_ci_skip: true # ignore "[skip ci]" in commit message and PR title
+      required_review_approvals: 1
+      trusted_orgs: # skip required_review_approvals for users with public membership in at least one of these orgs
+        - vmware
+        - vmware-tanzu
+      trusted_users: # skip required_review_approvals for @dependabot so those automated PRs get tested automatically
+        - dependabot
+
+  - name: pinniped-ci
+    type: git
+    icon: github
+    source:
+      uri: git@github.com:vmware-tanzu/pinniped.git
+      branch: ci
+      private_key: ((source-repo-deploy-key))
+
+  - name: ci-build-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: ((ci-ghcr-registry))/test-ci-build
+      username: ((ci-ghcr-pusher-username))
+      password: ((ci-ghcr-pusher-token))
+      tag: latest
+
+  - name: ci-fips-build-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: ((ci-ghcr-registry))/test-ci-fips-build
+      username: ((ci-ghcr-pusher-username))
+      password: ((ci-ghcr-pusher-token))
+      tag: latest
+
+  - name: ci-test-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: ((ci-ghcr-registry))/test-ci-test-binaries
+      username: ((ci-ghcr-pusher-username))
+      password: ((ci-ghcr-pusher-token))
+      tag: latest
+
+  - name: ci-fips-test-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: ((ci-ghcr-registry))/test-ci-fips-test-binaries
+      username: ((ci-ghcr-pusher-username))
+      password: ((ci-ghcr-pusher-token))
+      tag: latest
+
+  - name: integration-test-runner-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: ((ci-ghcr-registry))/integration-test-runner
+      username: ((ci-ghcr-puller-username))
+      password: ((ci-ghcr-puller-token))
+
+  - name: kind-release
+    type: github-release
+    source:
access_token: ((ci-bot-access-token-with-read-user-permission)) # needed to avoid rate limits on GitHub API requests + owner: kubernetes-sigs + repository: kind + pre_release: true + + - name: gcloud-image + type: registry-image + icon: docker + source: + repository: google/cloud-sdk + tag: slim + + - name: k8s-app-deployer-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-app-deployer + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: opensource-lint-image + type: registry-image + icon: docker + source: + repository: golangci/golangci-lint + + - name: code-coverage-uploader-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/code-coverage-uploader + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.25-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.25 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.26-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.26 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.27-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.27 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.28-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.28 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.29-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.29 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.30-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.30 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: k8s-code-generator-1.31-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/k8s-code-generator-1.31 + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + +jobs: + + - name: start + public: true # all logs are publicly visible + plan: + - get: pinniped-pr + trigger: true + version: every + - in_parallel: + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: lint } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: verify-codegen } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: unit-test } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: scan-dependencies } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: run-go-vuln-scan } } + - { <<: 
*pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: build-image } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: build-fips-image } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: build-test-fips-image } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: build-test-binaries } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: scan-image } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-oldest } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-latest } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-latest-arm64 } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-idps-firewalled } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-latest-fips } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-k8s-main } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: kubectl-apply-test } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-changed-api-group-oldest } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-changed-api-group-latest } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-multiple-pinnipeds } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: uninstall-test } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: uninstall-from-existing-namespace-test } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: 
"$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-gke-rapid } } + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, target_url: "$ATC_EXTERNAL_URL/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME", context: integration-test-latest-with-external-idps } } + + - name: lint + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: lint } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: lint } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: lint } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: lint } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: lint } } + - task: lint + timeout: 30m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-lint/task.yml + + - name: lint-latest + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - get: opensource-lint-image + - task: lint + timeout: 30m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-lint/task.yml + image: opensource-lint-image + params: + SKIP_INSTALL_GOLANGCI_LINT: "true" + + - name: verify-codegen + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: verify-codegen } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: verify-codegen } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: verify-codegen } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: verify-codegen } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - get: k8s-code-generator-1.25-image + - get: k8s-code-generator-1.26-image + - get: k8s-code-generator-1.27-image + - get: k8s-code-generator-1.28-image + - get: k8s-code-generator-1.29-image + - get: k8s-code-generator-1.30-image + - get: k8s-code-generator-1.31-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: verify-codegen } } + - in_parallel: + - task: verify-go-mod-tidy + timeout: 20m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml + - task: verify-go-generate + timeout: 20m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-go-generate/task.yml + - task: codegen-1.25 + timeout: 20m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.25-image + params: + KUBE_MINOR_VERSION: "1.25" + - task: codegen-1.26 + timeout: 20m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.26-image + params: + KUBE_MINOR_VERSION: "1.26" + - task: codegen-1.27 + timeout: 20m + <<: *pinniped-pr-input-mapping + file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml + image: k8s-code-generator-1.27-image + 
params:
+ KUBE_MINOR_VERSION: "1.27"
+ - task: codegen-1.28
+ timeout: 20m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml
+ image: k8s-code-generator-1.28-image
+ params:
+ KUBE_MINOR_VERSION: "1.28"
+ - task: codegen-1.29
+ timeout: 20m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml
+ image: k8s-code-generator-1.29-image
+ params:
+ KUBE_MINOR_VERSION: "1.29"
+ - task: codegen-1.30
+ timeout: 20m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml
+ image: k8s-code-generator-1.30-image
+ params:
+ KUBE_MINOR_VERSION: "1.30"
+ - task: codegen-1.31
+ timeout: 20m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.yml
+ image: k8s-code-generator-1.31-image
+ params:
+ KUBE_MINOR_VERSION: "1.31"
+
+ - name: unit-test
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: unit-test } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: unit-test } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: unit-test } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: unit-test } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ version: every
+ passed: [ start ]
+ - get: pinniped-ci
+ - get: code-coverage-uploader-image
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: unit-test } }
+ - task: run-unit-tests
+ timeout: 45m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/run-unit-tests/task.yml
+ - task: upload-test-coverage
+ timeout: 10m
+ <<: *pinniped-pr-input-mapping
+ params:
+ CODECOV_TOKEN: ((codecov-token))
+ file: pinniped-ci/pipelines/shared-tasks/upload-test-coverage/task.yml
+ image: code-coverage-uploader-image
+
+ - name: scan-dependencies
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: scan-dependencies } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: scan-dependencies } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: scan-dependencies } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: scan-dependencies } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ version: every
+ passed: [ start ]
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: scan-dependencies } }
+ - task: get-modules
+ config:
+ platform: linux
+ image_resource:
+ type: registry-image
+ source:
+ repository: golang
+ tag: '1.23.2'
+ inputs:
+ - name: pinniped-pr
+ outputs:
+ - name: pinniped-modules
+ run:
+ dir: pinniped-pr
+ path: sh
+ args:
+ - "-c"
+ - |
+ set -e
+ echo "Installing jq..."
+ ( apt-get update -y && apt-get install -y jq ) > install.log 2>&1 || cat install.log
+
+ # Use 'go list' to find package dependencies, then select the associated module versions.
+ # See https://github.com/sonatype-nexus-community/nancy/issues/228 for details about why
+ # we can't just use 'go list -m -json all'.
+ echo "Listing Go module dependencies..."
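+ # For illustration (the exact field set may differ): each module selected below is emitted as a
+ # JSON object roughly like {"Path": "k8s.io/apiserver", "Version": "v0.31.1"}, and the scan task
+ # that follows feeds that stream to nancy on stdin.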
+ go list -deps -json all | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' > ../pinniped-modules/modules.json
+ - task: scan
+ config:
+ platform: linux
+ image_resource:
+ type: registry-image
+ source:
+ repository: docker.io/sonatypecommunity/nancy
+ tag: alpine
+ inputs:
+ - name: pinniped-modules
+ run:
+ path: 'sh'
+ args:
+ - '-c'
+ - |
+ set -e
+ cat << 'EOF' > exclusions.txt
+ # Vulnerability exclusions for Nancy:
+ # https://github.com/sonatype-nexus-community/nancy#exclude-vulnerabilities
+ #
+ # When editing this, please add an `until=` tag on each entry so we remember to revisit
+ # and clean this file later.
+ # CVE-0000-00000 until=2022-01-01
+
+ # CVE-2020-8561 is in k8s.io/apiserver.
+ # From the comments on this issue https://github.com/kubernetes/kubernetes/issues/104720
+ # it seems like the Kubernetes maintainers are never going to fix it.
+ # Omitting the "until" date on the next line ignores this CVE forever.
+ CVE-2020-8561
+
+ EOF
+
+ nancy sleuth --exclude-vulnerability-file=exclusions.txt < pinniped-modules/modules.json
+
+ - name: run-go-vuln-scan
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: run-go-vuln-scan } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: run-go-vuln-scan } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: run-go-vuln-scan } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: run-go-vuln-scan } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ version: every
+ passed: [ start ]
+ - get: pinniped-ci
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: run-go-vuln-scan } }
+ - task: run-go-vuln-scan
+ file: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.yml
+ input_mapping:
+ pinniped: pinniped-pr
+ params:
+ BUILD_TAGS:
+
+ - name: build-cli
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: build-cli } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: build-cli } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: build-cli } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: build-cli } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ version: every
+ passed: [ start ]
+ - get: pinniped-ci
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: build-cli } }
+ - task: compile
+ timeout: 45m
+ <<: *pinniped-pr-input-mapping
+ file: pinniped-ci/pipelines/shared-tasks/build-cli-binaries/task.yml
+ params:
+ DRY_RUN: "yes"
+
+ - name: build-image
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: build-image } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: build-image } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: build-image } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: build-image } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ version: every
+ passed: [ start ]
+ - get: pinniped-ci
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: build-image } }
+ - task: build-image
+ privileged: true
+ config:
+ platform: linux
+ image_resource:
+ type: registry-image
+ source:
+ repository: concourse/oci-build-task
+ inputs:
+ - name: pinniped-pr
+ outputs:
+ - name: image
+ run:
+ path: build
+ caches:
+ - path: cache
+ params:
+ CONTEXT: pinniped-pr
+ OUTPUT_OCI: true # needed for building multi-arch images
+ IMAGE_PLATFORM: "linux/amd64,linux/arm64" # build a multi-arch image which includes these platforms
+ - put: ci-build-image
+ get_params:
+ skip_download: true
+ format: oci # needed for multi-arch images
+ params:
+ image: image/image # this is a directory for OCI (multi-arch images)
+
+ - name: check-image-version
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: check-image-version } }
+ on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: check-image-version } }
+ on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: check-image-version } }
+ on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: check-image-version } }
+ public: true # all logs are publicly visible
+ plan:
+ - in_parallel:
+ - get: pinniped-pr
+ trigger: true
+ passed: [ build-image ]
+ - get: ci-build-image
+ params:
+ format: rootfs
+ passed: [ build-image ]
+ - get: pinniped-ci
+ - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: check-image-version } }
+ - task: confirm-version
+ file: pinniped-ci/pipelines/shared-tasks/confirm-version/task.yml
+ input_mapping:
+ image: ci-build-image
+ pinniped: pinniped-pr
+
+ # This job builds Pinniped using a different Dockerfile
+ # to test that it is FIPS compatible.
+ # It uses go-boringcrypto rather than standard Go.
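+ # As a rough illustration of how a BoringCrypto-based build can be recognized (the
+ # confirm-built-with-fips task below may use a different check, and the binary name here is
+ # only an example):
+ #   go tool nm pinniped-server | grep -q goboringcrypto && echo "built with BoringCrypto"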
+ - name: build-fips-image + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: build-fips-image } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: build-fips-image } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: build-fips-image } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: build-fips-image } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: build-fips-image } } + - task: build-fips-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped-ci + - name: pinniped-pr + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped-pr + UNPACK_ROOTFS: true + DOCKERFILE: pinniped-pr/hack/Dockerfile_fips + - task: confirm-built-with-fips + file: pinniped-ci/pipelines/shared-tasks/confirm-built-with-fips/task.yml + - put: ci-fips-build-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: build-test-image + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: build-test-binaries } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: build-test-binaries } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: build-test-binaries } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: build-test-binaries } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - in_parallel: + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: build-test-binaries } } + - task: build-test-image + privileged: true + config: + platform: linux + image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped-pr + - name: pinniped-ci + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped-pr + DOCKERFILE: pinniped-ci/pipelines/shared-helpers/test-binaries-image/Dockerfile + - put: ci-test-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: build-test-fips-image + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: build-test-fips-image } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: build-test-fips-image } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: build-test-fips-image } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: build-test-fips-image } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + trigger: true + version: every + passed: [ start ] + - get: pinniped-ci + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: build-test-fips-image } } + - in_parallel: + - task: build-test-fips-image + privileged: true + config: + platform: linux + 
image_resource: + type: registry-image + source: + repository: concourse/oci-build-task + inputs: + - name: pinniped-pr + - name: pinniped-ci + outputs: + - name: image + run: + path: build + caches: + - path: cache + params: + CONTEXT: pinniped-pr + DOCKERFILE: pinniped-ci/pipelines/shared-helpers/test-binaries-image/Dockerfile_fips + - put: ci-fips-test-image + get_params: { skip_download: true } + params: + image: image/image.tar + + - name: ready-for-int # fan-in to make pass constraints for the rest of the pipeline easier to reason about + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: + - lint + - unit-test + - verify-codegen + - check-image-version + - build-test-image + - build-cli + - scan-dependencies + - build-fips-image + - build-test-fips-image + - run-go-vuln-scan + trigger: true + - get: ci-build-image + passed: [ check-image-version ] + params: + skip_download: true + - get: ci-test-image + passed: [ build-test-image ] + params: + skip_download: true + - get: ci-fips-build-image + passed: [ build-fips-image ] + params: + skip_download: true + - get: ci-fips-test-image + passed: [ build-test-fips-image ] + params: + skip_download: true + + - name: scan-image + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: scan-image } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: scan-image } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: scan-image } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: scan-image } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: ci-build-image + passed: [ ready-for-int ] + params: + format: oci + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: scan-image } } + - task: trivy + file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml + input_mapping: + image: ci-build-image + params: + GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission)) + IGNORE_VULNERABILITY_IDS: | + # Trivy thinks this is a HIGH CVE in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. + # That is an indirect dependency of our project, which we inherit from our direct dep k8s.io/apiserver. + # Therefore, we will inherit a solution for this CVE from k8s.io/apiserver when they fix it. Ignore it for now. 
+ CVE-2023-47108 + + - name: integration-test-oldest + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-oldest } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-oldest } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-oldest } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-oldest } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-oldest } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *oldest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-latest } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-latest } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-latest } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-latest } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-latest } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + 
- task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-arm64 + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-latest-arm64 } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-latest-arm64 } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-latest-arm64 } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-latest-arm64 } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-latest-arm64 } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + INSTANCE_ARCH: arm64 # deploy on an arm64 VM to have a Kind cluster with arm64 nodes + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-with-external-idps + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-latest-with-external-idps } } + on_failure: { 
<<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-latest-with-external-idps } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-latest-with-external-idps } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-latest-with-external-idps } } + public: true # all logs are publicly visible + serial_groups: + - github + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-latest-with-external-idps } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # The following Okta params will cause the integration tests to use Okta instead of Dex. + # We don't need to run these on every version of Kubernetes for Kind in this pipeline, so we choose to run + # them on one version to get some coverage. + <<: *okta_integration_env_vars + # The following Jumpcloud params will cause the integration tests to use Jumpcloud instead of OpenLDAP. + # We don't need to run these on every version of Kubernetes for Kind in this pipeline, so we choose to run + # them on one version to get some coverage. + <<: *jumpcloud_integration_env_vars + # The following AD params enable the ActiveDirectory integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, so we choose to run them on one version to get some coverage. + <<: *active_directory_integration_env_vars + # The following params enable the GitHub integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, so we choose to run them on one version to get some coverage. 
+ <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-idps-firewalled + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-idps-firewalled } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-idps-firewalled } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-idps-firewalled } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-idps-firewalled } } + public: true # all logs are publicly visible + serial_groups: + - github + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-idps-firewalled } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # Prevent direct connections from the Concierge and Supervisor to each other and to Dex, + # local user authenticator, and GitHub. Also configures the Concierge and Supervisor to + # make https requests through the Squid web proxy server. Tests the HTTPS_PROXY feature + # for sending requests through a corporate proxy for OIDCIdentityProvider (Supervisor->Dex), + # GitHubIdentityProvider (Supervisor->GitHub), JWTAuthenticator (Concierge->Supervisor), + # and WebhookAuthenticator (Concierge->local user authenticator). + FIREWALL_IDPS: "yes" + # Enable GitHub integration tests, to test using GitHubIdentityProviders through a proxy. 
+ <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-latest-fips + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-latest-fips } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-latest-fips } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-latest-fips } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-latest-fips } } + public: true # all logs are publicly visible + serial_groups: + - github + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-fips-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-fips-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-latest-fips } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + ci-build-image: ci-fips-build-image + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + # The following AD params enable the ActiveDirectory integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, but it is useful to know if we can communicate with our + # AD server when using FIPS cipher suites. + <<: *active_directory_integration_env_vars + # The following params enable the GitHub integration tests. We don't need to run these on every + # version of Kubernetes for Kind in this pipeline, but it is useful to know if we can communicate with + # GitHub when using FIPS cipher suites. 
+ <<: *github_integration_env_vars + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + input_mapping: + ci-test-image: ci-fips-test-image + pinniped: pinniped-pr + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + ci-build-image: ci-fips-build-image + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-k8s-main + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-k8s-main } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-k8s-main } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-k8s-main } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-k8s-main } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-k8s-main } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *kube_version_k8s-main + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: kubectl-apply-test + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: kubectl-apply-test } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: kubectl-apply-test } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: kubectl-apply-test } } + on_abort: { 
<<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: kubectl-apply-test } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: kubectl-apply-test } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml + image: integration-test-runner-image + - task: run-integration-tests + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + input_mapping: + pinniped: pinniped-pr + - task: uninstall + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + file: pinniped-ci/pipelines/shared-tasks/run-kubectl-uninstall/task.yaml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + # Run the changed-api-group test on the oldest and the newest versions of Kubernetes that we support to give + # us confidence that the middleware code works for versions within that range, without needing to pay the + # cost of running it on every version within the range. 
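+ # For illustration only (assuming the custom suffix simply replaces the default "pinniped.dev"
+ # suffix in the served API group names), a cluster deployed with PINNIPED_API_GROUP_SUFFIX=walrus.tld
+ # should list its Pinniped APIs under that suffix, e.g.:
+ #   kubectl api-resources 2>/dev/null | grep walrus.tld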
+ - name: integration-test-changed-api-group-oldest + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-changed-api-group-oldest } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-changed-api-group-oldest } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-changed-api-group-oldest } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-changed-api-group-oldest } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-changed-api-group-oldest } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *oldest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + PINNIPED_API_GROUP_SUFFIX: walrus.tld + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + # Run the changed-api-group test on the oldest and the newest versions of Kubernetes that we support to give + # us confidence that the middleware code works for versions within that range, without needing to pay the + # cost of running it on every version within the range. 
+ - name: integration-test-changed-api-group-latest + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-changed-api-group-latest } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-changed-api-group-latest } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-changed-api-group-latest } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-changed-api-group-latest } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-changed-api-group-latest } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: deploy + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + PINNIPED_API_GROUP_SUFFIX: walrus.tld + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-multiple-pinnipeds + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-multiple-pinnipeds } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-multiple-pinnipeds } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-multiple-pinnipeds } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-multiple-pinnipeds } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-multiple-pinnipeds } } + - task: 
deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - do: # deploy sequentially so when the second deploy starts, it can assume that dex and local-user-authenticator are already deployed + - task: generate-pinniped-password + file: pinniped-ci/pipelines/shared-tasks/generate-pinniped-password/task.yml + image: integration-test-runner-image + - task: deploy-pinniped-with-default-api-group + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + output_mapping: + integration-test-env-vars: integration-test-env-vars-with-default-api-group + kubeconfig: kubeconfig-with-default-api-group + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + SECONDARY_SUPERVISOR_APP_NAME: secondary-supervisor + SECONDARY_SUPERVISOR_NAMESPACE: secondary-supervisor + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - task: deploy-pinniped-with-custom-api-group + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + output_mapping: + integration-test-env-vars: integration-test-env-vars-with-custom-api-group + kubeconfig: kubeconfig-with-custom-api-group + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + PINNIPED_API_GROUP_SUFFIX: walrus.tld + SECONDARY_DEPLOY: "yes" # don't deploy dex and the local-user-authenticator a second time into the cluster + PINNIPED_CONCIERGE_APP_NAME: secondary-concierge + PINNIPED_SUPERVISOR_APP_NAME: secondary-supervisor + PINNIPED_SUPERVISOR_HTTP_NODEPORT: 30234 + PINNIPED_SUPERVISOR_HTTPS_NODEPORT: 30243 + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + - do: # once we have proper locking inside our tests, we can convert this back to an "in_parallel:" block + - task: run-integration-tests-on-pinniped-with-default-api-group + input_mapping: + pinniped: pinniped-pr + integration-test-env-vars: integration-test-env-vars-with-default-api-group + kubeconfig: kubeconfig-with-default-api-group + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + - task: run-integration-tests-on-pinniped-with-custom-api-group + input_mapping: + pinniped: pinniped-pr + integration-test-env-vars: integration-test-env-vars-with-custom-api-group + kubeconfig: kubeconfig-with-custom-api-group + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + params: + START_GCLOUD_PROXY: "yes" + <<: *gcp_account_params + PINNIPED_SUPERVISOR_NAMESPACE: secondary-supervisor + PINNIPED_SUPERVISOR_NODEPORT_SERVICE: secondary-supervisor-nodeport + PINNIPED_SUPERVISOR_HTTPS_HOST_PORT: 11344 # see gce-init.sh for the meaning of this port + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: uninstall-test + on_success: { <<: 
*pr-status-on-success, params: { <<: *pr-status-on-success-params, context: uninstall-test } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: uninstall-test } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: uninstall-test } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: uninstall-test } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: uninstall-test } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: test + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + TEST_SCRIPT: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh + file: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/task.yml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: uninstall-existing-ns-test + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: uninstall-from-existing-namespace-test } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: uninstall-from-existing-namespace-test } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: uninstall-from-existing-namespace-test } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: uninstall-from-existing-namespace-test } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: kind-release + - get: gcloud-image + - get: k8s-app-deployer-image + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: integration-test-runner-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: uninstall-from-existing-namespace-test } } + - task: deploy-kind-instance + attempts: 5 + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml + image: gcloud-image + params: + <<: *latest_kind_kube_version + <<: *gcp_account_params + - task: test + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/kind.yaml + TEST_SCRIPT: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh + file: 
pinniped-ci/pipelines/shared-tasks/run-uninstall-test/task.yml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-kind-cluster-vm-output + ensure: + task: remove-kind-instance + attempts: 2 + timeout: 20m + file: pinniped-ci/pipelines/shared-tasks/remove-kind-cluster-vm/task.yml + image: gcloud-image + input_mapping: + kind-cluster-pool: deploy-kind-cluster-vm-output + params: + <<: *gcp_account_params + + - name: integration-test-gke-rapid + on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: integration-test-gke-rapid } } + on_failure: { <<: *pr-status-on-failure, params: { <<: *pr-status-on-failure-params, context: integration-test-gke-rapid } } + on_error: { <<: *pr-status-on-error, params: { <<: *pr-status-on-error-params, context: integration-test-gke-rapid } } + on_abort: { <<: *pr-status-on-abort, params: { <<: *pr-status-on-abort-params, context: integration-test-gke-rapid } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: [ ready-for-int ] + trigger: true + - get: pinniped-ci + - get: ci-build-image + passed: [ ready-for-int ] + params: + skip_download: true + - get: ci-test-image + passed: [ ready-for-int ] + - get: integration-test-runner-image + - get: k8s-app-deployer-image + - { <<: *pr-status-on-pending, params: { <<: *pr-status-on-pending-params, context: integration-test-gke-rapid } } + - task: deploy-cluster + timeout: 30m + file: pinniped-ci/pipelines/shared-tasks/deploy-gke-cluster/task.yml + image: k8s-app-deployer-image + params: + GKE_CHANNEL: rapid + <<: *gke_account_params + - task: pre-warm-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/pre-warm-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster-pool: deploy-gke-cluster-output + attempts: 3 + - task: deploy + file: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.yml + image: integration-test-runner-image + timeout: 15m + input_mapping: + cluster-pool: deploy-gke-cluster-output + pinniped: pinniped-pr + params: + CLUSTER_CAPABILITIES_PATH: pinniped/test/cluster_capabilities/gke.yaml + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: "yes" + - task: run-integration-tests + <<: *pinniped-pr-input-mapping + timeout: 75m + file: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.yml + image: integration-test-runner-image + ensure: + task: export-cluster-diagnostics + <<: *cluster_diagnostics_task + input_mapping: + cluster-pool: deploy-gke-cluster-output + ensure: + task: cleanup-kapp + timeout: 5m + file: pinniped-ci/pipelines/shared-tasks/kapp-delete/task.yml + image: k8s-app-deployer-image + input_mapping: + cluster: deploy-gke-cluster-output + ensure: + task: remove-cluster + timeout: 10m + file: pinniped-ci/pipelines/shared-tasks/remove-gke-cluster/task.yml + image: k8s-app-deployer-image + input_mapping: + gke-cluster-pool: deploy-gke-cluster-output + params: + <<: *gke_account_params + + - name: finish + # "ci-passed" only appears here at the end of the pipeline and is marked in the branch protection settings for + # the main branch as a required status, so a PR cannot be merged until this status shows up as a success. 
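+    # This job defines no tasks of its own: pulling pinniped-pr with the "passed" constraints listed below is
+    # what gates it, and the on_success hook below then reports the "ci-passed" status back to the PR.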
+ on_success: { <<: *pr-status-on-success, params: { <<: *pr-status-on-success-params, context: ci-passed } } + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-pr + version: every + passed: + - scan-image + - integration-test-oldest + - integration-test-latest + - integration-test-latest-arm64 + - integration-test-latest-with-external-idps + - integration-test-idps-firewalled + - integration-test-gke-rapid + - kubectl-apply-test + # integration-test-k8s-main is purposefully excluded. Failures are informational, not blocking. + - integration-test-latest-fips + - integration-test-changed-api-group-oldest + - integration-test-changed-api-group-latest + - integration-test-multiple-pinnipeds + - uninstall-test + - uninstall-existing-ns-test + trigger: true diff --git a/pipelines/pull-requests/update-pipeline.sh b/pipelines/pull-requests/update-pipeline.sh new file mode 100755 index 000000000..806cf7ff8 --- /dev/null +++ b/pipelines/pull-requests/update-pipeline.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" + +# Make the pipeline visible to non-authenticated users in the web UI. +$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline" diff --git a/pipelines/security-scan/pipeline.yml b/pipelines/security-scan/pipeline.yml new file mode 100644 index 000000000..072ab88bf --- /dev/null +++ b/pipelines/security-scan/pipeline.yml @@ -0,0 +1,267 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +display: + + background_image: https://upload.wikimedia.org/wikipedia/commons/d/d0/KelpforestI2500ppx.JPG + +meta: + + notify_on_failure: ¬ify_on_failure + on_failure: + put: gchat + timeout: 5m + params: + text: | + Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :( + ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME} + +resource_types: + + # Try using the latest version of the registry-image resource because of this problem: + # https://vmware.slack.com/archives/C6TL2PMC7/p1702052766131149 + - name: registry-image + type: registry-image + source: + repository: concourse/registry-image-resource + tag: latest + # We are only doing pulls of this resource type, but add the username and password to avoid + # hitting a rate limit. Our free account is only allowed to have one access token, so we + # cannot make a read-only token for performing pulls. + username: getpinniped + password: ((getpinniped-dockerhub-image-push-access-token)) + + - name: google-chat-notify-resource + type: docker-image + source: + repository: springio/google-chat-notify-resource + tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags + # We are only doing pulls of this resource type, but add the username and password to avoid + # hitting a rate limit. Our free account is only allowed to have one access token, so we + # cannot make a read-only token for performing pulls. 
+ username: getpinniped + password: ((getpinniped-dockerhub-image-push-access-token)) + +resources: + + - name: pinniped-source + type: git + icon: github + source: + uri: https://github.com/vmware-tanzu/pinniped.git + branch: main + + - name: pinniped-ci + type: git + icon: github + source: + uri: git@github.com:vmware-tanzu/pinniped.git + branch: ci + private_key: ((source-repo-deploy-key)) + + - name: pinniped-latest-release-image + type: registry-image + icon: docker + source: + repository: ghcr.io/vmware-tanzu/pinniped/pinniped-server + tag: latest + + - name: pinniped-latest-main-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/ci-build + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + tag: latest + + - name: gh-cli-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/gh-cli + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + tag: latest + + - name: golang-image + type: registry-image + icon: docker + source: + repository: docker.io/golang + + - name: crane-image + type: registry-image + icon: docker + source: + repository: ((ci-ghcr-registry))/crane + username: ((ci-ghcr-puller-username)) + password: ((ci-ghcr-puller-token)) + + - name: weekdays + type: time + icon: calendar-clock + source: + location: America/Los_Angeles + start: 6:00 AM + stop: 7:00 AM + days: [ Monday, Tuesday, Wednesday, Thursday, Friday ] + + - name: gchat + type: google-chat-notify-resource + icon: chat-outline + source: + url: ((gchat-project-pinniped-bots-webhook-url)) + +jobs: + + - name: nancy-main + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: weekdays + trigger: true + - get: pinniped-source + - task: get-modules + config: + platform: linux + image_resource: + type: registry-image + source: + repository: docker.io/golang + inputs: + - name: pinniped-source + outputs: + - name: pinniped-modules + run: + dir: "pinniped-source" + path: sh + args: + - "-c" + - | + set -e + echo "Installing jq..." + ( apt-get update -y && apt-get install -y jq ) 2>&1 > install.log || cat install.log + + # Use 'go list' to find package dependencies, then select the associated module versions. + # See https://github.com/sonatype-nexus-community/nancy/issues/228 for details about why + # we can't just use 'go list -mod -json all'. + echo "Listing Go module dependencies..." + go list -deps -json all | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' > ../pinniped-modules/modules.json + - task: scan + config: + platform: linux + image_resource: + type: registry-image + source: + repository: docker.io/sonatypecommunity/nancy + tag: alpine + inputs: + - name: pinniped-modules + run: + path: 'sh' + args: + - '-c' + - | + set -e + cat <<EOF > exclusions.txt + # Vulnerability exclusions for Nancy: + # https://github.com/sonatype-nexus-community/nancy#exclude-vulnerabilities + # + # When editing this, please add an `until=` tag on each entry so we remember to revisit + # and clean this file later. + # CVE-0000-00000 until=2022-01-01 + # + # CVE-2020-8561 is in k8s.io/apiserver@v0.27.1, + # which is the latest version as of 2023-05-10. + # From the comments on this issue https://github.com/kubernetes/kubernetes/issues/104720 + # it seems like the Kubernetes maintainers are never going to fix it. + # Removing the "until" date on the next line to ignore this CVE forever. 
+ CVE-2020-8561 + EOF + + nancy sleuth --exclude-vulnerability-file=exclusions.txt < pinniped-modules/modules.json + + - name: trivy-release + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: weekdays + trigger: true + - get: pinniped-latest-release-image + params: + format: oci + - get: pinniped-ci + - task: scan + file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml + params: + GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission)) + input_mapping: + image: pinniped-latest-release-image + + - name: trivy-main + public: true # all logs are publicly visible + serial: true + plan: + - in_parallel: + - get: weekdays + trigger: true + - get: pinniped-latest-main-image + params: + format: oci + - get: pinniped-ci + - task: scan + file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml + params: + GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission)) + input_mapping: + image: pinniped-latest-main-image + + - name: all-golang-deps-updated + public: true # all logs are publicly visible + <<: *notify_on_failure + serial: true + plan: + - in_parallel: + - get: weekdays + trigger: true + - get: pinniped-source + - get: pinniped-ci + - get: gh-cli-image + - get: crane-image + - get: golang-image + params: + skip_download: true + - task: check-golang-deps-updated + file: pinniped-ci/pipelines/shared-tasks/check-golang-deps-updated/task.yml + input_mapping: + pinniped-in: pinniped-source + - task: check-dockerfile-deps-updated + image: crane-image + file: pinniped-ci/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml + input_mapping: + pinniped-in: pinniped-out # the output of the previous task + - task: create-or-update-pr + image: gh-cli-image + file: pinniped-ci/pipelines/shared-tasks/create-or-update-pr/task.yml + params: + DEPLOY_KEY: ((source-repo-deploy-key)) + GH_TOKEN: ((ci-bot-access-token-with-public-repo-write-permission)) + input_mapping: + pinniped: pinniped-out + + - name: run-go-vuln-scan + public: true # all logs are publicly visible + plan: + - in_parallel: + - get: pinniped-source + trigger: true + - get: pinniped-ci + - task: run-go-vuln-scan + file: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.yml + input_mapping: + pinniped: pinniped-source + params: + BUILD_TAGS: diff --git a/pipelines/security-scan/update-pipeline.sh b/pipelines/security-scan/update-pipeline.sh new file mode 100755 index 000000000..cf860a1e8 --- /dev/null +++ b/pipelines/security-scan/update-pipeline.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +pipeline=$(basename "$script_dir") +source "$script_dir/../../hack/fly-helpers.sh" + +set_pipeline "$pipeline" "$script_dir/pipeline.yml" +ensure_time_resource_has_at_least_one_version "$pipeline" weekdays + +# Make the pipeline visible to non-authenticated users in the web UI. +$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline" diff --git a/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh b/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh new file mode 100755 index 000000000..ce3e9ecd9 --- /dev/null +++ b/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh @@ -0,0 +1,1327 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script is designed to be used in CI to deploy to kind, GKE, and TKGS clusters. +# It is also designed to be run on a development workstation (see hack/prepare-remote-cluster-for-integration-tests.sh). +# +# Goal: +# To prepare the cluster for integration tests, and to write out the environment +# variables needed for running integration tests against the cluster. +# +# Assumptions: +# - The current working directory is the top of the source code repo. +# - The kube config is already set up. +# - The necessary tooling is installed. +# +# Inputs: +# - $CONCIERGE_APP_NAME decides the app_name YTT template value of the Concierge app. +# By default this is set to "concierge". +# - $CONCIERGE_NAMESPACE decides in which namespace the Concierge app should be deployed. +# By default this is set to "concierge". +# - $SUPERVISOR_APP_NAME decides the app_name YTT template value of the Supervisor app. +# By default this is set to "supervisor". +# - $SUPERVISOR_NAMESPACE decides in which namespace the Supervisor app should be deployed. +# By default this is set to "supervisor". +# - $IMAGE_REPO, $IMAGE_TAG, and $IMAGE_DIGEST decide which app container to deploy. +# - Note! The deployment templates prefer IMAGE_DIGEST, so: +# - if both IMAGE_TAG and IMAGE_DIGEST are set, IMAGE_DIGEST is preferred; +# - if IMAGE_TAG is set but IMAGE_DIGEST is not, then IMAGE_TAG is preferred; +# - if IMAGE_TAG is not set but IMAGE_DIGEST is, then IMAGE_DIGEST is preferred. +# - $TMC_API_TOKEN and $TMC_CLUSTER_NAME enable the cluster to be attached to TMC. +# - $PINNIPED_DISCOVERY_URL decides the "discovery_url" ytt template value. +# If the env var is not set, then we use "null" since that will indicate to +# pinniped that we want to use the default discovery URL from the cluster. +# - $PINNIPED_TEST_CLUSTER_CAPABILITY_FILE is the path to a yaml file which describes +# the capabilities of the test cluster. +# - $API_SERVING_CERT_DURATION and $API_SERVING_CERT_RENEW_BEFORE set the +# corresponding values in the YTT template. They are optional. +# - $DEPLOY_LOCAL_USER_AUTHENTICATOR, when set to "yes", will deploy and use the +# local-user-authenticator instead of using the TMC webhook authenticator. +# - $DEPLOY_TEST_TOOLS will deploy the squid proxy, Dex, and OpenLDAP into the cluster. +# If the OKTA_* and JUMPCLOUD_* variables are not present, then Dex and OpenLDAP +# will be configured for the integration tests. +# - To use Okta instead of Dex, use the variables $OKTA_ISSUER, $OKTA_CLI_CLIENT_ID, +# $OKTA_CLI_CALLBACK, $OKTA_ADDITIONAL_SCOPES, $OKTA_USERNAME_CLAIM, $OKTA_GROUPS_CLAIM, +# $OKTA_SUPERVISOR_CLIENT_ID, $OKTA_SUPERVISOR_CLIENT_SECRET, $OKTA_SUPERVISOR_CALLBACK, +# $OKTA_USERNAME, $OKTA_GROUPS, and $OKTA_PASSWORD to configure the Okta client.
+# - To use Jumpcloud instead of OpenLDAP, use the variables $JUMPCLOUD_LDAP_HOST, +# $JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST, +# $JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME, $JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD, +# $JUMPCLOUD_LDAP_USERS_SEARCH_BASE, $JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE, +# $JUMPCLOUD_LDAP_USER_DN, $JUMPCLOUD_LDAP_USER_CN, $JUMPCLOUD_LDAP_USER_PASSWORD, +# $JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME, $JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE, +# $JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME, $JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE, +# $JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN, $JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN, +# and $JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN to configure the LDAP tests. +# - $FIREWALL_IDPS, when set to "yes", will add NetworkPolicies to effectively firewall the Concierge +# and Supervisor pods such that they need to use the Squid proxy server to reach several of the IDPs. +# Note that NetworkPolicy is not supported on all flavors of Kube, but can be enabled on GKE by using +# `--enable-network-policy` when creating the GKE cluster, and is supported in recent versions of Kind. +# - $TEST_ACTIVE_DIRECTORY determines whether to test against AWS Managed Active +# Directory. Note that there's no "local" equivalent-- for OIDC we use Dex's internal +# user store or Okta, for LDAP we deploy OpenLDAP or use Jumpcloud, +# but for AD there is only the hosted version. +# When set, the tests are configured with the variables +# $AWS_AD_HOST, $AWS_AD_DOMAIN, $AWS_AD_BIND_ACCOUNT_USERNAME, $AWS_AD_BIND_ACCOUNT_PASSWORD, +# $AWS_AD_USER_USER_PRINCIPAL_NAME, $AWS_AD_USER_PASSWORD, $AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME, +# $AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE, $AWS_AD_USER_EXPECTED_GROUPS_DN, +# $AWS_AD_USER_EXPECTED_GROUPS_CN, and $AWS_AD_LDAPS_CA_BUNDLE. +# - $USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR, when set to "yes", will create LoadBalancers for Dex and Supervisor. +# This script will wait for those LoadBalancers to receive their IP addresses and then use those IP addresses: +# - when configuring Dex with its own issuer URL +# - when configuring Dex with allowed callbacks (for the Supervisor) +# - when configuring the integration test variables for the Supervisor hostname and Dex's callback URL for the Supervisor +# This option cannot be used with $SUPERVISOR_LOAD_BALANCER==yes or $SUPERVISOR_INGRESS==yes. +# This option must be used with $DEPLOY_TEST_TOOLS==yes. +# - $SUPERVISOR_LOAD_BALANCER, when set to "yes", will deploy the Supervisor +# with a LoadBalancer Service defined. When set to "yes" the following additional +# variables are expected: +# - $SUPERVISOR_LOAD_BALANCER_STATIC_IP: The IP for the load balancer service to +# use. Optional. +# - $SUPERVISOR_LOAD_BALANCER_DNS_NAME: The DNS name associated with the +# load balancer IP address. Required when $SUPERVISOR_LOAD_BALANCER is "yes". +# - If the $SUPERVISOR_LOAD_BALANCER_DNS_NAME is given without the +# $SUPERVISOR_LOAD_BALANCER_STATIC_IP, then allow the load balancer service +# to choose its own IP address, and dynamically register that address as the name +# specified in $SUPERVISOR_LOAD_BALANCER_DNS_NAME using the Cloud DNS service. +# - $SUPERVISOR_INGRESS, when set to "yes", will deploy the Supervisor with a +# NodePort Service defined and create an Ingress connected to that Service. +# When set to "yes" the following additional variables are expected: +# - $SUPERVISOR_INGRESS_STATIC_IP_NAME: The name of the static IP resource from the +# underlying cloud infrastructure platform. Optional.
+# - $SUPERVISOR_INGRESS_DNS_NAME: The DNS hostname associated with the +# ingress' IP address. Required when $SUPERVISOR_INGRESS is "yes". +# - $SUPERVISOR_INGRESS_PATH_PATTERN: The path that will be set in the Ingress object +# (e.g., "/", "/*"; this depends on what is supported by the underlying platform). +# Required when $SUPERVISOR_INGRESS is "yes". +# - If the $SUPERVISOR_INGRESS_DNS_NAME is given without the +# $SUPERVISOR_INGRESS_STATIC_IP_NAME, then allow the ingress service +# to choose its own IP address, and dynamically register that address as the name +# specified in $SUPERVISOR_INGRESS_DNS_NAME using the Cloud DNS service. +# - When neither $SUPERVISOR_LOAD_BALANCER nor $SUPERVISOR_INGRESS is set, then we will use +# nodeport services to make the supervisor available. In this case you may specify +# $PINNIPED_SUPERVISOR_HTTP_NODEPORT and $PINNIPED_SUPERVISOR_HTTPS_NODEPORT if you +# would like to override the default port numbers. +# - $PINNIPED_API_GROUP_SUFFIX decides the "api_group_suffix" ytt value for both +# the Concierge and Supervisor deployments. Optional. The default is to omit the +# "api_group_suffix" option, thus accepting the default from the ytt templates. +# - $SECONDARY_DEPLOY, when set to "yes", assumes that some other invocation of this +# script (another deploy) is responsible for actually deploying dex and the +# local-user-authenticator. This current (second) invocation will skip deploying dex +# and the local-user-authenticator, but will still set the test env file flags +# for them if they were requested as if it had deployed them, to allow the integration +# tests to still use them. This is currently only intended to be used on kind clusters, +# so it is not designed to interact with flags that we only use on the acceptance +# cluster deploys like the TMC token and ingress/load balancer flags mentioned above. +# - $SECONDARY_SUPERVISOR_APP_NAME - the app name of the Supervisor that will be used +# in the secondary deploy; this will be used in the primary deploy and ignored in the +# secondary deploy. This is optional, and if you do not set this variable then we expect +# that you do not intend to follow up with a second deploy. +# - $SECONDARY_SUPERVISOR_NAMESPACE - the namespace of the Supervisor that will be used +# in the secondary deploy; this will be used in the primary deploy and ignored in the +# secondary deploy. This is optional, and if you do not set this variable then we expect +# that you do not intend to follow up with a second deploy. +# - $PINNIPED_DEX_TEST_USER_PASSWORD - the password for "pinny" in dex. This only really +# matters when you're deploying multiple pinnipeds, since the password needs to be +# consistent between them. Otherwise one will be generated here. +# - $PINNIPED_LDAP_TEST_USER_PASSWORD - the password for "pinny" in LDAP. This only really +# matters when you're deploying multiple pinnipeds, since the password needs to be +# consistent between them. Otherwise one will be generated here. +# - $SUPERVISOR_AND_CONCIERGE_NO_CPU_REQUEST - when set to any value, causes the CPU requests +# to be unset on the deployments, which helps us squeeze these deployments onto a small cluster. + +# Require kubectl >= 1.18.x. +if [ "$(kubectl version --client=true -o=json | grep gitVersion | cut -d '.'
-f 2)" -lt 18 ]; then + echo "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)" + exit 1 +fi + +# extract_env takes a JSON object representing an client.authentication.k8s.io/v1beta1 +# exec credential config (as parameter $1) and pulls out the env var value for the +# provided name (as parameter $2). +function extract_env_value() { + filter=".env[] | select(.name==\"$2\") | .value" + echo "$1" | jq -r "$filter" +} + +function print_or_redact_doc() { + doc_kind=$(echo "$1" | awk '/^kind: / {print $2}') + if [[ -z "$doc_kind" ]]; then + echo "warning: " + elif [[ $doc_kind == "Secret" || $doc_kind == "secret" ]]; then + echo + echo "---" + echo "" + else + printf "%s\n" "$1" + fi +} + +function print_redacted_manifest() { + doc="" + while IFS="" read -r line || [ -n "$line" ]; do + if [[ $line == "---" ]]; then + if [[ -n "$doc" ]]; then + print_or_redact_doc "$doc" + fi + doc="" + fi + doc=$(printf "%s\n%s" "$doc" "$line") + done <"$1" + + print_or_redact_doc "$doc" +} + +function update_gcloud_dns_record() { + if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set when using update_gcloud_dns_record" + exit 1 + fi + + local dns_name=$1 + local new_ip=$2 + local dns_record_name="${dns_name}." + local dns_zone="pinniped-dev" + local dns_project="$PINNIPED_GCP_PROJECT" + + # Login to gcloud CLI + gcloud auth activate-service-account "$GKE_USERNAME" --key-file <(echo "$GKE_JSON_KEY") --project "$dns_project" + + # Get the current value of the DNS A record. + # We assume that this record already exists because it was manually created. + # We also assume in the transaction commands below that it was created with a TTL of 30 seconds. + current_dns_record_ip=$(gcloud dns record-sets list --zone "$dns_zone" \ + --project "$dns_project" --name "$dns_record_name" --format json | + jq -r ".[] | select(.name ==\"${dns_record_name}\") | .rrdatas[0]") + + if [[ "$current_dns_record_ip" == "$new_ip" ]]; then + echo "No update needed: DNS record $dns_record_name was already set to $new_ip" + else + echo "Changing DNS record $dns_record_name from $current_dns_record_ip to $new_ip ..." + + # Updating a DNS record with gcloud must be done with a remove and an add wrapped in a transaction. + gcloud dns record-sets transaction start --zone "$dns_zone" --project "$dns_project" + gcloud dns record-sets transaction remove "$current_dns_record_ip" --name "$dns_name" \ + --ttl "30" --type "A" --zone "$dns_zone" --project "$dns_project" + gcloud dns record-sets transaction add "$new_ip" --name "$dns_name" \ + --ttl "30" --type "A" --zone "$dns_zone" --project "$dns_project" + change_id=$(gcloud dns record-sets transaction execute --zone "$dns_zone" --project "$dns_project" --format json | jq -r '.id') + + # Wait for that transaction to commit. This is usually quick. + change_status="not-done" + while [[ "$change_status" != "done" ]]; do + sleep 3 + change_status=$(gcloud dns record-sets changes describe "$change_id" \ + --zone "$dns_zone" --project "$dns_project" --format json | jq -r '.status') + echo "Waiting for change $change_id to have status 'done'. Current status: $change_status" + done + + # Wait for DNS propagation. The TTL is 30 seconds, so this shouldn't take too long. + echo "Waiting for new IP address $new_ip to appear in the result of a local DNS query. This may take a few minutes..." 
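+    # Poll the name with dig until it returns the new IP. The record above is written with a 30 second TTL,
+    # so any cached answer should age out quickly once the change has finished executing.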
+ while true; do + dig_result=$(dig +short "$dns_name") + echo "dig result for $dns_name: $dig_result" + if [[ "$dig_result" == "$new_ip" ]]; then + echo "New IP address has finished DNS propagation. Done with DNS update!" + break + fi + sleep 5 + done + fi +} + +if [[ "${TMC_API_TOKEN:-}" == "" && "${DEPLOY_LOCAL_USER_AUTHENTICATOR:-no}" != "yes" ]]; then + echo "Must use either \$TMC_API_TOKEN or \$DEPLOY_LOCAL_USER_AUTHENTICATOR" + exit 1 +fi + +if [[ "${TMC_API_TOKEN:-}" != "" ]]; then + tmc_context_name="tanzu-user-authentication-stable" + # I dunno what the valid values for "--management-cluster-name" and + # "--provisioner-name" are, but I know "attached" is one of them. I also + # dunno what a "--provisioner-name" refers to... + attached="attached" + + # This command uses the API token in $TMC_API_TOKEN and points the tmc CLI at the TMC staging env + tmc system context create --stg-stable --name "$tmc_context_name" --no-configure + + echo "Checking if cluster is attached as '$TMC_CLUSTER_NAME'..." + cluster_name="$(kubectl -n vmware-system-tmc get configmaps stack-config -o jsonpath=\{.data.cluster_name\} 2>/dev/null || echo 'no_cluster_name')" + if [[ "$cluster_name" == "$TMC_CLUSTER_NAME" ]]; then + echo "Cluster is already attached as '$TMC_CLUSTER_NAME'..." + elif [[ "$cluster_name" == "no_cluster_name" ]]; then + if ! tmc cluster list --name "$TMC_CLUSTER_NAME" --management-cluster-name "$attached" --provisioner-name "$attached" | grep -q "No clusters to list"; then + echo "Detaching old '$TMC_CLUSTER_NAME'..." + tmc cluster delete "$TMC_CLUSTER_NAME" --force --management-cluster-name "$attached" --provisioner-name "$attached" + # unfortunately it seems like deleting the cluster takes time to propagate before it is safe to attach again + sleep 3 + fi + + echo "Attaching cluster '$TMC_CLUSTER_NAME'..." + manifest="$(mktemp)" + tmc cluster attach --name "$TMC_CLUSTER_NAME" --management-cluster-name "$attached" --provisioner-name "$attached" --output "$manifest" + kubectl apply -f "$manifest" + rm "$manifest" + else + echo "Cluster is already attached as '$cluster_name'" + echo "Please either:" + echo " 1) detach the cluster with something like 'tmc cluster delete $TMC_CLUSTER_NAME --force'" + echo " 2) create a new cluster with 'kind delete cluster && kind create cluster' (or analogous commands for other providers)" + echo "I don't want to mess up your cluster, so I'm gonna bail out" + exit 1 + fi + + # Generate token for testing. + echo "Generating cluster token for testing..." 
+ exec_cred_config="$(tmc cluster auth userconfig get --cluster-name "$TMC_CLUSTER_NAME" --management-cluster-name "$attached" --provisioner-name "$attached" --output json | jq .status.user.exec)" + cluster_uid="$(extract_env_value "$exec_cred_config" CLUSTER_UID)" + cluster_rid="$(extract_env_value "$exec_cred_config" CLUSTER_RID)" + cluster_rid_v2="$(extract_env_value "$exec_cred_config" CLUSTER_RID_V2)" + tmc_environment="$(extract_env_value "$exec_cred_config" TMC_ENVIRONMENT)" + tmc_endpoint="$(extract_env_value "$exec_cred_config" TMC_ENDPOINT)" + + tmc_token_exec_cred="$(CLUSTER_UID="$cluster_uid" \ + CLUSTER_RID="$cluster_rid" \ + CLUSTER_RID_V2="$cluster_rid_v2" \ + TMC_ENVIRONMENT="$tmc_environment" \ + TMC_ENDPOINT="$tmc_endpoint" \ + tmc cluster generate-token-v2)" + tmc_cluster_token="$(echo "$tmc_token_exec_cred" | jq -r .status.token)" + + tmc_server_config="$(tmc cluster auth serverconfig get --cluster-name "$TMC_CLUSTER_NAME" --management-cluster-name "$attached" --provisioner-name "$attached" --output json)" + webhook_url="$(echo "$tmc_server_config" | jq -r .status.authenticationWebhook.endpoint)" + webhook_ca_bundle="$(echo "$tmc_server_config" | jq -r .status.authenticationWebhook.certificateAuthorityData)" +fi + +# Save this file for possible later use. Sometimes we want to remove the CPU requests, +# which also means that we need to remove the limits or else Kubernetes will use the limit as +# an implicit request amount. +cat <<EOF >>/tmp/remove-cpu-request-overlay.yaml +#@ load("@ytt:overlay", "overlay") +#@overlay/match by=overlay.subset({"kind": "Deployment"}), expects=1 +--- +spec: + template: + spec: + containers: + - #@overlay/match by=overlay.all, expects=1 + resources: + requests: + cpu: + limits: + cpu: +EOF + +# Print for debugging +kubectl config current-context +kubectl version +kubectl cluster-info + +concierge_app_name="${CONCIERGE_APP_NAME:-concierge}" +concierge_namespace="${CONCIERGE_NAMESPACE:-concierge}" +concierge_custom_labels="{myConciergeCustomLabelName: myConciergeCustomLabelValue}" +supervisor_app_name="${SUPERVISOR_APP_NAME:-supervisor}" +supervisor_namespace="${SUPERVISOR_NAMESPACE:-supervisor}" +supervisor_custom_labels="{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}" +discovery_url="${PINNIPED_DISCOVERY_URL:-null}" +manifest=/tmp/manifest.yaml + +test_username="${concierge_app_name}-test-username" +test_groups="${concierge_app_name}-test-group-0,${concierge_app_name}-test-group-1" +test_password="$(openssl rand -hex 16)" +test_user_token="${test_username}:${test_password}" + +dex_test_password="${PINNIPED_DEX_TEST_USER_PASSWORD:-$(openssl rand -hex 16)}" +ldap_test_password="${PINNIPED_LDAP_TEST_USER_PASSWORD:-$(openssl rand -hex 16)}" + +if [[ "${DEPLOY_LOCAL_USER_AUTHENTICATOR:-no}" == "yes" ]]; then + # + # Deploy local-user-authenticator + # + pushd deploy/local-user-authenticator >/dev/null + + # When SECONDARY_DEPLOY == "yes", act like we deployed local-user-authenticator, but don't really. + if [[ "${SECONDARY_DEPLOY:-no}" != "yes" ]]; then + + echo "Deploying the local-user-authenticator app to the cluster..." + ytt --file . \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" >"$manifest" + + echo + echo "Full local-user-authenticator app manifest with Secrets redacted..." 
+ echo "--------------------------------------------------------------------------------" + print_redacted_manifest $manifest + echo "--------------------------------------------------------------------------------" + echo + + set -x + kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest" + { set +x; } 2>/dev/null + + fi + + # Always create a secret, even if this is a secondary deploy. + echo "Creating test user '$test_username'..." + kubectl create secret generic "$test_username" \ + --namespace local-user-authenticator \ + --from-literal=groups="$test_groups" \ + --from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \ + --dry-run=client \ + --output yaml | + kubectl apply -f - + + # Override the TMC webhook settings to use the local-user-authenticator instead + webhook_url="https://local-user-authenticator.local-user-authenticator.svc.cluster.local/authenticate" + + # Sometimes the local-user-authenticator pod hasn't generated the serving certificate yet, so we poll until it has. + set +o pipefail + while ! kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator >/dev/null; do + echo "Waiting for local-user-authenticator Secret to be created..." + sleep 1 + done + set -o pipefail + + webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')" + + popd >/dev/null + +else + # Assume TMC when not using local-user-authenticator. + # Use username and groups of our test user as expected values. + test_username="tanzu-user-authentication@groups.vmware.com" + + test_groups_as_array=( + "tmc:member" + "csp:org_member" + "Everyone@vmwareid" + "Everyone@vmwareid@vmwareid" + "OKTA_MASTERED_External@vmwareid" + "Okta_Mastered_External_MFA@vmwareid" + "OKTA_MASTERED_External@vmwareid@vmwareid" + "Okta_Mastered_External_MFA@vmwareid@vmwareid" + "csp-uid:vmwareid:01b58eb7-9a2c-4c02-9a2f-f79532702d14" + ) + # join array elements with comma as delimiter + test_groups=$( + IFS=, + echo "${test_groups_as_array[*]}" + ) + + test_user_token="${tmc_cluster_token}" +fi + +# Do some input checking for USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR +if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" && "${DEPLOY_TEST_TOOLS:-no}" != "yes" ]]; then + echo "You must set DEPLOY_TEST_TOOLS=yes when using USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR=yes" + echo "" + echo "It has no meaning to create a LoadBalancer for Dex when Dex will not exist" + exit 1 +fi + +if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" && "${SUPERVISOR_LOAD_BALANCER:-no}" == "yes" ]]; then + echo "Use no more than one of USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR=yes and SUPERVISOR_LOAD_BALANCER=yes" + echo "" + echo "USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR=yes tells this script to create a LoadBalancer for Dex and Supervisor with dynamic IP addresses" + echo "and will configure the integration tests to use those IP addresses for direct communication with Dex and Supervisor" + echo "SUPERVISOR_LOAD_BALANCER=yes tells this script to create a LoadBalancer for the Supervisor with an IP address and will update the DNS record for SUPERVISOR_LOAD_BALANCER_DNS_NAME". 
+ exit 1 +fi + +if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" && "${SUPERVISOR_INGRESS:-no}" == "yes" ]]; then + echo "Use no more than one of USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR=yes and SUPERVISOR_INGRESS=yes" + echo "" + echo "USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR=yes tells this script to create a LoadBalancer for Dex and Supervisor with dynamic IP addresses" + echo "and will configure the integration tests to use those IP addresses for direct communication with Dex and Supervisor" + echo "SUPERVISOR_INGRESS=yes tells this script to create a NodePort service and an Ingress service for the Supervisor". + exit 1 +fi + +if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" ]]; then + supervisor_service_name="${supervisor_app_name}-loadbalancer" + + # Make a Supervisor LoadBalancer + cat </dev/null + + if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" ]]; then + test_supervisor_upstream_oidc_callback_url="https://${supervisor_loadbalancer_public_ip_or_hostname}/some/path/callback" + else + test_supervisor_upstream_oidc_callback_url="https://${supervisor_app_name}-clusterip.${supervisor_namespace}.svc.cluster.local/some/path/callback" + fi + + # When SECONDARY_DEPLOY == "yes", act like we deployed dex, but don't really. + if [[ "${SECONDARY_DEPLOY:-no}" != "yes" ]]; then + + # If someone has told you about a secondary Supervisor app name, then add it + # on to the list of Dex redirect URIs. + if [[ -n "${SECONDARY_SUPERVISOR_APP_NAME:-}" ]]; then + test_secondary_supervisor_upstream_oidc_callback_url="https://${SECONDARY_SUPERVISOR_APP_NAME}-clusterip.${SECONDARY_SUPERVISOR_NAMESPACE}.svc.cluster.local/some/path/callback" + supervisor_redirect_uris="[ + ${test_supervisor_upstream_oidc_callback_url}, + ${test_secondary_supervisor_upstream_oidc_callback_url} + ]" + else + supervisor_redirect_uris="[ + ${test_supervisor_upstream_oidc_callback_url} + ]" + fi + + echo "Deploying Tools to the cluster..." + dex_optional_ytt_values=() + if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" ]]; then + dex_optional_ytt_values+=("--data-value=dex_issuer_hostname=${dex_loadbalancer_public_ip_or_hostname}") + fi + ytt --file . \ + --data-value-yaml "supervisor_redirect_uris=${supervisor_redirect_uris}" \ + --data-value "pinny_ldap_password=$ldap_test_password" \ + --data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \ + ${dex_optional_ytt_values[@]+"${dex_optional_ytt_values[@]}"} \ + >"$manifest" + + echo + echo "Full Tools manifest with Secrets redacted..." + echo "--------------------------------------------------------------------------------" + print_redacted_manifest $manifest + echo "--------------------------------------------------------------------------------" + echo + + set -x + kapp deploy --yes --app tools --diff-changes --file "$manifest" + { set +x; } 2>/dev/null + fi + + dex_issuer_url="https://dex.tools.svc.cluster.local/dex" + test_proxy="http://127.0.0.1:12346" + if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" == "yes" ]]; then + dex_issuer_url="https://${dex_loadbalancer_public_ip_or_hostname}/dex" + + # The purpose of USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR is specifically to avoid using 'kubectl port-forward', + # so set this to empty so that any integration tests that specifically need the squid proxy will know to not run. 
+ test_proxy="" + fi + + dex_ca_bundle="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem" | base64decode}}' | base64)" + pinniped_test_tools_namespace="tools" + test_cli_oidc_callback_url="http://127.0.0.1:48095/callback" + test_cli_oidc_client_id="pinniped-cli" + test_cli_oidc_issuer_ca_bundle="${dex_ca_bundle}" + test_cli_oidc_issuer="${dex_issuer_url}" + test_cli_oidc_password="${dex_test_password}" + test_cli_oidc_username="pinny@example.com" + # note that test_supervisor_upstream_oidc_callback_url was already set above + test_supervisor_upstream_oidc_client_id="pinniped-supervisor" + test_supervisor_upstream_oidc_client_secret="pinniped-supervisor-secret" + test_supervisor_upstream_oidc_additional_scopes="offline_access,email" + test_supervisor_upstream_oidc_username_claim="email" + test_supervisor_upstream_oidc_groups_claim="groups" + test_supervisor_upstream_oidc_issuer_ca_bundle="${dex_ca_bundle}" + test_supervisor_upstream_oidc_issuer="${dex_issuer_url}" + test_supervisor_upstream_oidc_password="${dex_test_password}" + test_supervisor_upstream_oidc_username="pinny@example.com" + test_supervisor_upstream_oidc_groups="" # Dex's local user store does not let us configure groups. + pinniped_test_ldap_host="ldap.tools.svc.cluster.local" + pinniped_test_ldap_starttls_only_host="ldapstarttls.tools.svc.cluster.local" + pinniped_test_ldap_ldaps_ca_bundle="${dex_ca_bundle}" + pinniped_test_ldap_bind_account_username="cn=admin,dc=pinniped,dc=dev" + pinniped_test_ldap_bind_account_password=password + pinniped_test_ldap_users_search_base="ou=users,dc=pinniped,dc=dev" + pinniped_test_ldap_groups_search_base="ou=groups,dc=pinniped,dc=dev" + pinniped_test_ldap_user_dn="cn=pinny,ou=users,dc=pinniped,dc=dev" + pinniped_test_ldap_user_cn="pinny" + pinniped_test_ldap_user_password=${ldap_test_password} + pinniped_test_ldap_user_unique_id_attribute_name="uidNumber" + pinniped_test_ldap_user_unique_id_attribute_value="1000" + pinniped_test_ldap_user_email_attribute_name="mail" + pinniped_test_ldap_user_email_attribute_value="pinny.ldap@example.com" + pinniped_test_ldap_expected_direct_groups_dn="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev" + pinniped_test_ldap_expected_indirect_groups_dn="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev" + pinniped_test_ldap_expected_direct_groups_cn="ball-game-players;seals" + pinniped_test_ldap_expected_direct_posix_groups_cn="ball-game-players-posix;seals-posix" + pinniped_test_ldap_expected_indirect_groups_cn="pinnipeds;mammals" + + popd >/dev/null +else + # Did not deploy the tools namespace. + pinniped_test_tools_namespace="" # tools were not deployed, so leave empty + # The squid proxy in the tools namespace was not deployed, so do not use a proxy. + test_proxy="" + # The tools namespace was not deployed, so do not use the .svc.cluster.local hostname. + # Instead use the real hostname of the Supervisor. + test_supervisor_upstream_oidc_callback_url="https://$SUPERVISOR_LOAD_BALANCER_DNS_NAME/test-issuer/callback" +fi + +# Whether or not the tools namespace is deployed, we can configure the integration +# tests to use Okta instead of Dex as the OIDC provider. 
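+# For example, a run that targets Okta might export values along these lines before invoking this script
+# (illustrative placeholders only; the real values come from the CI credential store):
+#   export OKTA_ISSUER='https://example.okta.com/oauth2/default'
+#   export OKTA_CLI_CLIENT_ID='example-cli-client-id'
+#   export OKTA_CLI_CALLBACK='http://127.0.0.1:48095/callback'
+#   export OKTA_SUPERVISOR_CLIENT_ID='example-supervisor-client-id'
+#   export OKTA_SUPERVISOR_CLIENT_SECRET='example-secret'
+#   export OKTA_USERNAME='pinny@example.com' OKTA_PASSWORD='example-password' OKTA_GROUPS='example-group'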
+if [[ "${OKTA_ISSUER:-no}" != "no" ]]; then + test_cli_oidc_callback_url="$OKTA_CLI_CALLBACK" + test_cli_oidc_client_id="$OKTA_CLI_CLIENT_ID" + test_cli_oidc_issuer_ca_bundle="" + test_cli_oidc_issuer="$OKTA_ISSUER" + test_cli_oidc_password="$OKTA_PASSWORD" + test_cli_oidc_username="$OKTA_USERNAME" + # Note that we are not overwriting the test_supervisor_upstream_oidc_callback_url variable, + # which was set by the if/else statement above. This is because the value of that variable + # should be decided based on the hostname of the Supervisor, which could be a .svc.cluster.local + # address or it could be a real DNS entry, depending on how the cluster was deployed. + test_supervisor_upstream_oidc_client_id="$OKTA_SUPERVISOR_CLIENT_ID" + test_supervisor_upstream_oidc_client_secret="$OKTA_SUPERVISOR_CLIENT_SECRET" + test_supervisor_upstream_oidc_additional_scopes="$OKTA_ADDITIONAL_SCOPES" + test_supervisor_upstream_oidc_username_claim="$OKTA_USERNAME_CLAIM" + test_supervisor_upstream_oidc_groups_claim="$OKTA_GROUPS_CLAIM" + test_supervisor_upstream_oidc_issuer_ca_bundle="" + test_supervisor_upstream_oidc_issuer="$OKTA_ISSUER" + test_supervisor_upstream_oidc_password="$OKTA_PASSWORD" + test_supervisor_upstream_oidc_username="$OKTA_USERNAME" + test_supervisor_upstream_oidc_groups="$OKTA_GROUPS" +fi + +# Whether or not the tools namespace is deployed, we can configure the integration +# tests to use Jumpcloud instead of OpenLDAP as the LDAP provider. +if [[ "${JUMPCLOUD_LDAP_HOST:-no}" != "no" ]]; then + pinniped_test_ldap_host="$JUMPCLOUD_LDAP_HOST" + pinniped_test_ldap_starttls_only_host="$JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST" + pinniped_test_ldap_ldaps_ca_bundle="" + pinniped_test_ldap_bind_account_username="$JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME" + pinniped_test_ldap_bind_account_password="$JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD" + pinniped_test_ldap_users_search_base="$JUMPCLOUD_LDAP_USERS_SEARCH_BASE" + pinniped_test_ldap_groups_search_base="$JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE" + pinniped_test_ldap_user_dn="$JUMPCLOUD_LDAP_USER_DN" + pinniped_test_ldap_user_cn="$JUMPCLOUD_LDAP_USER_CN" + pinniped_test_ldap_user_password="$JUMPCLOUD_LDAP_USER_PASSWORD" + pinniped_test_ldap_user_unique_id_attribute_name="$JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME" + pinniped_test_ldap_user_unique_id_attribute_value="$JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE" + pinniped_test_ldap_user_email_attribute_name="$JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME" + pinniped_test_ldap_user_email_attribute_value="$JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE" + pinniped_test_ldap_expected_direct_groups_dn="$JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN" + pinniped_test_ldap_expected_indirect_groups_dn="" + pinniped_test_ldap_expected_direct_groups_cn="$JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN" + pinniped_test_ldap_expected_direct_posix_groups_cn="$JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN" + pinniped_test_ldap_expected_indirect_groups_cn="" +fi + +if [[ "${TEST_ACTIVE_DIRECTORY:-no}" == "yes" ]]; then + # there's no way to test active directory locally... it has to be aws managed ad or nothing. 
+ # this is a separate toggle from $DEPLOY_TEST_TOOLS so we can run against ad once in the pr pipeline + # without doing so every time + pinniped_test_ad_host="$AWS_AD_HOST" + pinniped_test_ad_domain="$AWS_AD_DOMAIN" + pinniped_test_ad_bind_account_username="$AWS_AD_BIND_ACCOUNT_USERNAME" + pinniped_test_ad_bind_account_password="$AWS_AD_BIND_ACCOUNT_PASSWORD" + pinniped_test_ad_user_password="$AWS_AD_USER_PASSWORD" + pinniped_test_ad_user_unique_id_attribute_name="$AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME" + pinniped_test_ad_user_unique_id_attribute_value="$AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE" + pinniped_test_ad_user_user_principal_name="$AWS_AD_USER_USER_PRINCIPAL_NAME" + pinniped_test_ad_user_expected_groups_dn="$AWS_AD_USER_EXPECTED_GROUPS_DN" + pinniped_test_ad_user_expected_groups_cn="$AWS_AD_USER_EXPECTED_GROUPS_CN" + pinniped_test_ad_user_expected_indirect_groups_samaccountname="$AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME" + pinniped_test_ad_user_expected_indirect_groups_samaccountname_domainnames="$AWS_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES" + pinniped_test_ad_ldaps_ca_bundle="$AWS_AD_LDAPS_CA_BUNDLE" + pinniped_test_deactivated_ad_user_samaccountname="$AWS_AD_DEACTIVATED_USER_SAMACCOUNTNAME" + pinniped_test_deactivated_ad_user_password="$AWS_AD_DEACTIVATED_USER_PASSWORD" + pinniped_test_ad_user_email_attribute_name="mail" + pinniped_test_ad_user_email_attribute_value="$AWS_AD_USER_EMAIL_ATTRIBUTE_VALUE" + pinniped_test_ad_defaultnamingcontext_dn="$AWS_AD_DEFAULTNAMINGCONTEXT_DN" + pinniped_test_ad_users_dn="$AWS_AD_USERS_DN" +else + pinniped_test_ad_host="" + pinniped_test_ad_domain="" + pinniped_test_ad_bind_account_username="" + pinniped_test_ad_bind_account_password="" + pinniped_test_ad_user_password="" + pinniped_test_ad_user_unique_id_attribute_name="" + pinniped_test_ad_user_unique_id_attribute_value="" + pinniped_test_ad_user_user_principal_name="" + pinniped_test_ad_user_expected_groups_dn="" + pinniped_test_ad_user_expected_groups_cn="" + pinniped_test_ad_user_expected_indirect_groups_samaccountname="" + pinniped_test_ad_user_expected_indirect_groups_samaccountname_domainnames="" + pinniped_test_ad_ldaps_ca_bundle="" + pinniped_test_deactivated_ad_user_samaccountname="" + pinniped_test_deactivated_ad_user_password="" + pinniped_test_ad_user_email_attribute_name="" + pinniped_test_ad_user_email_attribute_value="" + pinniped_test_ad_defaultnamingcontext_dn="" + pinniped_test_ad_users_dn="" +fi + +if [[ "${PINNIPED_TEST_GITHUB_APP_CLIENT_ID:-none}" != "none" ]]; then + pinniped_test_github_app_client_id="$PINNIPED_TEST_GITHUB_APP_CLIENT_ID" + pinniped_test_github_app_client_secret="$PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET" + pinniped_test_github_oauth_app_client_id="$PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID" + pinniped_test_github_oauth_app_client_secret="$PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET" + pinniped_test_github_oauth_app_allowed_callback_url="$PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL" + pinniped_test_github_user_username="$PINNIPED_TEST_GITHUB_USER_USERNAME" + pinniped_test_github_user_password="$PINNIPED_TEST_GITHUB_USER_PASSWORD" + pinniped_test_github_user_otp_secret="$PINNIPED_TEST_GITHUB_USER_OTP_SECRET" + pinniped_test_github_userid="$PINNIPED_TEST_GITHUB_USERID" + pinniped_test_github_org="$PINNIPED_TEST_GITHUB_ORG" + pinniped_test_github_expected_team_names="$PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES" + pinniped_test_github_expected_team_slugs="$PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS" +else + pinniped_test_github_app_client_id="" + 
pinniped_test_github_app_client_secret="" + pinniped_test_github_oauth_app_client_id="" + pinniped_test_github_oauth_app_client_secret="" + pinniped_test_github_oauth_app_allowed_callback_url="" + pinniped_test_github_user_username="" + pinniped_test_github_user_password="" + pinniped_test_github_user_otp_secret="" + pinniped_test_github_userid="" + pinniped_test_github_org="" + pinniped_test_github_expected_team_names="" + pinniped_test_github_expected_team_slugs="" +fi + +# +# Deploy Concierge +# +pushd deploy/concierge >/dev/null + +# Prepare ytt flags that should be either added to set a custom value or omitted to accept the default from ytt. +concierge_optional_ytt_values=() +if [[ -n "${PINNIPED_API_GROUP_SUFFIX:-}" ]]; then + concierge_optional_ytt_values+=("--data-value-yaml=api_group_suffix=${PINNIPED_API_GROUP_SUFFIX}") +fi +if [[ -n "${SUPERVISOR_AND_CONCIERGE_NO_CPU_REQUEST:-}" ]]; then + concierge_optional_ytt_values+=("--file=/tmp/remove-cpu-request-overlay.yaml") +fi +if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then + # Configure the web proxy on the Concierge pods. Note that .svc and .cluster.local are not included, + # so requests for things like pinniped-supervisor-clusterip.supervisor.svc.cluster.local and + # local-user-authenticator.local-user-authenticator.svc.cluster.local will go through the web proxy. + concierge_optional_ytt_values+=("--data-value=https_proxy=http://proxy.tools.svc.cluster.local:3128") + concierge_optional_ytt_values+=("--data-value=no_proxy=\$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost") +fi + +echo "Deploying the Concierge app to the cluster..." +ytt --file . \ + --data-value "app_name=$concierge_app_name" \ + --data-value "namespace=$concierge_namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" \ + --data-value-yaml "image_pull_dockerconfigjson=${IMAGE_PULL_SECRET:-}" \ + --data-value "api_serving_certificate_duration_seconds=${API_SERVING_CERT_DURATION:-2592000}" \ + --data-value "api_serving_certificate_renew_before_seconds=${API_SERVING_CERT_RENEW_BEFORE:-2160000}" \ + --data-value "log_level=debug" \ + --data-value-yaml "custom_labels=$concierge_custom_labels" \ + --data-value "discovery_url=$discovery_url" \ + ${concierge_optional_ytt_values[@]+"${concierge_optional_ytt_values[@]}"} \ + >"$manifest" + +echo +echo "Full Concierge app manifest with Secrets redacted..." +echo "--------------------------------------------------------------------------------" +print_redacted_manifest $manifest +echo "--------------------------------------------------------------------------------" +echo + +set -x +kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest" + +if ! { (($(kubectl version --output json | jq -r .serverVersion.major) == 1)) && (($(kubectl version --output json | jq -r .serverVersion.minor) < 19)); }; then + # Also perform a dry-run create with kubectl just to see if there are any validation errors. + # Skip this on very old clusters, since we use some API fields (like seccompProfile) which did not exist back then. + # Users can still install on these clusters by using kapp or by using kubectl --validate=false. 
+ kubectl create --dry-run=client -f "$manifest" +fi + +{ set +x; } 2>/dev/null + +popd >/dev/null + +# +# Deploy Supervisor +# +pushd deploy/supervisor >/dev/null + +supervisor_ytt_service_flags=() +if [[ "${USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR:-no}" != "yes" ]]; then + if [[ "${SUPERVISOR_LOAD_BALANCER:-no}" == "yes" ]]; then + supervisor_ytt_service_flags+=("--data-value-yaml=service_https_loadbalancer_port=443") + if [[ "${SUPERVISOR_LOAD_BALANCER_STATIC_IP:-}" != "" ]]; then + supervisor_ytt_service_flags+=("--data-value=service_loadbalancer_ip=$SUPERVISOR_LOAD_BALANCER_STATIC_IP") + fi + fi + if [[ "${SUPERVISOR_INGRESS:-no}" == "yes" ]]; then + # even when we have functioning ingress, we need a TCP connection to the supervisor https port to test its TLS config + supervisor_ytt_service_flags+=("--data-value-yaml=service_https_nodeport_port=443") + fi + if [[ "${SUPERVISOR_LOAD_BALANCER:-no}" == "no" && "${SUPERVISOR_INGRESS:-no}" == "no" ]]; then + # When no specific service was requested for the supervisor, we assume we are running on + # kind, and therefore expect to talk to the supervisor via NodePort and ClusterIP services. + # This nodePort is the same port number that is hardcoded in the port forwarding of our kind configuration. + supervisor_ytt_service_flags+=("--data-value-yaml=service_https_nodeport_port=443") + supervisor_ytt_service_flags+=("--data-value-yaml=service_https_clusterip_port=443") + supervisor_ytt_service_flags+=("--data-value-yaml=service_https_nodeport_nodeport=${PINNIPED_SUPERVISOR_HTTPS_NODEPORT:-31243}") + fi +fi + +# Prepare ytt flags that should be either added to set a custom value or omitted to accept the default from ytt. +supervisor_optional_ytt_values=() +if [[ -n "${PINNIPED_API_GROUP_SUFFIX:-}" ]]; then + supervisor_optional_ytt_values+=("--data-value-yaml=api_group_suffix=${PINNIPED_API_GROUP_SUFFIX}") +fi +if [[ -n "${SUPERVISOR_AND_CONCIERGE_NO_CPU_REQUEST:-}" ]]; then + supervisor_optional_ytt_values+=("--file=/tmp/remove-cpu-request-overlay.yaml") +fi +if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then + # Configure the web proxy on the Supervisor pods. Note that .svc and .cluster.local are not included, + # so requests for things like dex.tools.svc.cluster.local will go through the web proxy. + supervisor_optional_ytt_values+=("--data-value=https_proxy=http://proxy.tools.svc.cluster.local:3128") + supervisor_optional_ytt_values+=("--data-value=no_proxy=\$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost") +fi + +echo "Deploying the Supervisor app to the cluster..." +ytt --file . \ + --data-value "app_name=$supervisor_app_name" \ + --data-value "namespace=$supervisor_namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" \ + --data-value-yaml "image_pull_dockerconfigjson=${IMAGE_PULL_SECRET:-}" \ + --data-value "log_level=debug" \ + --data-value-yaml "custom_labels=$supervisor_custom_labels" \ + "${supervisor_ytt_service_flags[@]}" \ + ${supervisor_optional_ytt_values[@]+"${supervisor_optional_ytt_values[@]}"} \ + >"$manifest" + +echo +echo "Full Supervisor app manifest with Secrets redacted..." +echo "--------------------------------------------------------------------------------" +print_redacted_manifest $manifest +echo "--------------------------------------------------------------------------------" +echo + +set -x +kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest" + +if ! 
{ (($(kubectl version --output json | jq -r .serverVersion.major) == 1)) && (($(kubectl version --output json | jq -r .serverVersion.minor) < 23)); }; then + # Also perform a dry-run create with kubectl just to see if there are any validation errors. + # Skip this on very old clusters, since we use some API fields (like seccompProfile) which did not exist back then. + # In the Supervisor CRDs we began to use CEL validations which were introduced in Kubernetes 1.23. + # Users can still install on these clusters by using kapp or by using kubectl --validate=false. + kubectl create --dry-run=client -f "$manifest" +fi + +{ set +x; } 2>/dev/null + +# Now that everything is deployed, optionally firewall the Dex server, the local user authenticator server, +# and the GitHub API so that the Supervisor and Concierge cannot reach them directly. However, the Squid +# proxy server can reach them all, so the Supervisor and Concierge can reach them through the proxy. +if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then + echo "Setting up firewalls for the Supervisor and Concierge's outgoing TCP/UDP network traffic..." + cat <"$ca_cert_config_file" +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +C = US +ST = California +L = San Francisco +O = Pinniped +OU = Pinniped Testing CA +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical,CA:TRUE,pathlen:0 +keyUsage = keyCertSign, digitalSignature +EOF + + ca_key_file="/tmp/ingress-tls-ca-cert.key" + openssl req \ + -new \ + -x509 \ + -config "$ca_cert_config_file" \ + -days 36500 \ + -sha256 \ + -out "$ingress_tls_ca_cert_file" \ + -newkey rsa:2048 \ + -keyout "$ca_key_file" \ + -nodes + echo "Creating ingress tls CA secret: $ingress_tls_ca_secret" + kubectl -n "$supervisor_namespace" create secret tls "$ingress_tls_ca_secret" \ + --cert="$ingress_tls_ca_cert_file" --key="$ca_key_file" + + cert_config_file="/tmp/ingress-tls-cert.conf" + cat <<EOF >"$cert_config_file" +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +CN = ${SUPERVISOR_INGRESS_DNS_NAME} +C = US +ST = California +L = San Francisco +O = Pinniped +[v3_req] +basicConstraints = CA:FALSE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid +keyUsage = keyEncipherment, digitalSignature +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = ${SUPERVISOR_INGRESS_DNS_NAME} +EOF + + key_file="/tmp/ingress-tls-cert.key" + csr_file="/tmp/ingress-tls-cert.csr" + openssl req \ + -new \ + -config "$cert_config_file" \ + -sha256 \ + -out "$csr_file" \ + -newkey rsa:2048 \ + -keyout "$key_file" \ + -nodes + openssl x509 \ + -req \ + -in "$csr_file" \ + -extfile "$cert_config_file" \ + -extensions 'v3_req' \ + -days 36500 \ + -sha256 \ + -out "$ingress_tls_cert_file" \ + -CA "$ingress_tls_ca_cert_file" \ + -CAkey "$ca_key_file" \ + -CAcreateserial + + echo "Creating ingress tls secret: $ingress_tls_secret" + kubectl -n "$supervisor_namespace" create secret tls "$ingress_tls_secret" \ + --cert="$ingress_tls_cert_file" --key="$key_file" + else + # The Secret already exists, so just read the server's public key from it. 
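+    # (The jsonpath keys below are written as 'tls\.crt' because the dot in the Secret's data key must be escaped.)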
+ kubectl get -n "$supervisor_namespace" secret "$ingress_tls_ca_secret" -o jsonpath=\{.data.'tls\.crt'\} | base64 -d >"$ingress_tls_ca_cert_file" + kubectl get -n "$supervisor_namespace" secret "$ingress_tls_secret" -o jsonpath=\{.data.'tls\.crt'\} | base64 -d >"$ingress_tls_cert_file" + fi + + # If a static IP name was provided then use it. Otherwise, don't include the annotation at all. + static_ip_annotation="" + if [[ "${SUPERVISOR_INGRESS_STATIC_IP_NAME:-}" != "" ]]; then + static_ip_annotation="kubernetes.io/ingress.global-static-ip-name: ${SUPERVISOR_INGRESS_STATIC_IP_NAME}" + fi + + # If the BackendConfig resource exists (i.e. if it is a GKE cluster)... + if kubectl api-resources --api-group cloud.google.com -o name | grep -q backendconfigs.cloud.google.com; then + # Get the nodePort port number that was dynamically assigned to the nodeport service. + nodeport_service_port=$(kubectl get service -n "${supervisor_namespace}" "${supervisor_app_name}-nodeport" -o jsonpath='{.spec.ports[0].nodePort}') + echo "${supervisor_app_name}-nodeport Service was assigned nodePort $nodeport_service_port" + + # Create or update a BackendConfig to configure the health checks that will be used by the Ingress for its backend Service. + cat </dev/null + +# +# Set up the integration test env vars +# +pinniped_cluster_capability_file_content=$(cat "$PINNIPED_TEST_CLUSTER_CAPABILITY_FILE") + +cat <<EOF >/tmp/integration-test-env +export PINNIPED_TEST_TOOLS_NAMESPACE='${pinniped_test_tools_namespace}' +export PINNIPED_TEST_CONCIERGE_NAMESPACE='${concierge_namespace}' +export PINNIPED_TEST_CONCIERGE_APP_NAME='${concierge_app_name}' +export PINNIPED_TEST_CONCIERGE_CUSTOM_LABELS='${concierge_custom_labels}' +export PINNIPED_TEST_USER_USERNAME='${test_username}' +export PINNIPED_TEST_USER_GROUPS='${test_groups}' +export PINNIPED_TEST_USER_TOKEN='${test_user_token}' +export PINNIPED_TEST_WEBHOOK_ENDPOINT='${webhook_url}' +export PINNIPED_TEST_WEBHOOK_CA_BUNDLE='${webhook_ca_bundle}' +export PINNIPED_TEST_SUPERVISOR_NAMESPACE='${supervisor_namespace}' +export PINNIPED_TEST_SUPERVISOR_APP_NAME='${supervisor_app_name}' +export PINNIPED_TEST_SUPERVISOR_SERVICE_NAME='${supervisor_service_name}' +export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='${supervisor_custom_labels}' +export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS='${supervisor_https_address}' +export PINNIPED_TEST_SUPERVISOR_HTTPS_INGRESS_ADDRESS='${supervisor_https_ingress_address}' +export PINNIPED_TEST_SUPERVISOR_HTTPS_INGRESS_CA_BUNDLE='${supervisor_https_ingress_ca_bundle}' +export PINNIPED_TEST_PROXY='${test_proxy}' +export PINNIPED_TEST_LDAP_HOST='${pinniped_test_ldap_host}' +export PINNIPED_TEST_LDAP_STARTTLS_ONLY_HOST='${pinniped_test_ldap_starttls_only_host}' +export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE='${pinniped_test_ldap_ldaps_ca_bundle}' +export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME='${pinniped_test_ldap_bind_account_username}' +export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD='${pinniped_test_ldap_bind_account_password}' +export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE='${pinniped_test_ldap_users_search_base}' +export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE='${pinniped_test_ldap_groups_search_base}' +export PINNIPED_TEST_LDAP_USER_DN='${pinniped_test_ldap_user_dn}' +export PINNIPED_TEST_LDAP_USER_CN='${pinniped_test_ldap_user_cn}' +export PINNIPED_TEST_LDAP_USER_PASSWORD='${pinniped_test_ldap_user_password}' +export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME='${pinniped_test_ldap_user_unique_id_attribute_name}' +export 
PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE='${pinniped_test_ldap_user_unique_id_attribute_value}' +export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME='${pinniped_test_ldap_user_email_attribute_name}' +export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE='${pinniped_test_ldap_user_email_attribute_value}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN='${pinniped_test_ldap_expected_direct_groups_dn}' +export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN='${pinniped_test_ldap_expected_indirect_groups_dn}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN='${pinniped_test_ldap_expected_direct_groups_cn}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN='${pinniped_test_ldap_expected_direct_posix_groups_cn}' +export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN='${pinniped_test_ldap_expected_indirect_groups_cn}' +export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL='${test_cli_oidc_callback_url}' +export PINNIPED_TEST_CLI_OIDC_CLIENT_ID='${test_cli_oidc_client_id}' +export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE='${test_cli_oidc_issuer_ca_bundle}' +export PINNIPED_TEST_CLI_OIDC_ISSUER='${test_cli_oidc_issuer}' +export PINNIPED_TEST_CLI_OIDC_PASSWORD='${test_cli_oidc_password}' +export PINNIPED_TEST_CLI_OIDC_USERNAME='${test_cli_oidc_username}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CALLBACK_URL='${test_supervisor_upstream_oidc_callback_url}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES='${test_supervisor_upstream_oidc_additional_scopes}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM='${test_supervisor_upstream_oidc_username_claim}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_GROUPS_CLAIM='${test_supervisor_upstream_oidc_groups_claim}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_ID='${test_supervisor_upstream_oidc_client_id}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_SECRET='${test_supervisor_upstream_oidc_client_secret}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE='${test_supervisor_upstream_oidc_issuer_ca_bundle}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER='${test_supervisor_upstream_oidc_issuer}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD='${test_supervisor_upstream_oidc_password}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME='${test_supervisor_upstream_oidc_username}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_EXPECTED_GROUPS='${test_supervisor_upstream_oidc_groups}' +export PINNIPED_TEST_AD_HOST='${pinniped_test_ad_host}' +export PINNIPED_TEST_AD_DOMAIN='${pinniped_test_ad_domain}' +export PINNIPED_TEST_AD_BIND_ACCOUNT_USERNAME='${pinniped_test_ad_bind_account_username}' +export PINNIPED_TEST_AD_BIND_ACCOUNT_PASSWORD='${pinniped_test_ad_bind_account_password}' +export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME='${pinniped_test_ad_user_unique_id_attribute_name}' +export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE='${pinniped_test_ad_user_unique_id_attribute_value}' +export PINNIPED_TEST_AD_USER_USER_PRINCIPAL_NAME='${pinniped_test_ad_user_user_principal_name}' +export PINNIPED_TEST_AD_USER_PASSWORD='${pinniped_test_ad_user_password}' +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_DN='${pinniped_test_ad_user_expected_groups_dn}' +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_CN='${pinniped_test_ad_user_expected_groups_cn}' +export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME='${pinniped_test_ad_user_expected_indirect_groups_samaccountname}' +export 
PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES='${pinniped_test_ad_user_expected_indirect_groups_samaccountname_domainnames}' +export PINNIPED_TEST_AD_LDAPS_CA_BUNDLE='${pinniped_test_ad_ldaps_ca_bundle}' +export PINNIPED_TEST_DEACTIVATED_AD_USER_SAMACCOUNTNAME='${pinniped_test_deactivated_ad_user_samaccountname}' +export PINNIPED_TEST_DEACTIVATED_AD_USER_PASSWORD='${pinniped_test_deactivated_ad_user_password}' +export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_NAME='${pinniped_test_ad_user_email_attribute_name}' +export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_VALUE='${pinniped_test_ad_user_email_attribute_value}' +export PINNIPED_TEST_AD_DEFAULTNAMINGCONTEXT_DN='${pinniped_test_ad_defaultnamingcontext_dn}' +export PINNIPED_TEST_AD_USERS_DN='${pinniped_test_ad_users_dn}' +export PINNIPED_TEST_GITHUB_APP_CLIENT_ID='${pinniped_test_github_app_client_id}' +export PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET='${pinniped_test_github_app_client_secret}' +export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID='${pinniped_test_github_oauth_app_client_id}' +export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET='${pinniped_test_github_oauth_app_client_secret}' +export PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL='${pinniped_test_github_oauth_app_allowed_callback_url}' +export PINNIPED_TEST_GITHUB_USER_USERNAME='${pinniped_test_github_user_username}' +export PINNIPED_TEST_GITHUB_USER_PASSWORD='${pinniped_test_github_user_password}' +export PINNIPED_TEST_GITHUB_USER_OTP_SECRET='${pinniped_test_github_user_otp_secret}' +export PINNIPED_TEST_GITHUB_USERID='${pinniped_test_github_userid}' +export PINNIPED_TEST_GITHUB_ORG='${pinniped_test_github_org}' +export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES='${pinniped_test_github_expected_team_names}' +export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS='${pinniped_test_github_expected_team_slugs}' +export PINNIPED_TEST_SHELL_CONTAINER_IMAGE="ghcr.io/pinniped-ci-bot/test-kubectl:latest" + +read -r -d '' PINNIPED_TEST_CLUSTER_CAPABILITY_YAML << PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF || true +${pinniped_cluster_capability_file_content} +PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF + +export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML +EOF + +if [[ -n "${PINNIPED_API_GROUP_SUFFIX:-}" ]]; then + # Only when $PINNIPED_API_GROUP_SUFFIX was passed in do we also append the related flag to the test env, + # because it has a good default value in the integration test helper library. + cat <<EOF >>/tmp/integration-test-env + +export PINNIPED_TEST_API_GROUP_SUFFIX='${PINNIPED_API_GROUP_SUFFIX}' +EOF +fi diff --git a/pipelines/shared-helpers/test-binaries-image/Dockerfile b/pipelines/shared-helpers/test-binaries-image/Dockerfile new file mode 100644 index 000000000..4a8eba7cf --- /dev/null +++ b/pipelines/shared-helpers/test-binaries-image/Dockerfile @@ -0,0 +1,27 @@ +# syntax = docker/dockerfile:experimental + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + + +# Using bullseye (debian 11) until google/cloud-sdk starts using bookworm (debian 12) because the +# test binaries built by this dockerfile are run in a container built by dockerfiles/integration-test-runner/Dockerfile +# which uses google/cloud-sdk as the base image. Mismatching debian versions causes the pinniped-integration-test +# built below to error upon execution complaining that the expected version of GLIBC is not found. +FROM golang:1.23.2-bullseye as build-env +WORKDIR /work +COPY . . 
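+# The RUN step below relies on BuildKit cache mounts (enabled by the "syntax = docker/dockerfile:experimental"
+# directive at the top of this file) so that the Go build and module caches persist across image builds.
+# GOPROXY is exposed as a build arg so the pipeline can point the build at a module proxy; an illustrative
+# invocation (not necessarily how CI builds this image) would be:
+#   docker build --build-arg GOPROXY=https://proxy.golang.org -f Dockerfile .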
+ARG GOPROXY +RUN \ + --mount=type=cache,target=/cache/gocache \ + --mount=type=cache,target=/cache/gomodcache \ + mkdir out && \ + export GOCACHE=/cache/gocache && \ + export GOMODCACHE=/cache/gomodcache && \ + export GOOS=linux && \ + export GOARCH=amd64 && \ + CGO_ENABLED=0 go build -v -ldflags "$(hack/get-ldflags.sh)" -o out/pinniped ./cmd/pinniped/main.go && \ + CGO_ENABLED=1 go test -c -vet=off -race ./test/integration -o out/pinniped-integration-test # need cgo for race detector + +FROM scratch +COPY --from=build-env /work/out/ /usr/local/bin/ diff --git a/pipelines/shared-helpers/test-binaries-image/Dockerfile.dockerignore b/pipelines/shared-helpers/test-binaries-image/Dockerfile.dockerignore new file mode 100644 index 000000000..106abb0ed --- /dev/null +++ b/pipelines/shared-helpers/test-binaries-image/Dockerfile.dockerignore @@ -0,0 +1,10 @@ +./.* +./*.md +./*.yaml +./apis +./deploy +./Dockerfile +./generated/1.1* +./internal/mocks +./LICENSE +./site/ diff --git a/pipelines/shared-helpers/test-binaries-image/Dockerfile_fips b/pipelines/shared-helpers/test-binaries-image/Dockerfile_fips new file mode 100644 index 000000000..22df105f7 --- /dev/null +++ b/pipelines/shared-helpers/test-binaries-image/Dockerfile_fips @@ -0,0 +1,31 @@ +# syntax = docker/dockerfile:experimental + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# we need a separate dockerfile for the fips test image so that the integration tests +# use the right ciphers etc. + +# Using bullseye (debian 11) until google/cloud-sdk starts using bookworm (debian 12) because the +# test binaries built by this dockerfile are run in a container built by dockerfiles/integration-test-runner/Dockerfile +# which uses google/cloud-sdk as the base image. Mismatching debian versions causes the pinniped-integration-test +# built below to error upon execution complaining that the expected version of GLIBC is not found. +FROM golang:1.23.2-bullseye as build-env +WORKDIR /work +COPY . . +ARG GOPROXY +# build the cli with strict fips +RUN \ + --mount=target=. \ + --mount=type=cache,target=/cache/gocache \ + --mount=type=cache,target=/cache/gomodcache \ + export GOCACHE=/cache/gocache GOMODCACHE=/cache/gomodcache && \ + mkdir /tmp/out && \ + export GOOS=linux && \ + export GOARCH=amd64 && \ + export GOEXPERIMENT=boringcrypto && \ + CGO_ENABLED=1 go build -tags fips_strict,osusergo,netgo -v -trimpath -ldflags "$(hack/get-ldflags.sh) -w -linkmode=external -extldflags -static" -o /tmp/out/pinniped ./cmd/pinniped/main.go && \ + CGO_ENABLED=1 go test -tags fips_strict,osusergo,netgo -c -ldflags "$(hack/get-ldflags.sh) -w -linkmode=external -extldflags -static" -vet=off -race ./test/integration -o /tmp/out/pinniped-integration-test # need cgo for race detector + +FROM scratch +COPY --from=build-env /tmp/out/ /usr/local/bin/ diff --git a/pipelines/shared-tasks/build-cli-binaries/task.sh b/pipelines/shared-tasks/build-cli-binaries/task.sh new file mode 100755 index 000000000..cc3d25d16 --- /dev/null +++ b/pipelines/shared-tasks/build-cli-binaries/task.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +go version + +export GOCACHE="$PWD/cache/gocache" +export GOMODCACHE="$PWD/cache/gomodcache" + +if [[ "$DRY_RUN" == "yes" ]]; then + # Dry run with a fake version number. Not intended for use when building a final release of the CLI! 
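+  # Note that even in dry-run mode the version-stamping path is still exercised: the checks at the end of
+  # this script grep the output of 'pinniped version' for whatever value KUBE_GIT_VERSION holds here.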
+ export KUBE_GIT_VERSION="v1.2.3" +else + # Ensure that the input was given in this case, since it is an optional input to the task. + if [[ ! -f release-info/version-with-v ]]; then + echo 'Did not find release-info/version-with-v' + exit 1 + fi + + # This env var is used by hack/get-ldflags.sh below + export KUBE_GIT_VERSION="$(cat release-info/version-with-v)" +fi + +echo "Building using version number $KUBE_GIT_VERSION ..." + +pushd pinniped >/dev/null + ldflags="$(hack/get-ldflags.sh)" +popd >/dev/null + +pushd pinniped/cmd/pinniped >/dev/null + +# Make a temp directory for the CLI binaries +output_dir="$(mktemp -d)" + +target_os_list=(linux darwin windows) +target_platform_list=(amd64 arm64) +for target_os in "${target_os_list[@]}"; do + for target_platform in "${target_platform_list[@]}"; do + echo "Building CLI for OS $target_os / platform $target_platform ..." + + name="pinniped" + output="pinniped-cli-${target_os}-${target_platform}" + + if [[ "$target_os" == "windows" ]]; then + name="${name}.exe" + output="${output}.exe" + fi + + # Cross-compile the executable binary (CGO_ENABLED=0 means static linking) + CGO_ENABLED=0 GOOS="$target_os" GOARCH="$target_platform" go build -trimpath -ldflags "$ldflags" -o "$output_dir" ./... + + mv "${output_dir}/${name}" "../../../cli-binaries/${output}" + done +done + +popd >/dev/null + +linux_cli="cli-binaries/pinniped-cli-linux-amd64" +chmod 755 "$linux_cli" +echo "checking to see if 'pinniped version' has an '--output' flag" +success=0 +output=$("$linux_cli" version --output json 2>&1) || success=$? + +if [[ $success -eq 0 ]]; then + echo "pinniped version has an --output flag" + echo "result of version command: $output" + echo "" + echo "running grep:" + + # Make sure that `pinniped version` reports the version number that we just tried to bake in to the binaries. + if ! echo "$output" | grep "\"gitVersion\"\: \"$KUBE_GIT_VERSION\","; then + echo "Running 'pinniped version' did not output the expected version number!" + echo "Actual: $("$linux_cli" version -o json)" + echo "Expected to include '\"gitVersion\"\: \"$KUBE_GIT_VERSION\",'" + exit 1 + else + echo "✅" + fi + + # Make sure that `pinniped version` reports a clean git state. + chmod 755 "$linux_cli" + if ! echo "$output" | grep "\"gitTreeState\"\: \"clean\","; then + echo "Running 'pinniped version' did not have a clean gitTreeState!" + echo "Actual: $("$linux_cli" version -o json)" + exit 1 + else + echo "✅" + fi +else + echo "pinniped version does not have an --output flag" + output=$("$linux_cli" version) + echo "result of version command: $output" + echo "" + echo "running grep:" + + # Make sure that `pinniped version` reports the version number that we just tried to bake in to the binaries. + if ! echo "$output" | grep ", GitVersion:\"$KUBE_GIT_VERSION\","; then + echo "Running 'pinniped version' did not output the expected version number!" + echo "Actual: $("$linux_cli" version)" + echo "Expected to include ', GitVersion:\"$KUBE_GIT_VERSION\",'" + exit 1 + else + echo "✅" + fi +fi diff --git a/pipelines/shared-tasks/build-cli-binaries/task.yml b/pipelines/shared-tasks/build-cli-binaries/task.yml new file mode 100644 index 000000000..46b83f92e --- /dev/null +++ b/pipelines/shared-tasks/build-cli-binaries/task.yml @@ -0,0 +1,23 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +inputs: + - name: pinniped + - name: pinniped-ci + - name: release-info + optional: true # only really optional when DRY_RUN == "yes" +outputs: + - name: cli-binaries +params: + DRY_RUN: +run: + path: pinniped-ci/pipelines/shared-tasks/build-cli-binaries/task.sh +caches: + - path: cache diff --git a/pipelines/shared-tasks/build-kind-node-image/build-image.sh b/pipelines/shared-tasks/build-kind-node-image/build-image.sh new file mode 100755 index 000000000..6ddb9f15a --- /dev/null +++ b/pipelines/shared-tasks/build-kind-node-image/build-image.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This procedure is inspired from https://github.com/aojea/kind-images/blob/master/.circleci/config.yml + +set -euo pipefail + +# Choose the tag for the new image that we will build below. +full_repo="${PUSH_TO_IMAGE_REGISTRY}/${PUSH_TO_IMAGE_REPO}" +image_tag="${full_repo}:latest" + +# Make sure some basic build tools are installed. +sudo apt-get update && sudo sudo apt-get install build-essential procps curl file git rsync -y + +# Install kubectl. +curl -fLO https://storage.googleapis.com/kubernetes-release/release/"$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)"/bin/linux/amd64/kubectl +chmod +x kubectl +sudo mv kubectl /usr/local/bin/ + +# Install docker according to procedure from https://docs.docker.com/engine/install/debian/ +sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y +curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null +sudo apt-get update +sudo apt-get install docker-ce docker-ce-cli containerd.io -y +sudo systemctl enable docker.service +sudo systemctl enable containerd.service +# Docker is only available for use by the root user in a default install, so run docker commands as root. +sudo docker run hello-world + +echo +echo "Installing Docker and dev tools succeeded." +echo + +# Clone kind and k/k. +git clone https://github.com/kubernetes-sigs/kind.git kind +kind_version=$(git -C kind log -1 --pretty='%h') +# Clone as root because we are going to run the Kubernetes build scripts as root, +# and the file ownerships need to match the user who runs the scripts. +sudo git clone https://github.com/kubernetes/kubernetes.git /tmp/kubernetes +kube_version=$(sudo git -C /tmp/kubernetes log -1 --pretty='%h') + +echo +echo "Cloning repos succeeded. Kind @ ${kind_version} and Kube @ ${kube_version}." +echo + +# Build kind. This make command will install Go if needed. +cd kind +make build + +echo +echo "Building kind succeeded." +echo + +# Use kind to build a node image using the latest k/k. +sudo ./bin/kind build node-image --image "${image_tag}" /tmp/kubernetes -v=3 + +echo +echo "Building node image succeeded." +echo + +# Test that the new kind image can be used to successfully create a kind cluster. +# In case of cluster creation failure, maybe it would be interesting to export the logs? 
`./bin/kind export logs /tmp/kind` +sudo ./bin/kind create cluster --image "${image_tag}" -v=3 --wait 1m --retain + +# Make sure we can query some basic stuff from the new cluster. +sudo kubectl get nodes -o wide +sudo kubectl get pods --all-namespaces -o wide +sudo kubectl get services --all-namespaces -o wide + +echo +echo "Creating cluster with new node image succeeded." +echo + +echo "$DOCKER_PASSWORD" | sudo docker login "${PUSH_TO_IMAGE_REGISTRY}" -u "$DOCKER_USERNAME" --password-stdin +sudo docker push "${image_tag}" + +version_tag="${full_repo}:kind${kind_version}_k8s${kube_version}" +sudo docker tag "${image_tag}" "${version_tag}" +sudo docker push "${version_tag}" + +echo +echo "Image push succeeded." diff --git a/pipelines/shared-tasks/build-kind-node-image/task.sh b/pipelines/shared-tasks/build-kind-node-image/task.sh new file mode 100755 index 000000000..58e5b90a4 --- /dev/null +++ b/pipelines/shared-tasks/build-kind-node-image/task.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +instance_name=$(cat instance/name) + +local_build_script="pinniped-ci/pipelines/shared-tasks/build-kind-node-image/build-image.sh" +remote_build_script="/tmp/build-image.sh" + +gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$GCP_PROJECT" + +# Create a temporary username because we can't ssh as root. Note that this username must be 32 character or less. +ssh_user="kind-node-builder-$(openssl rand -hex 4)" +ssh_dest="${ssh_user}@${instance_name}" +echo "ssh user@dest will be ${ssh_dest}" + +# gcloud scp/ssh commands will interactively prompt to create an ssh key unless one already exists, so create one. +mkdir -p "$HOME/.ssh" +ssh_key_file="$HOME/.ssh/kind-node-builder-key" +ssh-keygen -t rsa -b 4096 -q -N "" -f "$ssh_key_file" + +# Copy the build script to the VM. +echo "Copying $local_build_script to $instance_name as $remote_build_script..." +gcloud compute scp --zone "$INSTANCE_ZONE" --project "$GCP_PROJECT" \ + --ssh-key-file "$ssh_key_file" --ssh-key-expire-after 1h --strict-host-key-checking no \ + "$local_build_script" "$ssh_dest":"$remote_build_script" + +# Run the script that was copied to the server above. +# Note that this assumes that there is no single quote character inside the values of PUSH_TO_IMAGE_REPO, +# DOCKER_USERNAME, and DOCKER_PASSWORD, which would cause quoting problems in the command below. +echo "Running $remote_build_script on $instance_name..." +gcloud compute ssh --zone "$INSTANCE_ZONE" --project "$GCP_PROJECT" "$ssh_dest" \ + --ssh-key-file "$ssh_key_file" --ssh-key-expire-after 1h --strict-host-key-checking no \ + --command "chmod 755 $remote_build_script && export PUSH_TO_IMAGE_REGISTRY='${PUSH_TO_IMAGE_REGISTRY}' && export PUSH_TO_IMAGE_REPO='${PUSH_TO_IMAGE_REPO}' && export DOCKER_USERNAME='${DOCKER_USERNAME}' && export DOCKER_PASSWORD='${DOCKER_PASSWORD}' && $remote_build_script" + +echo +echo "Done!" diff --git a/pipelines/shared-tasks/build-kind-node-image/task.yml b/pipelines/shared-tasks/build-kind-node-image/task.yml new file mode 100644 index 000000000..732676cc7 --- /dev/null +++ b/pipelines/shared-tasks/build-kind-node-image/task.yml @@ -0,0 +1,22 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci + - name: instance +outputs: +params: + # Docker push destination location and credentials + PUSH_TO_IMAGE_REGISTRY: + PUSH_TO_IMAGE_REPO: + DOCKER_USERNAME: + DOCKER_PASSWORD: + # GCP VM location and credentials + INSTANCE_ZONE: + GCP_PROJECT: + GCP_USERNAME: + GCP_JSON_KEY: +run: + path: pinniped-ci/pipelines/shared-tasks/build-kind-node-image/task.sh diff --git a/pipelines/shared-tasks/check-dockerfile-deps-updated/task.sh b/pipelines/shared-tasks/check-dockerfile-deps-updated/task.sh new file mode 100755 index 000000000..633a064ad --- /dev/null +++ b/pipelines/shared-tasks/check-dockerfile-deps-updated/task.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +golang="golang" +distroless="gcr.io/distroless/static" +distroless_tag="nonroot" + +new_golang="${golang}:$(cat golang-image/tag)@$(cat golang-image/digest)" + +# Because we were having trouble getting the concourse registry-image resource to check this container image +# without auth errors, we get its latest digest here by using crane instead. +new_distroless="${distroless}:${distroless_tag}@$(crane digest "${distroless}:${distroless_tag}")" + +echo "FOUND LATEST VERSIONS:" +echo "$new_golang" +echo "$new_distroless" +echo + +if [[ "$(cat golang-image/tag)" == "latest" ]]; then + echo "ERROR: The tag for the golang-image resource is 'latest'." + echo "This means we are experiencing the Concourse bug https://github.com/concourse/registry-image-resource/issues/351." + echo "Refusing to continue. We do not want to put the 'latest' tag into our Dockerfiles." + echo + echo "WORKAROUND: Please visit the Concourse UI page for the golang-image resource" + echo "in this pipeline and disable the resource version with the 'latest' tag by clicking its checkbox" + echo "to toggle it to the disabled state. Then trigger this job again." + exit 1 +fi + +# Copy everything to output. +# Don't use git clone because that would throw away uncommitted changes from previous tasks. +# Be careful to include the .git directory too. +cp -r pinniped-in/. pinniped-out + +cd pinniped-out + +dockerfile_list=("Dockerfile" "hack/Dockerfile_fips") + +for dockerfile in "${dockerfile_list[@]}"; do + + # Replace all golang:anything + sed -E -i "s/${golang}:\\S+/${new_golang}/g" "$dockerfile" + + # Replace all golang@anything + # Do this second so it does not replace the results of the above sed, which will be golang:new_value + sed -E -i "s/${golang}@\\S+/${new_golang}/g" "$dockerfile" + + # Replace all gcr.io/distroless/static:anything + sed -E -i "s#${distroless}:\\S+#${new_distroless}#g" "$dockerfile" + + # Replace all gcr.io/distroless/static@anything + # Do this second so it does not replace the results of the above sed + sed -E -i "s#${distroless}@\\S+#${new_distroless}#g" "$dockerfile" + + # Print diff output to the screen so it is shown in the job output. + echo + git --no-pager diff "$dockerfile" + +done diff --git a/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml b/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml new file mode 100644 index 000000000..96c749ba5 --- /dev/null +++ b/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml @@ -0,0 +1,14 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-in + - name: pinniped-ci + - name: golang-image +outputs: + - name: pinniped-out +params: +run: + path: pinniped-ci/pipelines/shared-tasks/check-dockerfile-deps-updated/task.sh diff --git a/pipelines/shared-tasks/check-golang-deps-updated/task.sh b/pipelines/shared-tasks/check-golang-deps-updated/task.sh new file mode 100755 index 000000000..3d4672fe7 --- /dev/null +++ b/pipelines/shared-tasks/check-golang-deps-updated/task.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# Copy everything to output. +git clone ./pinniped-in ./pinniped-out + +cd pinniped-out + +./hack/update-go-mod/update-go-mod.sh + +# Print diff output to the screen so it is shown in the job output. +echo +git --no-pager diff diff --git a/pipelines/shared-tasks/check-golang-deps-updated/task.yml b/pipelines/shared-tasks/check-golang-deps-updated/task.yml new file mode 100644 index 000000000..a93c8a685 --- /dev/null +++ b/pipelines/shared-tasks/check-golang-deps-updated/task.yml @@ -0,0 +1,19 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + # Always use the latest version of Go for this job, so it can update the toolchain directives to match that version. + tag: latest +inputs: + - name: pinniped-in + - name: pinniped-ci +outputs: + - name: pinniped-out +params: +run: + path: pinniped-ci/pipelines/shared-tasks/check-golang-deps-updated/task.sh diff --git a/pipelines/shared-tasks/cleanup-aws/task.sh b/pipelines/shared-tasks/cleanup-aws/task.sh new file mode 100755 index 000000000..770922400 --- /dev/null +++ b/pipelines/shared-tasks/cleanup-aws/task.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env sh + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script will aggressively cleanup our AWS resources using the aws-nuke +# command below. +# +# Currently, we cleanup a hardcoded subset resource types to be +# risk-averse. This list can be extended by adding a resource type to +# .resource-types.targets in the config YAML below. The resources that are there +# were selected based on scanning our AWS account and finding the most egregious +# violators of infrastructure pollution. +# +# Some things to think about before you run/edit this script: +# - Are there CI jobs currently running on AWS infrastructure (think: EKS clusters) +# that will start to fail if you run this script? +# - Are we deleting resources that VMware created when they set this account +# up for us (think: bootstrapped IAMRole's)? +# - Should we start running this script on a scheduled (i.e., every Saturday +# morning)? + +# Set up our AWS service account for the aws-nuke command to use. 
+# This should be the equivalent of running these commands, but without needing the aws CLI: +#aws configure set credential_source Environment --profile service-account +#aws configure set role_arn "$AWS_ROLE_ARN" --profile service-account +mkdir "$HOME/.aws" +cat <<EOF > "$HOME/.aws/config" +[profile service-account] +credential_source = Environment +role_arn = $AWS_ROLE_ARN +EOF + +targets="{}" # the empty map indicates that we want to target _all_ resource types +# target the whole account with no filters +if [[ "$ALL_RESOURCES" != "yes" ]]; then + # let's try to keep these in case-insensitive alpha order for search-ability + targets=" + targets: + - CloudFormationStack + - CloudWatchAlarm + - EC2Address + - EC2Instance + - EC2InternetGateway + - EC2InternetGatewayAttachment + - EC2KeyPair + - EC2NATGateway + - EC2NetworkACL + - EC2NetworkInterface + - EC2RouteTable + - EC2SecurityGroup + - EC2Subnet + - EC2Volume + - EC2VPC + - ELB +" +fi + +# explicitly exclude us-east-2 from this list because we have long-running environments there. +config_file="$(mktemp)" +cat <<EOF >"$config_file" +regions: +- us-west-1 +- us-west-2 +- us-east-1 +- global + +account-blocklist: +# dummy entry -- we don't have any production accounts, but aws-nuke forces you to have at least 1 +- "999999999999" + +resource-types: + # only nuke these resource types + $targets + +accounts: + "${AWS_ACCOUNT_NUMBER}": {} +EOF + +cmd="aws-nuke --config ${config_file} --profile service-account" +if [[ "$REALLY_CLEANUP" == "yes" ]]; then + cmd="$cmd --no-dry-run" +fi + +# turn off pipefail since the first command below (i.e., the subshell) will most +# likely get sent SIGPIPE after aws-nuke exits and that will cause our script to +# fail. +set +o pipefail + +# continually send "tua-test" to stdin to serve as a confirmation for aws-nuke. +# this is done in a loop since aws-nuke uses a new buffered reader to consume +# stdin each time it wants to accept input from the user. +(while true; do echo tua-test; sleep 1; done) | ${cmd} diff --git a/pipelines/shared-tasks/cleanup-aws/task.yml b/pipelines/shared-tasks/cleanup-aws/task.yml new file mode 100644 index 000000000..faa86248f --- /dev/null +++ b/pipelines/shared-tasks/cleanup-aws/task.yml @@ -0,0 +1,20 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: quay.io/rebuy/aws-nuke +inputs: + - name: pinniped-ci +params: + AWS_ACCOUNT_NUMBER: + AWS_SECRET_ACCESS_KEY: + AWS_ACCESS_KEY_ID: + REALLY_CLEANUP: "no" + ALL_RESOURCES: "no" + AWS_ROLE_ARN: +run: + path: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.sh diff --git a/pipelines/shared-tasks/confirm-built-with-fips/task.sh b/pipelines/shared-tasks/confirm-built-with-fips/task.sh new file mode 100755 index 000000000..61c6cc667 --- /dev/null +++ b/pipelines/shared-tasks/confirm-built-with-fips/task.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# check whether the pinniped-server binary has particular symbols that only exist when it's compiled with boringcrypto. +# https://go.googlesource.com/go/+/dev.boringcrypto/misc/boring#caveat + +# Starting in 1.19, go-boringcrypto has been added to the main Go toolchain, +# hidden behind a `GOEXPERIMENT=boringcrypto` env var.
+# See https://go.googlesource.com/go/+/dev.boringcrypto/README.boringcrypto.md +# and https://kupczynski.info/posts/fips-golang/ for details. + +pinniped_server_has_boringcrypto="$(go tool nm './image/rootfs/usr/local/bin/pinniped-server' | grep '_Cfunc__goboringcrypto_')" +# check that we got any output from the previous command. If it wasn't built with boringcrypto, this variable +# should be empty because grep should filter it all out. Else it'll be a long list of symbols. +if [ -z "$pinniped_server_has_boringcrypto" ] +then + echo "Pinniped server binary wasn't built with boringcrypto." + exit 1 +fi +# check whether the pinniped-server binary has particular symbols that only exist when it's compiled with non-boring crypto +pinniped_server_has_regular_crypto="$(go tool nm './image/rootfs/usr/local/bin/pinniped-server' | grep sha256 | grep di)" +# if any of these symbols exist, that means it was compiled wrong and it should fail. +if [ -n "$pinniped_server_has_regular_crypto" ] +then + echo "Pinniped server binary was built with non-boring crypto." + exit 1 +fi +# check whether the kube-cert-agent binary has particular symbols that only exist when it's compiled with non-boring crypto +kube_cert_agent_has_regular_crypto="$(go tool nm './image/rootfs/usr/local/bin/pinniped-concierge-kube-cert-agent' | grep sha256 | grep di)" +# if any of these symbols exist, that means it was compiled wrong and it should fail. +if [ -n "$kube_cert_agent_has_regular_crypto" ] +then + echo "kube-cert-agent binary was built with non-boring crypto." + exit 1 +fi +# check the ldd output to see whether we compiled a static executable or not. +pinniped_server_ldd="$(ldd './image/rootfs/usr/local/bin/pinniped-server' 2>&1)" +# if it doesn't contain this line, that means the executable was dynamic, +# which we don't want. +if [[ "$pinniped_server_ldd" != *"not a dynamic executable"* ]] +then + echo "pinniped server binary is a dynamic executable." + exit 1 +fi +# check the ldd output to see whether we compiled a static executable or not. +kube_cert_agent_ldd="$(ldd './image/rootfs/usr/local/bin/pinniped-concierge-kube-cert-agent' 2>&1)" +# if it doesn't contain this line, that means the executable was dynamic, +# which we don't want. +if [[ "$kube_cert_agent_ldd" != *"not a dynamic executable"* ]] +then + echo "kube cert agent binary is a dynamic executable." + exit 1 +fi \ No newline at end of file diff --git a/pipelines/shared-tasks/confirm-built-with-fips/task.yml b/pipelines/shared-tasks/confirm-built-with-fips/task.yml new file mode 100644 index 000000000..f71aea81e --- /dev/null +++ b/pipelines/shared-tasks/confirm-built-with-fips/task.yml @@ -0,0 +1,15 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: image + - name: pinniped-ci +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +run: + path: pinniped-ci/pipelines/shared-tasks/confirm-built-with-fips/task.sh diff --git a/pipelines/shared-tasks/confirm-version/task.yml b/pipelines/shared-tasks/confirm-version/task.yml new file mode 100644 index 000000000..3a8f54324 --- /dev/null +++ b/pipelines/shared-tasks/confirm-version/task.yml @@ -0,0 +1,38 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: image +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +run: + # Confirm that the correct git sha was baked into the executables and that they log the version as their + # first line of output. Do this by directly running the server binary from the rootfs of the built image. + path: bash + args: + - -ceux + - | + set -o pipefail + pushd pinniped >/dev/null + expected_sha=$(git rev-parse HEAD) + expected_date=$(git --no-pager log -1 --format='%ct' | xargs -I {} date --date='TZ="UTC" @{}' +"%Y-%m-%dT%H:%M:%SZ") + popd >/dev/null + if [[ "$expected_sha" == "" ]]; then exit 1; fi + + server_binary="$PWD/image/rootfs/usr/local/bin/pinniped-server" + ln -s "$server_binary" /usr/local/bin/pinniped-concierge + ln -s "$server_binary" /usr/local/bin/pinniped-supervisor + + # These commands should log the version within the first 10 lines of output, and then exit with an error because there is no yaml config file to load. + set +e + concierge_output=$(/usr/local/bin/pinniped-concierge 2>&1 >/dev/null | head -10) + supervisor_output=$(/usr/local/bin/pinniped-supervisor 2>&1 >/dev/null | head -10) + set -e + echo $concierge_output | grep ",\"gitCommit\":\"$expected_sha\",\"gitTreeState\":\"clean\",\"buildDate\":\"$expected_date" + echo $supervisor_output | grep ",\"gitCommit\":\"$expected_sha\",\"gitTreeState\":\"clean\",\"buildDate\":\"$expected_date" diff --git a/pipelines/shared-tasks/copy-image/task.sh b/pipelines/shared-tasks/copy-image/task.sh new file mode 100755 index 000000000..f1a1c2fb7 --- /dev/null +++ b/pipelines/shared-tasks/copy-image/task.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +SOURCE_REPOSITORY="$(cat ci-build-image/repository)" +SOURCE_DIGEST="$(cat ci-build-image/digest)" +SOURCE_IMAGE="${SOURCE_REPOSITORY}@${SOURCE_DIGEST}" + +SOURCE_REGISTRY="$(echo "$SOURCE_REPOSITORY" | cut -d / -f 1)" +DESTINATION_REGISTRY="$(echo "$DESTINATION_REPOSITORY" | cut -d / -f 1)" + +# Login to both the source and the dest. +echo "Logging in to $SOURCE_REGISTRY ..." +crane auth login -u "$SOURCE_REPOSITORY_USERNAME" -p "$SOURCE_REPOSITORY_PASSWORD" "$SOURCE_REGISTRY" +echo "Logging in to $DESTINATION_REGISTRY ..." +crane auth login -u "$DESTINATION_REPOSITORY_USERNAME" -p "$DESTINATION_REPOSITORY_PASSWORD" "$DESTINATION_REGISTRY" + +# Create an array of all desired tags. +echo "Collecting desired tags ..." +DESTINATION_TAGS=() + +# Add the destination tag, if one was specified. +if [[ -n "$DESTINATION_TAG" ]]; then + echo "Saw desired tag $DESTINATION_TAG" + DESTINATION_TAGS+=("$DESTINATION_TAG") +fi + +# Add each tag from input file release-info/image-tags. +while IFS="" read -r tag || [ -n "$tag" ]; do + echo "Saw desired tag $tag" + DESTINATION_TAGS+=("$tag") +done name + +echo "Done!" diff --git a/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml b/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml new file mode 100644 index 000000000..bf708bff9 --- /dev/null +++ b/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml @@ -0,0 +1,16 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci +outputs: + - name: create-kind-node-builder-vm-output +params: + INSTANCE_ZONE: + GCP_PROJECT: + GCP_USERNAME: + GCP_JSON_KEY: +run: + path: pinniped-ci/pipelines/shared-tasks/create-kind-node-builder-vm/task.sh diff --git a/pipelines/shared-tasks/create-or-update-pr/task.sh b/pipelines/shared-tasks/create-or-update-pr/task.sh new file mode 100755 index 000000000..c0484631c --- /dev/null +++ b/pipelines/shared-tasks/create-or-update-pr/task.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +branch="${BRANCH:-"pinny/bump-deps"}" + +cd pinniped + +# Print the current status to the log. +git status + +# Copied from https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/githubs-ssh-key-fingerprints +github_hosts=' +github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl +github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= +github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= +' + +# Prepare to be able to do commits and pushes. +ssh_dir="$HOME"/.ssh/ +mkdir "$ssh_dir" +echo "$github_hosts" >"$ssh_dir"/known_hosts +echo "${DEPLOY_KEY}" >"$ssh_dir"/id_rsa +chmod 600 "$ssh_dir"/id_rsa +git config user.email "pinniped-ci-bot@users.noreply.github.com" +git config user.name "Pinny" +git remote add ssh_origin "git@github.com:vmware-tanzu/pinniped.git" + +# Add all the changed files. +git add . + +# Print the current status to the log. +git status + +# Did we just stage any changes? +staged=$(git --no-pager diff --staged) +if [[ "$staged" == "" ]]; then + # Nothing to commit. We are done. + echo "No changes to any files detected. Done." + exit 0 +fi + +# Check if the branch already exists on the remote. +new_branch="no" +if [[ -z "$(git ls-remote ssh_origin "$branch")" ]]; then + echo "The branch does not already exist, so create it." + git checkout -b "$branch" + git status + new_branch="yes" +else + echo "The branch already exists, so pull it." + # Stash our changes before using git checkout and git reset, which both can throw away local changes. + git status + git stash + # Fetch all the remote branches so we can use one of them. + git fetch ssh_origin + # The branch already exists, so reuse it. + git checkout "$branch" + # Pull to sync up commits with the remote branch. + git pull --rebase --autostash + # Throw away all previous commits on the branch and set it up to look like main again. + git reset --hard main + # Bring back our changes and stage them again. + git stash pop + git add . + git status +fi + +# Show diff for the log. +echo "Found changes to commit:" +echo +git --no-pager diff --staged +echo + +# Commit. +echo "Committing changes to branch $branch. New branch? $new_branch." 
+git commit -m "Bump dependencies" + +# Push. +if [[ "$new_branch" == "yes" ]]; then + # Push the new branch to the remote. + echo "Pushing the new branch." + git push --set-upstream ssh_origin "$branch" +else + # Force push the existing branch to the remote. + echo "Force pushing the existing branch." + git push --force-with-lease +fi + +# Now check if there is already a PR open for our branch. +# If there is already an open PR, then we just updated it by force pushing the branch. +# Note that using the gh CLI without login depends on setting the GH_TOKEN env var. +open_pr=$(gh pr list --head "$branch" --json title --jq '. | length') +if [[ "$open_pr" == "0" ]]; then + # There is no currently open PR for this branch, so open a new PR for this branch + # against main, and set the title and body. + echo "Creating PR." + gh pr create --head "$branch" --base main \ + --title "Bump dependencies" --body "Automatically bumped all go.mod direct dependencies and/or images in dockerfiles." +fi diff --git a/pipelines/shared-tasks/create-or-update-pr/task.yml b/pipelines/shared-tasks/create-or-update-pr/task.yml new file mode 100644 index 000000000..862616096 --- /dev/null +++ b/pipelines/shared-tasks/create-or-update-pr/task.yml @@ -0,0 +1,14 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci + - name: pinniped +params: + DEPLOY_KEY: + GH_TOKEN: + BRANCH: +run: + path: pinniped-ci/pipelines/shared-tasks/create-or-update-pr/task.sh diff --git a/pipelines/shared-tasks/deploy-aks-cluster/task.sh b/pipelines/shared-tasks/deploy-aks-cluster/task.sh new file mode 100755 index 000000000..d1ee433a6 --- /dev/null +++ b/pipelines/shared-tasks/deploy-aks-cluster/task.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +cd deploy-aks-cluster-output +az login \ + --service-principal \ + --tenant "$AZURE_TENANT" \ + --username "$AZURE_USERNAME" \ + --password "$AZURE_PASSWORD" + +echo +echo "Trying to use Kubernetes version $KUBE_VERSION" + +# mcr.microsoft.com/azure-cli image doesn't include jq anymore. +# https://github.com/Azure/azure-cli/issues/29827#issuecomment-2326125769 +# https://github.com/MicrosoftDocs/azure-docs-cli/blob/main/docs-ref-conceptual/release-notes-azure-cli.md +# But it does seem to include openssl! +tdnf install jq --assumeyes + +# Look up the latest AKS Kubernetes version corresponding to $KUBE_VERSION. +AKS_VERSIONS="$(az aks get-versions --location "$AZURE_REGION" -o json \ + | jq -r '.values[].patchVersions|keys' \ + | jq -s flatten \ + | jq -r 'join("\n")' \ + | sort -rn)" +echo +echo "Found all versions of Kubernetes supported by AKS:" +echo "$AKS_VERSIONS" + +AKS_VERSION="$(echo "$AKS_VERSIONS" | grep -F "$KUBE_VERSION" | head -1)" +echo +echo "Selected AKS version $AKS_VERSION" + +# The cluster name becomes the name of the lock in the pool. +CLUSTER_NAME="aks-$(openssl rand -hex 8)" +echo "$CLUSTER_NAME" > name + +# Start the cluster. +az aks create \ + --resource-group "$AZURE_RESOURCE_GROUP" \ + --name "$CLUSTER_NAME" \ + --kubernetes-version "$AKS_VERSION" \ + --node-count 1 \ + --generate-ssh-keys \ + --enable-managed-identity + +# Get an admin kubeconfig (client cert + long-lived token), which becomes the value of the lock in the pool. 
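+# Note: --admin asks AKS for the cluster's local admin credentials (a client certificate that is not tied to
+# Azure AD sign-in), which is what lets CI use the resulting kubeconfig non-interactively.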
+az aks get-credentials \ + --name "$CLUSTER_NAME" \ + --resource-group "$AZURE_RESOURCE_GROUP" \ + --admin \ + --file metadata +chmod 0644 metadata diff --git a/pipelines/shared-tasks/deploy-aks-cluster/task.yml b/pipelines/shared-tasks/deploy-aks-cluster/task.yml new file mode 100644 index 000000000..7fcfc5c9a --- /dev/null +++ b/pipelines/shared-tasks/deploy-aks-cluster/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci +outputs: + - name: deploy-aks-cluster-output +params: + KUBE_VERSION: + AZURE_REGION: + AZURE_TENANT: + AZURE_RESOURCE_GROUP: + AZURE_USERNAME: + AZURE_PASSWORD: +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-aks-cluster/task.sh diff --git a/pipelines/shared-tasks/deploy-eks-cluster/task.sh b/pipelines/shared-tasks/deploy-eks-cluster/task.sh new file mode 100755 index 000000000..6eb037dd6 --- /dev/null +++ b/pipelines/shared-tasks/deploy-eks-cluster/task.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +echo "Using Kubernetes version $KUBE_VERSION" +cd deploy-eks-cluster-output + +# Set up our AWS service account in the AWS CLI. +aws configure set credential_source Environment --profile service-account +aws configure set role_arn "$AWS_ROLE_ARN" --profile service-account + +# Set some variables. +CLUSTER_NAME="eks-$(python -c 'import os,binascii; print binascii.b2a_hex(os.urandom(8))')" +ADMIN_USERNAME="$CLUSTER_NAME-admin" +export CLUSTER_NAME +export ADMIN_USERNAME +export AWS_PAGER="" # prevent aws CLI hang with "WARNING: terminal is not fully functional" +ADMIN_KUBECONFIG="admin-kubeconfig" +SERVICE_ACCOUNT_NAME=test-admin-service-account +SERVICE_ACCOUNT_NAMESPACE=default +SECRET_NAME="${SERVICE_ACCOUNT_NAME}-secret" +NEW_KUBECONFIG_FILE="metadata" +NEW_CONTEXT=default +NEW_KUBECONFIG_USER="admin-service-account" + +# The cluster name becomes the name of the lock in the pool. +echo "$CLUSTER_NAME" > name + +# The kubeconfig file becomes the value of the lock in the pool. +echo "Creating $CLUSTER_NAME in $AWS_DEFAULT_REGION..." + +# See https://eksctl.io/usage/schema/ for documentation of this yaml. +cat < ${NEW_KUBECONFIG_FILE}.minified +mv ${NEW_KUBECONFIG_FILE}.minified ${NEW_KUBECONFIG_FILE} + +# Check that the new kubeconfig file works +kubectl get namespaces --kubeconfig "${NEW_KUBECONFIG_FILE}" + +# Set the permissions on the file. +chmod 0644 "${NEW_KUBECONFIG_FILE}" diff --git a/pipelines/shared-tasks/deploy-eks-cluster/task.yml b/pipelines/shared-tasks/deploy-eks-cluster/task.yml new file mode 100644 index 000000000..3420c6af1 --- /dev/null +++ b/pipelines/shared-tasks/deploy-eks-cluster/task.yml @@ -0,0 +1,17 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci +outputs: + - name: deploy-eks-cluster-output +params: + KUBE_VERSION: + AWS_DEFAULT_REGION: + AWS_ACCESS_KEY_ID: + AWS_SECRET_ACCESS_KEY: + AWS_ROLE_ARN: +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-eks-cluster/task.sh diff --git a/pipelines/shared-tasks/deploy-gke-cluster/task.sh b/pipelines/shared-tasks/deploy-gke-cluster/task.sh new file mode 100755 index 000000000..a11a7006b --- /dev/null +++ b/pipelines/shared-tasks/deploy-gke-cluster/task.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# When the kubeconfig is generated by gcloud below, this env var asks gcloud to use the new +# gke-gcloud-auth-plugin client credentials auth plugin in the kubeconfig file. +# See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke +export USE_GKE_GCLOUD_AUTH_PLUGIN=True + +cd deploy-gke-cluster-output +gcloud auth activate-service-account "$GCP_SERVICE_ACCOUNT" --key-file <(echo "$GCP_JSON_KEY") --project "$GCP_PROJECT" + + +if [ -n "$KUBE_VERSION" ]; then + echo + echo "Trying to use Kubernetes version $KUBE_VERSION" + + # Look up the latest GKE version for KUBE_VERSION. + GKE_VERSIONS="$(gcloud container get-server-config --zone "$CLUSTER_ZONE" --format json \ + | jq -r '.validMasterVersions[]')" + echo + echo "Found all versions of Kubernetes supported by GKE:" + echo "$GKE_VERSIONS" + + GKE_VERSION="$(echo "$GKE_VERSIONS" | grep -F "$KUBE_VERSION" \ + | sort -rn \ + | head -1)" + echo + echo "Selected GKE version $GKE_VERSION" + + export VERSION_FLAG="--cluster-version=$GKE_VERSION" +else + export VERSION_FLAG="--release-channel=${GKE_CHANNEL:-"regular"}" +fi + +# Include the zone of the cluster in its name. This will allow us to change our preferred zone for new +# clusters anytime we want, and the existing clusters can still be deleted because the old zone can +# be parsed out from the cluster name at deletion time. +CLUSTER_NAME="gke-$(openssl rand -hex 4)-zone-${CLUSTER_ZONE}" + +# The cluster name becomes the name of the lock in the pool. +echo "$CLUSTER_NAME" > name + +# Start the cluster +# Note that --enable-network-policy is required to enable NetworkPolicy resources. Otherwise they are ignored. +gcloud container clusters create "$CLUSTER_NAME" \ + --zone "$CLUSTER_ZONE" \ + "$VERSION_FLAG" \ + --num-nodes 1 \ + --machine-type e2-standard-4 \ + --preemptible \ + --issue-client-certificate \ + --no-enable-basic-auth \ + --enable-network-policy + +# Get the cluster details back, including the admin certificate: +gcloud container clusters describe "$CLUSTER_NAME" --zone "$CLUSTER_ZONE" --format json \ + > /tmp/cluster.json + +# Make a new kubeconfig user "cluster-admin" using the admin cert. +jq -r .masterAuth.clientCertificate /tmp/cluster.json | base64 -d > /tmp/client.crt +jq -r .masterAuth.clientKey /tmp/cluster.json | base64 -d > /tmp/client.key +kubectl config set-credentials cluster-admin \ + --client-certificate=/tmp/client.crt \ + --client-key=/tmp/client.key + +# Give the "client" user cluster-admin access +kubectl create clusterrolebinding test-client-is-admin --clusterrole cluster-admin --user client + +# Set the kubeconfig context to use the cluster-admin user. +kubectl config set-context --current --user cluster-admin + +# The kubeconfig file becomes the value of the lock in the pool. 
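+# --minify keeps only the entries referenced by the current context, and --flatten inlines the certificate
+# and key data instead of referencing files on disk, so the saved kubeconfig is fully self-contained.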
+kubectl config view --minify --flatten -o yaml > metadata diff --git a/pipelines/shared-tasks/deploy-gke-cluster/task.yml b/pipelines/shared-tasks/deploy-gke-cluster/task.yml new file mode 100644 index 000000000..306934385 --- /dev/null +++ b/pipelines/shared-tasks/deploy-gke-cluster/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci +outputs: + - name: deploy-gke-cluster-output +params: + KUBE_VERSION: + CLUSTER_ZONE: + GCP_PROJECT: + GCP_SERVICE_ACCOUNT: + GCP_JSON_KEY: + GKE_CHANNEL: +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-gke-cluster/task.sh diff --git a/pipelines/shared-tasks/deploy-kind-cluster-vm/gce-init.sh b/pipelines/shared-tasks/deploy-kind-cluster-vm/gce-init.sh new file mode 100644 index 000000000..8f5a7164b --- /dev/null +++ b/pipelines/shared-tasks/deploy-kind-cluster-vm/gce-init.sh @@ -0,0 +1,206 @@ +#!/bin/bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This is the script that runs at startup to launch Kind on GCE. +# A log of the output of this script can be viewed by running this command on the VM: +# sudo journalctl -u google-startup-scripts.service + +set -euo pipefail + +function cleanup() { + # Upon exit, try to save the log of everything that happened to make debugging errors easier. + curl --retry-all-errors --retry 5 -X PUT --data "$(journalctl -u google-startup-scripts.service)" \ + http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/kind/init_log -H "Metadata-Flavor: Google" +} +trap "cleanup" EXIT SIGINT + +PUBLIC_IP="$(curl --retry-all-errors --retry 5 http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip -H "Metadata-Flavor: Google")" +KIND_VERSION="$(curl --retry-all-errors --retry 5 http://metadata.google.internal/computeMetadata/v1/instance/attributes/kind_version -H "Metadata-Flavor: Google")" +K8S_VERSION="$(curl --retry-all-errors --retry 5 http://metadata.google.internal/computeMetadata/v1/instance/attributes/k8s_version -H "Metadata-Flavor: Google")" +KIND_NODE_IMAGE="$(curl --retry-all-errors --retry 5 http://metadata.google.internal/computeMetadata/v1/instance/attributes/kind_node_image -H "Metadata-Flavor: Google")" + +if [[ "$(uname -m)" = x86_64 ]]; then + ARCH=amd64 +elif [[ "$(uname -m)" = aarch64 ]]; then + ARCH=arm64 +else + echo "Error determining architecture from uname -m = $(uname -m)" + exit 1 +fi + +# Install kind +curl --retry-all-errors --retry 10 -Lo /var/lib/google/kind "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-${ARCH}" +chmod +x /var/lib/google/kind + +# Install kubectl +curl --retry-all-errors --retry 10 -Lo /var/lib/google/kubectl "https://dl.k8s.io/release/$(curl -fL -s https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" +chmod +x /var/lib/google/kubectl + +# Starting in Kind v0.12.0, it seems that we must use kubeadm.k8s.io/v1beta3 *only* for Kube 1.23+. +KIND_MAJOR_VERSION=$(echo "$KIND_VERSION" | cut -c2- | cut -d"." -f1) # also cuts off the leading "v" +KIND_MINOR_VERSION=$(echo "$KIND_VERSION" | cut -d"." -f2) +K8S_MAJOR_VERSION=$(echo "$K8S_VERSION" | cut -c2- | cut -d"." -f1) # also cuts off the leading "v" +K8S_MINOR_VERSION=$(echo "$K8S_VERSION" | cut -d"." 
-f2)
+KUBE_ADM_VERSION="kubeadm.k8s.io/v1beta2"
+if [[ "$KIND_MAJOR_VERSION" -gt "0" || ( "$KIND_MAJOR_VERSION" == "0" && "$KIND_MINOR_VERSION" -ge "12" ) ]]; then
+  if [[ "$K8S_VERSION" == "k8s-main" || "$K8S_MAJOR_VERSION" -gt "1" || ( "$K8S_MAJOR_VERSION" == "1" && "$K8S_MINOR_VERSION" -ge "23" ) ]]; then
+    KUBE_ADM_VERSION="kubeadm.k8s.io/v1beta3"
+  fi
+fi
+echo "Selected kubeadm config version $KUBE_ADM_VERSION based on Kind version $KIND_VERSION and K8s version $K8S_VERSION"
+
+# create a reasonable baseline audit config that only logs metadata
+mkdir -p "/tmp/audit-config"
+cat <<EOF > /tmp/audit-config/audit-config.yaml
+apiVersion: audit.k8s.io/v1
+kind: Policy
+# Don't generate audit events for all requests in RequestReceived stage.
+omitStages:
+- "RequestReceived"
+rules:
+# Don't log requests for events
+- level: None
+  resources:
+  - group: ""
+    resources: ["events"]
+# Don't log authenticated requests to certain non-resource URL paths.
+- level: None
+  userGroups: ["system:authenticated", "system:unauthenticated"]
+  nonResourceURLs:
+  - "/api*" # Wildcard matching.
+  - "/version"
+  - "/healthz"
+  - "/readyz"
+# A catch-all rule to log all other requests at the Metadata level.
+- level: Metadata
+  # Long-running requests like watches that fall under this rule will not
+  # generate an audit event in RequestReceived.
+  omitStages:
+    - "RequestReceived"
+EOF
+
+cat <<EOF > /tmp/kind.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: "0.0.0.0"
+  apiServerPort: 6443
+kubeadmConfigPatches:
+- |
+  apiVersion: ${KUBE_ADM_VERSION}
+  kind: ClusterConfiguration
+  # ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane.
+  controlPlaneEndpoint: "${PUBLIC_IP}:6443"
+  # mount the kind extraMounts into the API server static pod so we can use the audit config
+  apiServer:
+    extraVolumes:
+      - name: audit-config
+        hostPath: /audit-config/audit-config.yaml
+        mountPath: /audit-config/audit-config.yaml
+        readOnly: true
+        pathType: File
+    extraArgs:
+      audit-policy-file: /audit-config/audit-config.yaml
+      audit-log-path: "-" # log to standard out so that it gets captured by export-cluster-diagnostics
+      v: "4"
+      # To make sure the endpoints on our service are correct (this mostly matters for kubectl based
+      # installs where kapp is not doing magic changes to the deployment and service selectors).
+      # Setting this field to true makes it so that the API service will do the service cluster IP
+      # to endpoint IP translations internally instead of relying on the network stack (i.e. kube-proxy).
+      # The logic inside the API server is very straightforward - randomly pick an IP from the list
+      # of available endpoints. This means that over time, all endpoints associated with the service
+      # are exercised. For whatever reason, leaving this as false (i.e. use kube-proxy) appears to
+      # hide some network misconfigurations when used internally by the API server aggregation layer.
+      enable-aggregator-routing: "true"
+  controllerManager:
+    extraArgs:
+      v: "4"
+  scheduler:
+    extraArgs:
+      v: "4"
+nodes:
+- role: control-plane
+  extraPortMappings:
+    - protocol: TCP
+      # This same port number is hardcoded in the integration test setup
+      # when creating a Service on a kind cluster. It is used to talk to
+      # the supervisor app via HTTPS.
+      containerPort: 31243
+      hostPort: 12344
+      listenAddress: 127.0.0.1
+    - protocol: TCP
+      # This same port number is hardcoded in the integration test setup
+      # when creating a Service on a kind cluster.
It is used to talk to + # the supervisor app via HTTP. + # This is retained for the release-0.12 pipeline's use. The HTTP port + # cannot be exposed anymore on main. When the release-0.12 pipeline + # is not longer needed, then this port mapping can be removed. + containerPort: 31234 + hostPort: 12345 + listenAddress: 127.0.0.1 + - protocol: TCP + # This same port number is used for the second Pinniped deployment's + # supervisor HTTPS port, when there are multiple Pinnipeds deployed. + containerPort: 30243 + hostPort: 11344 + listenAddress: 127.0.0.1 + - protocol: TCP + # This same port number is used for the second Pinniped deployment's + # supervisor HTTP port, when there are multiple Pinnipeds deployed. + # This is retained for the release-0.12 pipeline's use. The HTTP port + # cannot be exposed anymore on main. When the release-0.12 pipeline + # is not longer needed, then this port mapping can be removed. + containerPort: 30234 + hostPort: 11345 + listenAddress: 127.0.0.1 + - protocol: TCP + # This same port number is hardcoded in the integration test setup + # when creating a Service on a kind cluster. It is used to talk to + # the Dex app. + containerPort: 31235 + hostPort: 12346 + listenAddress: 127.0.0.1 + # mount the audit config dir into kind + extraMounts: + - hostPath: /tmp/audit-config/ + containerPath: /audit-config +EOF + +# When KIND_NODE_IMAGE is specified, then use it. Otherwise choose the official kind image for the specified version of K8s. +if [[ "$KIND_NODE_IMAGE" != "" ]]; then + image="$KIND_NODE_IMAGE" +else + image="kindest/node:${K8S_VERSION}" +fi + +/var/lib/google/kind create cluster --wait 5m --kubeconfig /tmp/kubeconfig.yaml --image "$image" --config /tmp/kind.yaml |& tee /tmp/kind-cluster-create.log + +# Change the kubeconfig to make the server address match the public IP configured as controlPlaneEndpoint above. +sed -i "s/0\\.0\\.0\\.0/${PUBLIC_IP}/" /tmp/kubeconfig.yaml + +# The above YAML config file specifies one node, and Kind should never put the "control-plane" +# taint on the node for single-node clusters. Due to the issue described in +# https://github.com/kubernetes-sigs/kind/issues/1699#issuecomment-1048269832 +# we may not be able to rely on Kind automatically removing that taint, depending on which +# version of Kind and which version of the Kubernetes node image we're using, so to keep things +# simple we'll always remove that taint here. +node_name=$(/var/lib/google/kubectl get nodes -o jsonpath='{.items[0].metadata.name}' --kubeconfig /tmp/kubeconfig.yaml) +if [[ "$node_name" == "" ]]; then + echo "ERROR: Did not find any nodes in the new cluster." + exit 1 +fi +# Check if there are any taints. Normally there should be none for single-node clusters, +# unless we are running into the problem described above. +node_taints=$(/var/lib/google/kubectl get nodes -o jsonpath='{.items[0].spec.taints[?(@.key=="node-role.kubernetes.io/control-plane")]}' --kubeconfig /tmp/kubeconfig.yaml) +if [[ "$node_taints" != "" ]]; then + echo "Found taint node-role.kubernetes.io/control-plane Kind node: ${node_taints}" + # Remove the taint that is causing us trouble. + # Putting a minus sign at the end of the key name means remove all taints with that key. + /var/lib/google/kubectl taint nodes "$node_name" "node-role.kubernetes.io/control-plane-" --kubeconfig /tmp/kubeconfig.yaml +fi + +# Success! Save the kubeconfig file. 
+curl --retry-all-errors --retry 5 -X PUT --data "$(cat /tmp/kubeconfig.yaml)" \ + http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/kind/kubeconfig -H "Metadata-Flavor: Google" diff --git a/pipelines/shared-tasks/deploy-kind-cluster-vm/task.sh b/pipelines/shared-tasks/deploy-kind-cluster-vm/task.sh new file mode 100755 index 000000000..dc9d72348 --- /dev/null +++ b/pipelines/shared-tasks/deploy-kind-cluster-vm/task.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +INSTANCE_ARCH=${INSTANCE_ARCH:-"amd64"} +KIND_STARTUP_TIMEOUT_MINS=${KIND_STARTUP_TIMEOUT_MINS:-"15"} +STARTUP_SCRIPT="$PWD/$STARTUP_SCRIPT" +KIND_VERSION=$(cat kind-release/tag) +echo "Using kind version $KIND_VERSION" + +cd deploy-kind-cluster-vm-output + +gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$GCP_PROJECT" + +INSTANCE_NAME="kind-worker-$(openssl rand -hex 4)" + +echo "Creating $INSTANCE_NAME in $INSTANCE_ZONE with k8s_version=$KUBE_VERSION kind_version=$KIND_VERSION kind_node_image=$KIND_NODE_IMAGE ..." + +if [[ "$INSTANCE_ARCH" = "arm64" ]]; then + INSTANCE_TEMPLATE="kind-cluster-instance-arm64-v8" +else + INSTANCE_TEMPLATE="kind-cluster-instance-v8" +fi + +if ! gcloud compute instances create "${INSTANCE_NAME}" \ + --zone "${INSTANCE_ZONE}" \ + --source-instance-template "${INSTANCE_TEMPLATE}" \ + --metadata "k8s_version=$KUBE_VERSION,kind_version=$KIND_VERSION,kind_node_image=$KIND_NODE_IMAGE,enable-guest-attributes=TRUE" \ + --metadata-from-file "startup-script=$STARTUP_SCRIPT" \ + --labels "kind=$(echo "$KIND_VERSION" | tr . -),kube=$(echo "$KUBE_VERSION" | tr . -)"; +then + # Failed to create an instance. Sleep for a random number of seconds before finishing so we can retry this + # task several times without all the various jobs trying to create instances at the same moment. + random_seconds=$((( RANDOM % 30 + 1 ))) + echo "Sleeping $random_seconds seconds before a possible retry." + sleep "$random_seconds" + exit 1 +fi + +echo "$INSTANCE_NAME" > name + +echo "Waiting for kind cluster to start on new instance..." + +start_time_in_seconds_since_epoch=$(date +"%s") + +# gce-init.sh will either write the kubeconfig and the init_log, or will only write the init_log. +# Wait until the init_log appears, or until a timeout occurs. +while true; do + gcloud beta compute instances get-guest-attributes "${INSTANCE_NAME}" \ + --zone "${INSTANCE_ZONE}" \ + --query-path "kind/init_log" \ + --format="value(value)" \ + --verbosity critical > /tmp/init_log && echo "Found the init_log without finding the Kind kubeconfig." && break + + now_in_seconds_since_epoch=$(date +"%s") + if (( $((now_in_seconds_since_epoch - start_time_in_seconds_since_epoch)) > $((KIND_STARTUP_TIMEOUT_MINS * 60)) )); then + echo "Still no Kind kubeconfig or init_log available after waiting ${KIND_STARTUP_TIMEOUT_MINS} minutes. Giving up!" + exit 1 + fi + + echo -n . + sleep 3 +done + +echo +echo "Showing the instance's init_log..." +echo "----------------------------------" +cat /tmp/init_log +echo "----------------------------------" +echo + +# There will be a kubeconfig only if the gce-init.sh script succeeded. 
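If a worker VM gets stuck, the same guest-attribute handshake can also be queried by hand while debugging; the instance name and zone below are placeholders:

    # Read back what gce-init.sh published for a specific VM (hypothetical name/zone).
    gcloud beta compute instances get-guest-attributes kind-worker-1a2b3c4d \
      --zone us-central1-c --query-path "kind/init_log" --format "value(value)"
    gcloud beta compute instances get-guest-attributes kind-worker-1a2b3c4d \
      --zone us-central1-c --query-path "kind/kubeconfig" --format "value(value)" > /tmp/kubeconfig.yaml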
+gcloud beta compute instances get-guest-attributes "${INSTANCE_NAME}" \ + --zone "${INSTANCE_ZONE}" \ + --query-path "kind/kubeconfig" \ + --format="value(value)" \ + --verbosity critical > metadata && echo "Found the Kind kubeconfig for instance ${INSTANCE_NAME}." && exit 0 + +echo "Error: Did not find a Kind kubeconfig file for instance ${INSTANCE_NAME}." +exit 1 diff --git a/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml b/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml new file mode 100644 index 000000000..f4dd1a6dc --- /dev/null +++ b/pipelines/shared-tasks/deploy-kind-cluster-vm/task.yml @@ -0,0 +1,21 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci + - name: kind-release +outputs: + - name: deploy-kind-cluster-vm-output +params: + INSTANCE_ZONE: + INSTANCE_ARCH: # Can choose arm64. Defaults to amd64. + KUBE_VERSION: # Choose the version of Kube to install. + KIND_NODE_IMAGE: # When specified, must still specify KUBE_VERSION, which will only be used as a label in this case. + GCP_PROJECT: + GCP_USERNAME: + GCP_JSON_KEY: + STARTUP_SCRIPT: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/gce-init.sh +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-kind-cluster-vm/task.sh diff --git a/pipelines/shared-tasks/deploy-to-acceptance-gke/task.sh b/pipelines/shared-tasks/deploy-to-acceptance-gke/task.sh new file mode 100755 index 000000000..3cba49739 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-acceptance-gke/task.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# See https://github.com/concourse/registry-image-resource#in-fetch-the-images-rootfs-and-metadata +digest=$(cat ci-build-image/digest) + +pinniped_ci="$PWD/pinniped-ci" +pinniped_cluster_capability_file="$PWD/pinniped/test/cluster_capabilities/gke.yaml" + +gcloud auth activate-service-account "$GKE_USERNAME" --key-file <(echo "$GKE_JSON_KEY") --project "$PINNIPED_GCP_PROJECT" + +# https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke +export USE_GKE_GCLOUD_AUTH_PLUGIN=True +gcloud container clusters get-credentials "$GKE_CLUSTER_NAME" --zone us-central1-c --project "$PINNIPED_GCP_PROJECT" + +pushd pinniped >/dev/null + +# Create the image pull secret to template using ytt +image_pull_secret=$(kubectl create secret docker-registry dummy \ + --docker-server="$CI_BUILD_IMAGE_SERVER" \ + --docker-username="$CI_BUILD_IMAGE_USERNAME" \ + --docker-password="$CI_BUILD_IMAGE_PASSWORD" \ + --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]') + +if [[ "${TMC_API_TOKEN:-}" == "" ]]; then + # If the TMC API token is not set, then assume that we want to use the local user authenticator + # instead of the TMC webhook authenticator. + export DEPLOY_LOCAL_USER_AUTHENTICATOR="yes" +fi + +# This script uses the API token from the environment variable TMC_API_TOKEN, +# and the cluster name from the environment variable TMC_CLUSTER_NAME. +# +# Set the serving cert parameters to 1h20m and 1h so that we can validate that +# an aggressive cert rotation schedule doesn't mess up the cluster too bad. 
+CONCIERGE_NAMESPACE=concierge-acceptance \ + SUPERVISOR_NAMESPACE=supervisor-acceptance \ + SUPERVISOR_LOAD_BALANCER=yes \ + SUPERVISOR_LOAD_BALANCER_DNS_NAME="$LOAD_BALANCER_DNS_NAME" \ + SUPERVISOR_LOAD_BALANCER_STATIC_IP="$RESERVED_LOAD_BALANCER_STATIC_IP" \ + SUPERVISOR_INGRESS=yes \ + SUPERVISOR_INGRESS_DNS_NAME="$INGRESS_DNS_ENTRY_GCLOUD_NAME" \ + SUPERVISOR_INGRESS_STATIC_IP_NAME="$INGRESS_STATIC_IP_GCLOUD_NAME" \ + SUPERVISOR_INGRESS_PATH_PATTERN='/*' \ + IMAGE_PULL_SECRET="$image_pull_secret" \ + IMAGE_REPO="$CI_BUILD_IMAGE_NAME" \ + IMAGE_DIGEST="$digest" \ + API_SERVING_CERT_DURATION=4800 \ + API_SERVING_CERT_RENEW_BEFORE=3600 \ + PINNIPED_TEST_CLUSTER_CAPABILITY_FILE="$pinniped_cluster_capability_file" \ + "$pinniped_ci/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh" + +if [[ "${TMC_API_TOKEN:-}" != "" ]]; then + # Create a long-lived webhook IDP allowing TMC login via the TUA organization. + source /tmp/integration-test-env + cat </dev/null + +# Copy the env vars file that was output by the previous script which are needed during integration tests +cp /tmp/integration-test-env integration-test-env-vars/ + +# So that the tests can avoid using the GKE auth plugin, create an admin kubeconfig which uses certs (without the plugin). +# Get the cluster details back, including the admin certificate: +gcloud container clusters describe "$GKE_CLUSTER_NAME" --zone us-central1-c --format json >/tmp/cluster.json +# Make a new kubeconfig user "cluster-admin" using the admin cert. +jq -r .masterAuth.clientCertificate /tmp/cluster.json | base64 -d >/tmp/client.crt +jq -r .masterAuth.clientKey /tmp/cluster.json | base64 -d >/tmp/client.key +kubectl config set-credentials cluster-admin --client-certificate=/tmp/client.crt --client-key=/tmp/client.key +# Give the "client" user cluster-admin access in an idempotent way. +kubectl create clusterrolebinding test-client-is-admin --clusterrole cluster-admin --user client --dry-run=client -o yaml | kubectl apply -f - +# Set the kubeconfig context to use the cluster-admin user. +kubectl config set-context --current --user cluster-admin +# Write out the admin kubeconfig file to the task's output directory. +kubectl config view --minify --flatten -o yaml >kubeconfig/kubeconfig +# Give it the appropriate permissions. +chmod 0644 kubeconfig/kubeconfig diff --git a/pipelines/shared-tasks/deploy-to-acceptance-gke/task.yml b/pipelines/shared-tasks/deploy-to-acceptance-gke/task.yml new file mode 100644 index 000000000..2a8f28218 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-acceptance-gke/task.yml @@ -0,0 +1,106 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: ci-build-image +outputs: + - name: integration-test-env-vars + - name: kubeconfig +params: + # Used to get and manipulate the GKE acceptance cluster's kubeconfig. + PINNIPED_GCP_PROJECT: # project name + GKE_CLUSTER_NAME: # The name of the GKE cluster in gcloud. + GKE_USERNAME: + GKE_JSON_KEY: + + # Where to get the build image. Used to create an image pull Secret. + CI_BUILD_IMAGE_NAME: + CI_BUILD_IMAGE_SERVER: + CI_BUILD_IMAGE_USERNAME: + CI_BUILD_IMAGE_PASSWORD: + + # Use TMC's real webhook endpoint for some WebhookAuthenticator tests. + TMC_API_TOKEN: + TMC_CLUSTER_NAME: + + # Set up a LoadBalancer for the Supervisor. + RESERVED_LOAD_BALANCER_STATIC_IP: # An IP reserved for this purpose in our GCP project. 
+ LOAD_BALANCER_DNS_NAME: # A DNS entry in our GCP project for the above IP address. + # Set up an Ingress for the Supervisor, as an alternate way to access it. + INGRESS_STATIC_IP_GCLOUD_NAME: # The name of a static IP reservation in our GCP project used for this purpose. + INGRESS_DNS_ENTRY_GCLOUD_NAME: # A DNS entry in our GCP project for the IP address represented by the above static IP reservation name. + + # Set to a non-empty value to remove the CPU requests from these deployments. + SUPERVISOR_AND_CONCIERGE_NO_CPU_REQUEST: + + # needed when we are testing against active directory. + TEST_ACTIVE_DIRECTORY: + AWS_AD_HOST: + AWS_AD_DOMAIN: + AWS_AD_BIND_ACCOUNT_USERNAME: + AWS_AD_BIND_ACCOUNT_PASSWORD: + AWS_AD_USER_USER_PRINCIPAL_NAME: + AWS_AD_USER_PASSWORD: + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME: + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE: + AWS_AD_USER_EXPECTED_GROUPS_DN: + AWS_AD_USER_EXPECTED_GROUPS_CN: + AWS_AD_LDAPS_CA_BUNDLE: + AWS_AD_DEACTIVATED_USER_SAMACCOUNTNAME: + AWS_AD_DEACTIVATED_USER_PASSWORD: + AWS_AD_USER_EMAIL_ATTRIBUTE_VALUE: + AWS_AD_USER_DEFAULTNAMINGCONTEXT_DN: + AWS_AD_USERS_DN: + + # Only needed when wanting to test using Okta instead of Dex. + OKTA_CLI_CALLBACK: + OKTA_CLI_CLIENT_ID: + OKTA_ADDITIONAL_SCOPES: + OKTA_USERNAME_CLAIM: + OKTA_GROUPS_CLAIM: + OKTA_ISSUER: + OKTA_PASSWORD: + OKTA_SUPERVISOR_CLIENT_ID: + OKTA_SUPERVISOR_CLIENT_SECRET: + OKTA_USERNAME: + OKTA_GROUPS: + OKTA_SUPERVISOR_CALLBACK: + + # only needed when wanting to test using Jumpcloud instead of OpenLDAP. + JUMPCLOUD_LDAP_HOST: + JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST: + JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME: + JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD: + JUMPCLOUD_LDAP_USERS_SEARCH_BASE: + JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE: + JUMPCLOUD_LDAP_USER_DN: + JUMPCLOUD_LDAP_USER_CN: + JUMPCLOUD_LDAP_USER_PASSWORD: + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME: + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE: + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME: + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN: + + # only needed when wanting to test using GitHub as an identity provider + PINNIPED_TEST_GITHUB_APP_CLIENT_ID: + PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET: + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID: + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET: + PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL: + PINNIPED_TEST_GITHUB_USER_USERNAME: + PINNIPED_TEST_GITHUB_USER_PASSWORD: + PINNIPED_TEST_GITHUB_USER_OTP_SECRET: + PINNIPED_TEST_GITHUB_USERID: + PINNIPED_TEST_GITHUB_ORG: + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES: + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS: + +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-to-acceptance-gke/task.sh diff --git a/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.sh b/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.sh new file mode 100755 index 000000000..90511ff06 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.sh @@ -0,0 +1,335 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# extract_env takes a JSON object representing an client.authentication.k8s.io/v1beta1 +# exec credential config (as parameter $1) and pulls out the env var value for the +# provided name (as parameter $2). 
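For context, a rough usage sketch of the helper defined just below; the JSON here is a made-up exec credential config, not output from any real cluster:

    cred_config='{"command":"pinniped","env":[{"name":"SOME_ENV_NAME","value":"some-value"}]}'
    extract_env_value "$cred_config" "SOME_ENV_NAME"   # prints "some-value"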
+function extract_env_value() { + filter=".env[] | select(.name==\"$2\") | .value" + echo "$1" | jq -r "$filter" +} + +function print_or_redact_doc() { + doc_kind=$(echo "$1" | awk '/^kind: / {print $2}') + if [[ -z "$doc_kind" ]]; then + echo "warning: " + elif [[ $doc_kind == "Secret" || $doc_kind == "secret" ]]; then + echo + echo "---" + echo "" + else + printf "%s\n" "$1" + fi +} + +function print_redacted_manifest() { + doc="" + while IFS="" read -r line || [ -n "$line" ]; do + if [[ $line == "---" ]]; then + if [[ -n "$doc" ]]; then + print_or_redact_doc "$doc" + fi + doc="" + fi + doc=$(printf "%s\n%s" "$doc" "$line") + done <"$1" + + print_or_redact_doc "$doc" +} + +export KUBECONFIG="$PWD/cluster-pool/metadata" + +# See https://github.com/concourse/registry-image-resource#in-fetch-the-images-rootfs-and-metadata +export IMAGE_DIGEST="$(cat ci-build-image/digest)" +export IMAGE_REPO="$(cat ci-build-image/repository)" + +CLUSTER_CAPABILITIES_PATH="$PWD/$CLUSTER_CAPABILITIES_PATH" +if [ -n "$CLUSTER_CAPABILITIES" ]; then + echo "$CLUSTER_CAPABILITIES" >/tmp/cluster-capabilities.yaml + CLUSTER_CAPABILITIES_PATH=/tmp/cluster-capabilities.yaml +fi + +concierge_app_name=${PINNIPED_CONCIERGE_APP_NAME:-"pinniped-concierge"} +concierge_namespace=${concierge_app_name} +concierge_custom_labels="{myConciergeCustomLabelName: myConciergeCustomLabelValue}" +supervisor_app_name=${PINNIPED_SUPERVISOR_APP_NAME:-"pinniped-supervisor"} +supervisor_namespace=${supervisor_app_name} +supervisor_custom_labels="{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}" +discovery_url="${PINNIPED_DISCOVERY_URL:-null}" +manifest=/tmp/manifest.yaml + +test_username="test-username" +test_groups="test-group-0,test-group-1" +set +o pipefail +test_password="$(cat /dev/urandom | env LC_CTYPE=C tr -dc 'a-z0-9' | fold -w 32 | head -n 1)" +set -o pipefail +if [[ ${#test_password} -ne 32 ]]; then + log_error "Could not create random test user password" + exit 1 +fi +test_user_token="${test_username}:${test_password}" + +# Print for debugging +kubectl config current-context +kubectl version +kubectl cluster-info + +dex_test_password="${PINNIPED_DEX_TEST_USER_PASSWORD:-$(openssl rand -hex 16)}" +ldap_test_password="${PINNIPED_LDAP_TEST_USER_PASSWORD:-$(openssl rand -hex 16)}" + +# deploy local user authenticator +pushd pinniped/deploy/local-user-authenticator >/dev/null +echo "Creating install-local-user-authenticator.yaml..." +ytt --file . \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" >../../../deployment-yamls/install-local-user-authenticator.yaml +popd +pushd deployment-yamls >/dev/null +echo "Deploying local user authenticator to the cluster..." +kubectl apply -f install-local-user-authenticator.yaml +kubectl wait --for=condition=available --timeout=60s -n local-user-authenticator deployments/local-user-authenticator + +# Always create a secret. +echo "Creating test user '$test_username'..." 
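A note on the set +o pipefail / set -o pipefail bracketing above: head -n 1 exits after reading one line, which sends SIGPIPE to the earlier commands in the pipeline (cat, tr, fold), and with pipefail enabled that would fail the whole script under set -e even though the password was generated correctly. A shorter equivalent that avoids the toggling (an alternative sketch, not what this task uses) would be:

    test_password="$(openssl rand -hex 16)"   # 32 lowercase hex characters, so the length check above still passes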
+kubectl create secret generic "$test_username" \ + --namespace local-user-authenticator \ + --from-literal=groups="$test_groups" \ + --from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \ + --dry-run=client \ + --output yaml | + kubectl apply -f - + +# Override the TMC webhook settings to use the local-user-authenticator instead +webhook_url="https://local-user-authenticator.local-user-authenticator.svc.cluster.local/authenticate" + +# Sometimes the local-user-authenticator pod hasn't generated the serving certificate yet, so we poll until it has. +set +o pipefail +while ! kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator >/dev/null; do + echo "Waiting for local-user-authenticator Secret to be created..." + sleep 1 +done +set -o pipefail + +webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')" + +popd >/dev/null + +# always deploy dex +# +# Deploy tools +# +pushd pinniped/test/deploy/tools >/dev/null + +test_supervisor_upstream_oidc_callback_url="https://${supervisor_app_name}-clusterip.${supervisor_namespace}.svc.cluster.local/some/path/callback" + +supervisor_redirect_uris="[ + ${test_supervisor_upstream_oidc_callback_url} + ]" + +echo "Deploying Tools to the cluster..." +ytt --file . \ + --data-value-yaml "supervisor_redirect_uris=${supervisor_redirect_uris}" \ + --data-value "pinny_ldap_password=$ldap_test_password" \ + --data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \ + >"$manifest" + +echo +echo "Full Tools manifest with Secrets redacted..." +echo "--------------------------------------------------------------------------------" +print_redacted_manifest $manifest +echo "--------------------------------------------------------------------------------" +echo + +set -x +kapp deploy --yes --app tools --diff-changes --file "$manifest" +{ set +x; } 2>/dev/null + +dex_ca_bundle="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem" | base64decode}}' | base64)" +pinniped_test_tools_namespace="tools" +test_cli_oidc_callback_url="http://127.0.0.1:48095/callback" +test_cli_oidc_client_id="pinniped-cli" +test_cli_oidc_issuer_ca_bundle="${dex_ca_bundle}" +test_cli_oidc_issuer="https://dex.tools.svc.cluster.local/dex" +test_cli_oidc_password="${dex_test_password}" +test_cli_oidc_username="pinny@example.com" +test_proxy="http://127.0.0.1:12346" +test_supervisor_upstream_oidc_client_id="pinniped-supervisor" +test_supervisor_upstream_oidc_client_secret="pinniped-supervisor-secret" +test_supervisor_upstream_oidc_additional_scopes="offline_access,email" +test_supervisor_upstream_oidc_username_claim="email" +test_supervisor_upstream_oidc_groups_claim="groups" +test_supervisor_upstream_oidc_issuer_ca_bundle="${dex_ca_bundle}" +test_supervisor_upstream_oidc_issuer="https://dex.tools.svc.cluster.local/dex" +test_supervisor_upstream_oidc_password="${dex_test_password}" +test_supervisor_upstream_oidc_username="pinny@example.com" +test_supervisor_upstream_oidc_groups="" # Dex's local user store does not let us configure groups. 
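For reference, a minimal standalone illustration of the htpasswd/sed idiom used for the password hashes above; the password is a placeholder and the printed hash differs on every run:

    # htpasswd prints "user:hash"; "x" is a throwaway username and the sed strips the "x:" prefix,
    # leaving only the bcrypt hash that local-user-authenticator and Dex expect.
    hash="$(htpasswd -nbBC 10 x example-password | sed -e 's/^x://')"
    echo "$hash"   # e.g. $2y$10$...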
+pinniped_test_ldap_host="ldap.tools.svc.cluster.local" +pinniped_test_ldap_starttls_only_host="ldapstarttls.tools.svc.cluster.local" +pinniped_test_ldap_ldaps_ca_bundle="${dex_ca_bundle}" +pinniped_test_ldap_bind_account_username="cn=admin,dc=pinniped,dc=dev" +pinniped_test_ldap_bind_account_password=password +pinniped_test_ldap_users_search_base="ou=users,dc=pinniped,dc=dev" +pinniped_test_ldap_groups_search_base="ou=groups,dc=pinniped,dc=dev" +pinniped_test_ldap_user_dn="cn=pinny,ou=users,dc=pinniped,dc=dev" +pinniped_test_ldap_user_cn="pinny" +pinniped_test_ldap_user_password=${ldap_test_password} +pinniped_test_ldap_user_unique_id_attribute_name="uidNumber" +pinniped_test_ldap_user_unique_id_attribute_value="1000" +pinniped_test_ldap_user_email_attribute_name="mail" +pinniped_test_ldap_user_email_attribute_value="pinny.ldap@example.com" +pinniped_test_ldap_expected_direct_groups_dn="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev" +pinniped_test_ldap_expected_indirect_groups_dn="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev" +pinniped_test_ldap_expected_direct_groups_cn="ball-game-players;seals" +pinniped_test_ldap_expected_direct_posix_groups_cn="ball-game-players-posix;seals-posix" +pinniped_test_ldap_expected_indirect_groups_cn="pinnipeds;mammals" + +popd >/dev/null + +# deploy concierge +pushd pinniped/deploy/concierge >/dev/null +echo "Creating concierge deployment yamls..." +ytt --file . \ + --data-value "app_name=$concierge_app_name" \ + --data-value "namespace=$concierge_namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" \ + --data-value-yaml "image_pull_dockerconfigjson=${IMAGE_PULL_SECRET:-}" \ + --data-value "api_serving_certificate_duration_seconds=${API_SERVING_CERT_DURATION:-2592000}" \ + --data-value "api_serving_certificate_renew_before_seconds=${API_SERVING_CERT_RENEW_BEFORE:-2160000}" \ + --data-value "log_level=debug" \ + --data-value-yaml "custom_labels=$concierge_custom_labels" \ + --data-value "discovery_url=$discovery_url" >../../../deployment-yamls/install-pinniped-concierge.yaml + +popd +pushd deployment-yamls >/dev/null + +# create the two yaml files for kubectl based on the kapp one with everything in it +yq eval 'select(.kind == "CustomResourceDefinition" or .kind == "Namespace" or .kind == "ServiceAccount")' install-pinniped-concierge.yaml >install-pinniped-concierge-crds.yaml +yq eval 'select(.kind != "CustomResourceDefinition" and .kind != "Namespace" and .kind != "ServiceAccount")' install-pinniped-concierge.yaml > install-pinniped-concierge-resources.yaml + +set -x +echo "Deploying concierge crds to the cluster..." +kubectl apply -f install-pinniped-concierge-crds.yaml +kubectl wait --for condition="established" --timeout=60s crd -l app=pinniped-concierge +echo "Deploying concierge resources to the cluster..." +kubectl apply -f install-pinniped-concierge-resources.yaml +kubectl wait --for condition="available" --timeout=60s -n pinniped-concierge deployments/pinniped-concierge + +# deploy supervisor + +# set ytt values related to ingress. +supervisor_ytt_service_flags=() + +# We assume we are running on +# kind, and therefore expect to talk to the supervisor via NodePort and ClusterIP services. +# This nodePort is the same port number is hardcoded in the port forwarding of our kind configuration. 
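To make the port wiring easier to follow: the nodePort value set just below (31243) matches the kind extraPortMappings containerPort in gce-init.sh, which kind maps to hostPort 12344, which in turn is what the supervisor HTTPS address exported further down points at. A quick smoke test of that chain, assumed to be run wherever port 12344 is reachable and only after the Supervisor is deployed, might look like:

    curl -fsk https://localhost:12344/healthz   # should succeed once the Supervisor is up and the port mapping or port-forward is in place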
+supervisor_ytt_service_flags+=("--data-value-yaml=service_https_nodeport_port=443")
+supervisor_ytt_service_flags+=("--data-value-yaml=service_https_nodeport_nodeport=31243")
+supervisor_ytt_service_flags+=("--data-value-yaml=service_https_clusterip_port=443")
+
+popd
+
+pushd pinniped/deploy/supervisor >/dev/null
+echo "Creating install-pinniped-supervisor.yaml..."
+ytt --file . \
+  --data-value "app_name=$supervisor_app_name" \
+  --data-value "namespace=$supervisor_namespace" \
+  --data-value "image_repo=$IMAGE_REPO" \
+  --data-value "image_digest=${IMAGE_DIGEST:-}" \
+  --data-value "image_tag=${IMAGE_TAG:-}" \
+  --data-value-yaml "image_pull_dockerconfigjson=${IMAGE_PULL_SECRET:-}" \
+  --data-value "log_level=debug" \
+  --data-value-yaml "custom_labels=$supervisor_custom_labels" \
+  "${supervisor_ytt_service_flags[@]}" \
+  >../../../deployment-yamls/install-pinniped-supervisor.yaml
+popd
+pushd deployment-yamls >/dev/null
+
+echo "Deploying supervisor to the cluster..."
+kubectl apply -f install-pinniped-supervisor.yaml
+kubectl wait --for condition="available" --timeout=60s -n pinniped-supervisor deployments/pinniped-supervisor
+popd >/dev/null
+set +x
+
+# When we test on kind, we use "kubectl port-forward" in the task script to expose these ports for the integration tests.
+supervisor_https_address='https://localhost:12344'
+supervisor_https_ingress_address=
+supervisor_https_ingress_ca_bundle=
+
+#
+# Set up the integration test env vars
+#
+pinniped_cluster_capability_file_content=$(cat "pinniped/test/cluster_capabilities/kind.yaml")
+
+cat <<EOF >/tmp/integration-test-env
+export PINNIPED_TEST_TOOLS_NAMESPACE='${pinniped_test_tools_namespace}'
+export PINNIPED_TEST_CONCIERGE_NAMESPACE='${concierge_namespace}'
+export PINNIPED_TEST_CONCIERGE_APP_NAME='${concierge_app_name}'
+export PINNIPED_TEST_CONCIERGE_CUSTOM_LABELS='${concierge_custom_labels}'
+export PINNIPED_TEST_USER_USERNAME='${test_username}'
+export PINNIPED_TEST_USER_GROUPS='${test_groups}'
+export PINNIPED_TEST_USER_TOKEN='${test_user_token}'
+export PINNIPED_TEST_WEBHOOK_ENDPOINT='${webhook_url}'
+export PINNIPED_TEST_WEBHOOK_CA_BUNDLE='${webhook_ca_bundle}'
+export PINNIPED_TEST_SUPERVISOR_NAMESPACE='${supervisor_namespace}'
+export PINNIPED_TEST_SUPERVISOR_APP_NAME='${supervisor_app_name}'
+export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='${supervisor_custom_labels}'
+export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS='${supervisor_https_address}'
+export PINNIPED_TEST_SUPERVISOR_HTTPS_INGRESS_ADDRESS='${supervisor_https_ingress_address}'
+export PINNIPED_TEST_SUPERVISOR_HTTPS_INGRESS_CA_BUNDLE='${supervisor_https_ingress_ca_bundle}'
+export PINNIPED_TEST_PROXY='${test_proxy}'
+export PINNIPED_TEST_LDAP_HOST='${pinniped_test_ldap_host}'
+export PINNIPED_TEST_LDAP_STARTTLS_ONLY_HOST='${pinniped_test_ldap_starttls_only_host}'
+export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE='${pinniped_test_ldap_ldaps_ca_bundle}'
+export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME='${pinniped_test_ldap_bind_account_username}'
+export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD='${pinniped_test_ldap_bind_account_password}'
+export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE='${pinniped_test_ldap_users_search_base}'
+export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE='${pinniped_test_ldap_groups_search_base}'
+export PINNIPED_TEST_LDAP_USER_DN='${pinniped_test_ldap_user_dn}'
+export PINNIPED_TEST_LDAP_USER_CN='${pinniped_test_ldap_user_cn}'
+export PINNIPED_TEST_LDAP_USER_PASSWORD='${pinniped_test_ldap_user_password}'
+export
PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME='${pinniped_test_ldap_user_unique_id_attribute_name}' +export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE='${pinniped_test_ldap_user_unique_id_attribute_value}' +export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME='${pinniped_test_ldap_user_email_attribute_name}' +export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE='${pinniped_test_ldap_user_email_attribute_value}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN='${pinniped_test_ldap_expected_direct_groups_dn}' +export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN='${pinniped_test_ldap_expected_indirect_groups_dn}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN='${pinniped_test_ldap_expected_direct_groups_cn}' +export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN='${pinniped_test_ldap_expected_direct_posix_groups_cn}' +export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN='${pinniped_test_ldap_expected_indirect_groups_cn}' +export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL='${test_cli_oidc_callback_url}' +export PINNIPED_TEST_CLI_OIDC_CLIENT_ID='${test_cli_oidc_client_id}' +export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE='${test_cli_oidc_issuer_ca_bundle}' +export PINNIPED_TEST_CLI_OIDC_ISSUER='${test_cli_oidc_issuer}' +export PINNIPED_TEST_CLI_OIDC_PASSWORD='${test_cli_oidc_password}' +export PINNIPED_TEST_CLI_OIDC_USERNAME='${test_cli_oidc_username}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CALLBACK_URL='${test_supervisor_upstream_oidc_callback_url}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES='${test_supervisor_upstream_oidc_additional_scopes}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM='${test_supervisor_upstream_oidc_username_claim}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_GROUPS_CLAIM='${test_supervisor_upstream_oidc_groups_claim}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_ID='${test_supervisor_upstream_oidc_client_id}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_SECRET='${test_supervisor_upstream_oidc_client_secret}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE='${test_supervisor_upstream_oidc_issuer_ca_bundle}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER='${test_supervisor_upstream_oidc_issuer}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD='${test_supervisor_upstream_oidc_password}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME='${test_supervisor_upstream_oidc_username}' +export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_EXPECTED_GROUPS='${test_supervisor_upstream_oidc_groups}' +export PINNIPED_TEST_SHELL_CONTAINER_IMAGE="ghcr.io/pinniped-ci-bot/test-kubectl:latest" + +read -r -d '' PINNIPED_TEST_CLUSTER_CAPABILITY_YAML << PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF || true +${pinniped_cluster_capability_file_content} +PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF + +export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML +EOF + +# Copy the env vars file that was output by the previous script which are needed during integration tests +cp /tmp/integration-test-env integration-test-env-vars/ +cp "$KUBECONFIG" kubeconfig/kubeconfig +cp "$PWD/cluster-pool/name" kubeconfig/cluster-name diff --git a/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml b/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml new file mode 100644 index 000000000..835fd0b10 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.yml @@ -0,0 +1,22 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: ci-build-image + - name: cluster-pool + - name: pinniped-password + optional: true +outputs: + - name: integration-test-env-vars + - name: kubeconfig + - name: deployment-yamls +params: + # one of these should be set + CLUSTER_CAPABILITIES: + CLUSTER_CAPABILITIES_PATH: +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-to-integration-kubectl-apply/task.sh diff --git a/pipelines/shared-tasks/deploy-to-integration/task.sh b/pipelines/shared-tasks/deploy-to-integration/task.sh new file mode 100755 index 000000000..c247608d4 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-integration/task.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +export KUBECONFIG="$PWD/cluster-pool/metadata" + +# See https://github.com/concourse/registry-image-resource#in-fetch-the-images-rootfs-and-metadata +export IMAGE_DIGEST="$(cat ci-build-image/digest)" +export IMAGE_REPO="$(cat ci-build-image/repository)" + +pinniped_ci="$PWD/pinniped-ci" + +CLUSTER_CAPABILITIES_PATH="$PWD/$CLUSTER_CAPABILITIES_PATH" +if [ -n "$CLUSTER_CAPABILITIES" ]; then + echo "$CLUSTER_CAPABILITIES" >/tmp/cluster-capabilities.yaml + CLUSTER_CAPABILITIES_PATH=/tmp/cluster-capabilities.yaml +fi + +if [[ -f pinniped-password/pinniped-dex-password ]]; then + pinniped_dex_test_user_password=$(cat pinniped-password/pinniped-dex-password) + pinniped_ldap_test_user_password=$(cat pinniped-password/pinniped-ldap-password) +else + pinniped_dex_test_user_password="" + pinniped_ldap_test_user_password="" +fi + +# If we are deploying this workload a second time, make sure we use a different +# app name and namespace for the Concierge and the Supervisor so that the two +# kapp apps don't clash with each other. +concierge_app_name=${PINNIPED_CONCIERGE_APP_NAME:-"concierge"} +supervisor_app_name=${PINNIPED_SUPERVISOR_APP_NAME:-"supervisor"} +concierge_namespace=${concierge_app_name} +supervisor_namespace=${supervisor_app_name} + +pushd pinniped >/dev/null + +PINNIPED_TEST_CLUSTER_CAPABILITY_FILE="$CLUSTER_CAPABILITIES_PATH" \ + DEPLOY_LOCAL_USER_AUTHENTICATOR=yes \ + DEPLOY_TEST_TOOLS=yes \ + CONCIERGE_APP_NAME="${concierge_app_name}" \ + CONCIERGE_NAMESPACE="${concierge_namespace}" \ + SUPERVISOR_APP_NAME="${supervisor_app_name}" \ + SUPERVISOR_NAMESPACE="${supervisor_namespace}" \ + PINNIPED_DEX_TEST_USER_PASSWORD="${pinniped_dex_test_user_password}" \ + PINNIPED_LDAP_TEST_USER_PASSWORD="${pinniped_ldap_test_user_password}" \ + "$pinniped_ci/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh" + +popd >/dev/null + +# Copy the env vars file that was output by the previous script which are needed during integration tests +cp /tmp/integration-test-env integration-test-env-vars/ +cp "$KUBECONFIG" kubeconfig/kubeconfig +cp "$PWD/cluster-pool/name" kubeconfig/cluster-name diff --git a/pipelines/shared-tasks/deploy-to-integration/task.yml b/pipelines/shared-tasks/deploy-to-integration/task.yml new file mode 100644 index 000000000..e949e2d34 --- /dev/null +++ b/pipelines/shared-tasks/deploy-to-integration/task.yml @@ -0,0 +1,105 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: ci-build-image + - name: cluster-pool + - name: pinniped-password + optional: true +outputs: + - name: integration-test-env-vars + - name: kubeconfig +params: + USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR: + + # one of these should be set + CLUSTER_CAPABILITIES: + CLUSTER_CAPABILITIES_PATH: + + # only needed for unusual test cases involving multiple Pinnipeds or custom API groups + PINNIPED_API_GROUP_SUFFIX: + SECONDARY_DEPLOY: + PINNIPED_SUPERVISOR_APP_NAME: + PINNIPED_CONCIERGE_APP_NAME: + SECONDARY_SUPERVISOR_APP_NAME: + SECONDARY_SUPERVISOR_NAMESPACE: + PINNIPED_SUPERVISOR_HTTP_NODEPORT: + PINNIPED_SUPERVISOR_HTTPS_NODEPORT: + + # only needed for unusual case of wanting to test the HTTPS_PROXY settings + FIREWALL_IDPS: + + # needed when we are testing against active directory. + TEST_ACTIVE_DIRECTORY: + AWS_AD_HOST: + AWS_AD_DOMAIN: + AWS_AD_BIND_ACCOUNT_USERNAME: + AWS_AD_BIND_ACCOUNT_PASSWORD: + AWS_AD_USER_USER_PRINCIPAL_NAME: + AWS_AD_USER_PASSWORD: + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME: + AWS_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE: + AWS_AD_USER_EXPECTED_GROUPS_DN: + AWS_AD_USER_EXPECTED_GROUPS_CN: + AWS_AD_LDAPS_CA_BUNDLE: + AWS_AD_DEACTIVATED_USER_SAMACCOUNTNAME: + AWS_AD_DEACTIVATED_USER_PASSWORD: + AWS_AD_USER_EMAIL_ATTRIBUTE_VALUE: + AWS_AD_USER_DEFAULTNAMINGCONTEXT_DN: + AWS_AD_USERS_DN: + + # Only needed when wanting to test using Okta instead of Dex. + # Note that this task does not accept OKTA_SUPERVISOR_CALLBACK. Not needed because + # the value of that variable can be determined from other variables. This task always + # deploys the tools namespace, so the Supervisor callback URL will use the + # squid proxy to access the Supervisor's callback endpoint. + OKTA_CLI_CALLBACK: + OKTA_CLI_CLIENT_ID: + OKTA_ADDITIONAL_SCOPES: + OKTA_USERNAME_CLAIM: + OKTA_GROUPS_CLAIM: + OKTA_ISSUER: + OKTA_PASSWORD: + OKTA_SUPERVISOR_CLIENT_ID: + OKTA_SUPERVISOR_CLIENT_SECRET: + OKTA_USERNAME: + OKTA_GROUPS: + + # only needed when wanting to test using Jumpcloud instead of OpenLDAP. 
+ JUMPCLOUD_LDAP_HOST: + JUMPCLOUD_LDAP_STARTTLS_ONLY_HOST: + JUMPCLOUD_LDAP_BIND_ACCOUNT_USERNAME: + JUMPCLOUD_LDAP_BIND_ACCOUNT_PASSWORD: + JUMPCLOUD_LDAP_USERS_SEARCH_BASE: + JUMPCLOUD_LDAP_GROUPS_SEARCH_BASE: + JUMPCLOUD_LDAP_USER_DN: + JUMPCLOUD_LDAP_USER_CN: + JUMPCLOUD_LDAP_USER_PASSWORD: + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME: + JUMPCLOUD_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE: + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_NAME: + JUMPCLOUD_LDAP_USER_EMAIL_ATTRIBUTE_VALUE: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_DN: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_GROUPS_CN: + JUMPCLOUD_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN: + + # only needed when wanting to test using GitHub as an identity provider + PINNIPED_TEST_GITHUB_APP_CLIENT_ID: + PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET: + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID: + PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET: + PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL: + PINNIPED_TEST_GITHUB_USER_USERNAME: + PINNIPED_TEST_GITHUB_USER_PASSWORD: + PINNIPED_TEST_GITHUB_USER_OTP_SECRET: + PINNIPED_TEST_GITHUB_USERID: + PINNIPED_TEST_GITHUB_ORG: + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES: + PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS: + +run: + path: pinniped-ci/pipelines/shared-tasks/deploy-to-integration/task.sh diff --git a/pipelines/shared-tasks/detach-cluster/task.sh b/pipelines/shared-tasks/detach-cluster/task.sh new file mode 100755 index 000000000..5fbef1ed4 --- /dev/null +++ b/pipelines/shared-tasks/detach-cluster/task.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +tmc_api_token="$(cat tmc-api-token-and-tmc-cluster-name/tmc-api-token)" +tmc_cluster_name="$(cat tmc-api-token-and-tmc-cluster-name/tmc-cluster-name)" + +export TMC_API_TOKEN="$tmc_api_token" + +tmc login --no-configure --stg-stable --name detach-cluster-context +if ! tmc cluster list --name "$tmc_cluster_name" | grep -q 'No clusters to list'; then + tmc cluster delete --forget "$tmc_cluster_name" +else + echo "note: cluster '$tmc_cluster_name' does not exist, skipping detachment" +fi diff --git a/pipelines/shared-tasks/detach-cluster/task.yml b/pipelines/shared-tasks/detach-cluster/task.yml new file mode 100644 index 000000000..dbdd7f710 --- /dev/null +++ b/pipelines/shared-tasks/detach-cluster/task.yml @@ -0,0 +1,10 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: tmc-api-token-and-tmc-cluster-name + - name: pinniped-ci +run: + path: pinniped-ci/pipelines/shared-tasks/detach-cluster/task.sh diff --git a/pipelines/shared-tasks/export-cluster-diagnostics/task.sh b/pipelines/shared-tasks/export-cluster-diagnostics/task.sh new file mode 100755 index 000000000..6134cba1c --- /dev/null +++ b/pipelines/shared-tasks/export-cluster-diagnostics/task.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail +export KUBECONFIG="$PWD/cluster-pool/metadata" + +gcloud auth activate-service-account "$GCP_USERNAME" --key-file <(echo "$GCP_JSON_KEY") --project "$GCP_PROJECT" + +# Make a temp directory for the pod log files. +output_dir="$(mktemp -d)" + +# Get a list all the cluster pods with each line containing " ". 
+kubectl get pods -A -o custom-columns=ns:.metadata.namespace,name:.metadata.name \ + | tail +2 \ + | while read -r ns name ; do + echo "collecting pod logs from $ns/$name..." + mkdir -p "$output_dir/logs/$ns" + kubectl logs --all-containers -n "$ns" "$name" > "$output_dir/logs/$ns/$name.log" || true + kubectl logs --all-containers --previous -n "$ns" "$name" > "$output_dir/logs/$ns/$name.previous.log" 2>/dev/null || true +done + +# Delete any empty log files. +find "$output_dir/logs" -name "*.log" -size 0 -delete + +# Dump all Kubernetes resources (except Secrets) into ./resources/TYPE.json while ignoring discovery errors +mkdir -p "$output_dir/resources" +resources="$(kubectl api-resources --verbs=list -o name || true)" +echo -n "${resources}" \ + | grep -v secrets \ + | xargs -P4 -n1 -I{} sh -c "kubectl get --ignore-not-found -A -o json {} > $output_dir/resources/{}.json" + +# Dump secret metadata but not the actual contents +kubectl get --ignore-not-found -A -o wide secrets > $output_dir/resources/secrets.txt + +# Compress the logs into a .tgz file in the output directory. +random_string="$(openssl rand -hex 4)" +output_tgz="cluster-diagnostics-$random_string.tgz" +tar -czf "$output_tgz" -C "$output_dir" . + +# Upload the files into the GCS bucket under YYYY/MM/DD/cluster-diagnostics-XXXXXXXX.tgz +output_url_path="$(date +%Y)/$(date +%m)/$(date +%d)" +output_tgz_path="$output_url_path/$output_tgz" +gsutil cp "$output_tgz" "gs://$GCS_BUCKET/$output_tgz_path" + +if [ -d test-output ] ; then + # Take test output and make list of test successes and failures. This should include + # test name and time elapsed. + < test-output/testoutput.log jq -s 'map(select((.Action == "fail") or (.Action == "pass")))' > results.log + output_json_path="$output_url_path/results-$random_string.json" + gsutil cp results.log "gs://$GCS_BUCKET/$output_json_path" + results_link="https://storage.googleapis.com/${GCS_BUCKET}/${output_json_path}" +else + # Some "tests" don't actually run any Go tests, so there's nothing to upload here. + results_link="" +fi + +cat <&1 > install.log || cat install.log + + THIS_VERSION="v$(cat release-semver/version)" + PREVIOUS_VERSION="v$(cat previous-release-semver/version)" + + echo "$THIS_VERSION" >> release-info/version-with-v + + echo "$THIS_VERSION" >> release-info/image-tags + echo "v$(cat release-semver/version | cut -d'.' -f1-2)" >> release-info/image-tags + if [[ $RELEASE_TYPE == "minor" ]]; then + # When cutting a patch release, it is not safe to assume that it will become the newest + # release overall for that major line, because there could be a newer minor release already, + # so skip tagging with the major line for patch releases. + echo "v$(cat release-semver/version | cut -d'.' -f1)" >> release-info/image-tags + fi + + cat < + A complete list of changes can be found [here](https://github.com/vmware-tanzu/pinniped/compare/$PREVIOUS_VERSION...$THIS_VERSION). + + ## Acknowledgements + + - *TODO*: Did anyone outside the core team contribute code, bug reports, ideas, advice, etc. to this release? Thank them here and "at mention" them if possible. + + ## Note: All Commits (TODO: remove this section) + + *TODO*: Remove this section before making this draft public. This list of commits is only intended to help edit the sections above. + + EOT + + LAST_REF="$PREVIOUS_VERSION" + if ! 
git -C pinniped show-ref "$LAST_REF" ; then + LAST_REF="$(git -C pinniped rev-list --max-parents=0 HEAD)" + fi + + git -C pinniped log --no-decorate --pretty='format:%h - %an - %s' "$LAST_REF..HEAD" | tee -a release-info/body diff --git a/pipelines/shared-tasks/generate-pinniped-password/task.sh b/pipelines/shared-tasks/generate-pinniped-password/task.sh new file mode 100755 index 000000000..e14c433bc --- /dev/null +++ b/pipelines/shared-tasks/generate-pinniped-password/task.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +openssl rand -hex 16 > pinniped-password/pinniped-dex-password +openssl rand -hex 16 > pinniped-password/pinniped-ldap-password diff --git a/pipelines/shared-tasks/generate-pinniped-password/task.yml b/pipelines/shared-tasks/generate-pinniped-password/task.yml new file mode 100644 index 000000000..d2a6fe827 --- /dev/null +++ b/pipelines/shared-tasks/generate-pinniped-password/task.yml @@ -0,0 +1,12 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci +outputs: + - name: pinniped-password +params: +run: + path: pinniped-ci/pipelines/shared-tasks/generate-pinniped-password/task.sh \ No newline at end of file diff --git a/pipelines/shared-tasks/install-and-configure-cert-manager/sample-federation-domain.yaml b/pipelines/shared-tasks/install-and-configure-cert-manager/sample-federation-domain.yaml new file mode 100644 index 000000000..207a7cad3 --- /dev/null +++ b/pipelines/shared-tasks/install-and-configure-cert-manager/sample-federation-domain.yaml @@ -0,0 +1,26 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Here is an example FederationDomain which will use the letsencrypt certs created by this task. +# +# Use `kubectl apply -f sample-federation-domain.yaml` to apply it to the acceptance cluster. +# +# Don't forget to `kubectl delete -f sample-federation-domain.yaml` when you are done +# with manual testing, otherwise it will interfere with the integration tests when +# CI runs them on the acceptance cluster. +# +# You should be able to curl this FederationDomain's discovery endpoint +# with cert verification like this: +# curl -v https://le.test.pinniped.dev/test-fd/.well-known/openid-configuration + +--- +apiVersion: config.supervisor.pinniped.dev/v1alpha1 +kind: FederationDomain +metadata: + name: federation-domain-for-manual-testing + namespace: supervisor-acceptance +spec: + # Using a path here is optional and allows multiple FederationDomains at the same DNS name. + issuer: https://le.test.pinniped.dev/test-fd + tls: + secretName: supervisor-letsencrypt-tls-certificate diff --git a/pipelines/shared-tasks/install-and-configure-cert-manager/task.sh b/pipelines/shared-tasks/install-and-configure-cert-manager/task.sh new file mode 100755 index 000000000..0dbae774e --- /dev/null +++ b/pipelines/shared-tasks/install-and-configure-cert-manager/task.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# Install and configure cert-manager to generate letsencrypt TLS certs for the supervisor +# which can be used for manual testing on the cluster. 
In order to use it, create a +# FederationDomain which specifies an issuer with the same name DNS name as the certificate +# and also specifies the same secretName as used below in the Certificate CR. +# The DNS record for the DNS name use here should be manually created elsewhere to point +# to the Supervisor's load balancer IP address. +# +# See sample-federation-domain.yaml in this directory for an example FederationDomain +# which will use these letsencrypt certs, which you can apply to the acceptance cluster. + +if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then + echo "PINNIPED_GCP_PROJECT env var must be set" + exit 1 +fi + +# Use the kubeconfig from the task inputs. +export KUBECONFIG="$(pwd)/kubeconfig/kubeconfig" + +# Load some deployment related env vars, like PINNIPED_TEST_SUPERVISOR_NAMESPACE, from the task inputs. +source integration-test-env-vars/integration-test-env + +# Install or update cert-manager. This will create a "cert-manager" namespace. +kapp deploy --yes --app cert-manager --diff-changes \ + --file "https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml" + +# Configure a DNS admin account for cert-manager to use. +cat </dev/null + all_local=($(find . -name 'kind-worker-*' -type f -exec basename {} ';' | sort)) + popd >/dev/null +fi + +all_cloud=($(gcloud compute instances list \ + --zones "$INSTANCE_ZONE" --project "$GCP_PROJECT" \ + --filter 'name:kind-worker-*' --format 'table[no-heading](name)' | sort)) + +exists_in_local_but_not_cloud=() +for i in "${all_local[@]}"; do + found= + for j in "${all_cloud[@]}"; do + if [[ "$i" == "$j" ]]; then + found=yes + break + fi + done + if [[ "$found" != "yes" ]]; then + exists_in_local_but_not_cloud+=("$i") + fi +done + +exists_in_cloud_but_not_local=() +exists_in_cloud_but_not_local_relative_path=() +for i in "${all_cloud[@]}"; do + found= + for j in "${all_local[@]}"; do + if [[ "$i" == "$j" ]]; then + found=yes + break + fi + done + if [[ "$found" != "yes" ]]; then + exists_in_cloud_but_not_local+=("$i") + fi +done + +if [[ -d pinniped-ci-pool ]]; then + pushd pinniped-ci-pool >/dev/null + echo + echo "All pool repo kind cluster files which do not have a running VM instance:" + for i in "${exists_in_local_but_not_cloud[@]}"; do + echo -n "$i " + relative_path=$(find . 
-name "$i" -type f) + exists_in_cloud_but_not_local_relative_path+=("$relative_path") + git --no-pager log -n1 --pretty=format:"%h%x09%an%x09%ad%x09%s" "$relative_path" + echo + done + if [[ ${#exists_in_local_but_not_cloud[@]} -eq 0 ]]; then + echo "none" + fi + popd >/dev/null +fi + +now_in_seconds_since_epoch=$(date +"%s") +hours_ago_to_delete=2 +vms_to_remove=() + +echo +echo "All VM instances with no corresponding pool repo file (with creation time in UTC):" +for i in "${exists_in_cloud_but_not_local[@]}"; do + creation_time=$(gcloud compute instances describe "$i" \ + --zone "$INSTANCE_ZONE" --project "$GCP_PROJECT" \ + --format 'table[no-heading](creationTimestamp.date(tz=UTC))') + # UTC date format example: 2022-04-01T17:01:59 + if [[ "$creation_time" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$ ]]; then + # Note: on MacOS this date command would be: date -ju -f '%Y-%m-%dT%H:%M:%S' "$creation_time" '+%s' + creation_time_seconds_since_epoch=$(date -u -d "$creation_time" '+%s') + if (($((now_in_seconds_since_epoch - creation_time_seconds_since_epoch)) > $((hours_ago_to_delete * 60 * 60)))); then + vms_to_remove+=("$i") + echo "$i $creation_time (older than $hours_ago_to_delete hours)" + else + echo "$i $creation_time (less than $hours_ago_to_delete hours old)" + fi + else + echo "VM creation time not in expected time format: $creation_time" + exit 1 + fi +done +if [[ ${#exists_in_cloud_but_not_local[@]} -eq 0 ]]; then + echo "none" +fi + +echo +if [[ ${#vms_to_remove[@]} -eq 0 ]]; then + echo "No old orphaned VMs found to remove." +else + echo "Removing ${#vms_to_remove[@]} VM(s) which are older than $hours_ago_to_delete hours in $INSTANCE_ZONE: ${vms_to_remove[*]} ..." + gcloud compute instances delete --zone "${INSTANCE_ZONE}" --delete-disks all --quiet ${vms_to_remove[*]} +fi + +echo +echo "Done!" diff --git a/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml b/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml new file mode 100644 index 000000000..81ff52be6 --- /dev/null +++ b/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml @@ -0,0 +1,17 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped-ci + - name: pinniped-ci-pool + optional: true +outputs: +params: + INSTANCE_ZONE: + GCP_PROJECT: + GCP_USERNAME: + GCP_JSON_KEY: +run: + path: pinniped-ci/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.sh diff --git a/pipelines/shared-tasks/run-go-vuln-scan/task.sh b/pipelines/shared-tasks/run-go-vuln-scan/task.sh new file mode 100755 index 000000000..272140b81 --- /dev/null +++ b/pipelines/shared-tasks/run-go-vuln-scan/task.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +go version +go install golang.org/x/vuln/cmd/govulncheck@latest + +cd pinniped + +OPTS="-json -test" + +if [[ -n "${BUILD_TAGS:-}" ]]; then + OPTS="${OPTS} -tags ${BUILD_TAGS}" +fi + +OPTS="${OPTS} ./..." + +govulncheck ${OPTS} diff --git a/pipelines/shared-tasks/run-go-vuln-scan/task.yml b/pipelines/shared-tasks/run-go-vuln-scan/task.yml new file mode 100644 index 000000000..39d05e440 --- /dev/null +++ b/pipelines/shared-tasks/run-go-vuln-scan/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+
+---
+platform: linux
+image_resource:
+  type: registry-image
+  source:
+    repository: golang
+    tag: '1.23.2'
+inputs:
+  - name: pinniped
+  - name: pinniped-ci
+outputs:
+params:
+  BUILD_TAGS:
+run:
+  path: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.sh
diff --git a/pipelines/shared-tasks/run-integration-tests/task.sh b/pipelines/shared-tasks/run-integration-tests/task.sh
new file mode 100755
index 000000000..e33d4c12a
--- /dev/null
+++ b/pipelines/shared-tasks/run-integration-tests/task.sh
@@ -0,0 +1,280 @@
+#!/usr/bin/env bash
+
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Run the integration tests against a remote target cluster.
+#
+# This script is designed to be run both in CI as a task (see task.yml)
+# and on a development workstation (see hack/prepare-remote-cluster-for-integration-tests.sh).
+# When editing this file, please ensure that both methods of running will still work.
+# All required file/directory paths can be controlled by env vars so they can default
+# to the CI task values but be overridden by an invocation on your workstation.
+#
+# This script assumes that the app is already deployed into the remote target cluster
+# and that the necessary env vars are provided.
+
+set -euo pipefail
+
+export GOCACHE="$PWD/cache/gocache"
+export GOMODCACHE="$PWD/cache/gomodcache"
+
+# When run as a CI task, the initial working directory is the directory above all of the
+# input directories.
+initial_working_directory=$(pwd)
+
+# Set some default paths that would apply when run as a CI task.
+# There is no equivalent needed for these when running on your development laptop.
+pinniped_test_cli="$initial_working_directory/ci-test-image/rootfs/usr/local/bin/pinniped"
+integration_test_binary="$initial_working_directory/ci-test-image/rootfs/usr/local/bin/pinniped-integration-test"
+
+# Set up the KUBECONFIG for the integration tests to use.
+# To make it possible to run this script on your workstation, first check to see if $KUBECONFIG is already set by the caller.
+kubeconfig=${KUBECONFIG:-"$initial_working_directory/kubeconfig/kubeconfig"}
+echo "Using kubeconfig file $kubeconfig"
+export KUBECONFIG="$kubeconfig"
+
+# Load the env vars that were output by the previous script which are needed during go test.
+# To make it possible to run this script on your workstation, first check to see if an alternate path is set.
+test_env_path=${TEST_ENV_PATH:-"integration-test-env-vars/integration-test-env"}
+echo "Using test env file $test_env_path"
+source "$test_env_path"
+
+# cd to the source code repo.
+# To make it possible to run this script on your workstation, first check to see if an alternate path is set.
+source_path=${SOURCE_PATH:-"pinniped"}
+cd "$source_path"
+echo "Using source code directory $(pwd)"
+
+# Some supervisor deployment settings, with default values that are appropriate for both the CI task
+# and for running on development workstations. These can be overridden to allow testing of secondary deploys,
+# i.e. when there are two Pinnipeds running on the same cluster.
+supervisor_namespace=${PINNIPED_SUPERVISOR_NAMESPACE:-"supervisor"}
+supervisor_nodeport_service=${PINNIPED_SUPERVISOR_NODEPORT_SERVICE:-"supervisor-nodeport"}
+supervisor_https_host_port=${PINNIPED_SUPERVISOR_HTTPS_HOST_PORT:-12344} # see gce-init.sh for the meaning of this port
+
+# Prepare to clean up any background jobs that we might start below.
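# As a rough illustration only (the local paths and values below are hypothetical, not part of this patch),
# the workstation-style overrides described above might be supplied like so:
#
#   KUBECONFIG="$HOME/.kube/my-test-cluster-kubeconfig" \
#   TEST_ENV_PATH="/tmp/integration-test-env" \
#   SOURCE_PATH="$HOME/workspace/pinniped" \
#   START_KUBECTL_PROXY=yes \
#   pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.sh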
+background_pids=() +function cleanup() { + if [[ "${#background_pids[@]}" -gt "0" ]]; then + echo "Cleaning up background processes..." + # Kill all background jobs. Can't use the $background_pids here since some of the commands that we + # put into the background are pipelines of multiple commands, and $background_pids only holds the pids + # of the last command in each pipeline. `jobs -p` is the pids of the first command in each pipeline. + jobs -p | xargs kill + fi +} +trap cleanup EXIT + +# See kind port mappings in gce-init.sh for what these port number values hook into on a remote kind cluster. +# See single-node.yaml for the same port numbers when running a kind cluster on your development laptop. +ssh_mappings=( + # The Pinniped Supervisor's https port. + "127.0.0.1:12344:127.0.0.1:${supervisor_https_host_port}" + # The squid proxy port. We run squid inside the cluster to allow the tests + # to use it as an http_proxy to access all Services inside the cluster. + "127.0.0.1:12346:127.0.0.1:12346" +) + +kubectl_mapping_command1=( + # The Pinniped Supervisor's https port. + kubectl port-forward -n "$supervisor_namespace" "svc/$supervisor_nodeport_service" 12344:443 -v 9 +) +kubectl_mapping_command2=( + # The squid proxy port. We run squid inside the cluster to allow the tests + # to use it as an http_proxy to access all Services inside the cluster. + kubectl port-forward -n tools svc/proxy 12346:3128 -v 9 +) +# The above variables are not unused, as shellcheck warns. They are passed by name into this array. +kubectl_mapping_commands=(kubectl_mapping_command1 kubectl_mapping_command2) + +# The health checks that we should run before running the tests to ensure that our port mappings are ready. +port_health_checks=( + # The Pinniped Supervisor's https port. + "curl -fsk https://127.0.0.1:12344/healthz" + # The squid proxy port. + "https_proxy=127.0.0.1:12346 curl -fsk https://dex.tools.svc.cluster.local/dex/.well-known/openid-configuration" +) + +# Use "gcloud ssh" to forward ports of remote kind clusters because "kubectl port-forward" +# proved to be unreliable in that use case. +if [[ "${START_GCLOUD_PROXY:-no}" == "yes" ]]; then + if [[ -z "${GCP_ZONE:-}" || -z "${GCP_PROJECT:-}" ]]; then + echo "\$GCP_ZONE and \$GCP_PROJECT are required when START_GCLOUD_PROXY==yes" + exit 1 + fi + + # If the GCP_USERNAME env var was set, then use it along with $GCP_JSON_KEY to log in as a service account. + # When running on your laptop we will assume that you are already logged in to gcloud as yourself. + if [[ -n "${GCP_USERNAME:-}" ]]; then + echo "Signing in to gcloud as service account $GCP_USERNAME ..." + gcloud auth activate-service-account \ + "$GCP_USERNAME" \ + --key-file <(echo "$GCP_JSON_KEY") \ + --project "$GCP_PROJECT" + fi + + # For using "gcloud ssh" with a remote kind cluster below, we'll need to know the name of the cluster. + if [[ -f "$initial_working_directory/kubeconfig/cluster-name" ]]; then + # In CI we set the cluster name as another file in the kubeconfig input directory. + cluster_name="$(cat "$initial_working_directory/kubeconfig/cluster-name")" + else + # When running on your development workstation, the name of the file is the name of the cluster. + cluster_name="$(basename "$KUBECONFIG")" + fi + + # Make a private key that can be used for all ssh commands below, if one does not already exist. + # Check if it exists because there is no need to regenerate it when running on your development workstation. 
+ ssh_key_file="$HOME/.ssh/pinniped-integration-test-key" + if [[ ! -f "$ssh_key_file" ]]; then + # Generate a private key which has no password, output to $ssh_key_file. + ssh-keygen -t rsa -b 4096 -q -N "" -f "$ssh_key_file" + fi + + # Use a unique username for each test invocation so that each test invocation will upload a new public key to our GCP project. + # This allows any number of port forwards across parallel test runs to be independent. + # Note that this username must be 32 character or less. + unique_username="int-test-$(openssl rand -hex 8)" + + # When run in CI, the service account should not have permission to create project-wide keys, so explicitly add the + # key only to the specific VM instance (as VM metadata). We don't want to pollute the project-wide keys with these. + # See https://cloud.google.com/compute/docs/connect/add-ssh-keys#after-vm-creation for explanation of these commands. + # Note that this overwrites all ssh keys in the metadata. At the moment, these VMs have no ssh keys in the metadata + # upon creation, so it should always be okay to overwrite the empty value. However, if someday they need to have some + # initial ssh keys in the metadata for some reason, and if those keys need to be preserved for some reason, then + # these commands could be enhanced to instead read the keys, add to them, and write back the new list. + future_time="$(date --utc --date '+3 hours' '+%FT%T%z')" + echo \ + "${unique_username}:$(cat "${ssh_key_file}.pub") google-ssh {\"userName\":\"${unique_username}\",\"expireOn\":\"${future_time}\"}" \ + > /tmp/ssh-key-values + gcloud compute instances add-metadata "$cluster_name" \ + --metadata-from-file ssh-keys=/tmp/ssh-key-values \ + --zone "$GCP_ZONE" --project "$GCP_PROJECT" + + # Get the IP so we can stop using gcloud ssh and start using regular ssh, now that it has been set up. + # gcloud ssh seems to complain that the "remote host identification has changed" sometimes and there + # seems to be no way to avoid it. :( So we'll use regular ssh. + gcloud_instance_ip=$(gcloud compute instances describe \ + --zone "$GCP_ZONE" --project "$GCP_PROJECT" "${cluster_name}" \ + --format='get(networkInterfaces[0].accessConfigs[0].natIP)') + + # Now start some simultaneous background jobs. + for mapping in "${ssh_mappings[@]}"; do + echo "Starting ssh for temporary user ${unique_username} to map port ${mapping} ..." + ssh "${unique_username}@${gcloud_instance_ip}" -i "${ssh_key_file}" \ + -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ + -o ExitOnForwardFailure=yes -NT -L "${mapping}" & + background_pids+=($!) + done + +# For other kinds of remote clusters, "kubectl port-forward" works fine. +elif [[ "${START_KUBECTL_PROXY:-no}" == "yes" ]]; then + for cmd_and_args_array_name in "${kubectl_mapping_commands[@]}"; do + # All these array gymnastics are to avoid using eval to run the command because + # eval makes it very hard to kill the background kubectl process during trap cleanup. + cmd_and_args_array_name="$cmd_and_args_array_name""[@]" + cmd_and_args_array=("${!cmd_and_args_array_name}") + echo "Starting " "${cmd_and_args_array[@]}" + "${cmd_and_args_array[@]}" | grep --line-buffered -v "Handling connection" & + background_pids+=($!) + done +fi + +# Give a few moments for the background commands to run, only to avoid having their stdout +# interleave so much with the stdout of the commands that we're about to do below. This is +# not for correctness to avoid a race, because the while loop below is doing that. 
+sleep 10 + +# If we started either style of port forwarding above, then wait for all of the ports to +# start working before we start the integration tests to avoid a race between the port +# forwarding starting and the first integration test which tries to use one of these ports. +if [[ "${START_GCLOUD_PROXY:-no}" == "yes" || "${START_KUBECTL_PROXY:-no}" == "yes" ]]; then + while true; do + sleep 1 + for pid in "${background_pids[@]}"; do + if ! ps -p "$pid" >/dev/null; then + echo "Background port-forward process $pid seems to have died. Exiting. :(" + exit 1 + fi + done + succeeded=true + # Try to curl an endpoint which should succeed through each port-forwarded port. + for health_check in "${port_health_checks[@]}"; do + echo "$health_check" + if ! eval "$health_check" >/dev/null; then + succeeded=false + break + fi + done + if [[ $succeeded == "true" ]]; then + echo "All port-forwarded ports are ready." + break + fi + echo "Waiting for port-forwarded ports to be ready..." + done +fi + +# Print version for logs. +go version +if [[ "$OSTYPE" != "darwin"* ]]; then + google-chrome --version +fi + +# If the cli has been pre-compiled then use it. +if [[ -f "$pinniped_test_cli" ]]; then + export PINNIPED_TEST_CLI="$pinniped_test_cli" +fi + +# Unset this before running the integration tests, to try to hide this GCP_JSON_KEY credential from the tests. +if [[ -n "${GCP_JSON_KEY:-}" ]]; then + unset GCP_JSON_KEY GCP_ZONE GCP_PROJECT GCP_USERNAME +fi + +if [ -d "../test-output" ]; then + # this is probably running in CI, and test-output is the name of the concourse output directory + # that we put the file in so that the next task can upload it to GCS. + # we need to chmod it so our non-root user can write to it. + chmod 777 ../test-output + jsonfile_arg="../test-output/testoutput.log" +else + # otherwise, we're probably running locally and don't actually want to output the logs to a file + # to aggregate and analyze later. + jsonfile_arg="/dev/null" +fi + +test_run_regex=${TEST_RUN_REGEX:-'.*'} + +if [[ -f "$integration_test_binary" ]]; then + # If the integration test suite has been pre-compiled, then use it to run the tests. + test_command="gotestsum --raw-command --format standard-verbose --jsonfile $jsonfile_arg -- go tool test2json -t -p pkgname \"$integration_test_binary\" -test.v -test.count=1 -test.timeout=70m -test.run='${test_run_regex}'" +else + # Otherwise just run the tests with "go test". + test_command="gotestsum --format standard-verbose --jsonfile $jsonfile_arg ./test/integration/ -- -race -v -count 1 -timeout 70m -run '${test_run_regex}'" +fi + +# Run the integration tests. They can assume that the app is already deployed +# and that kubectl is configured to talk to the cluster. They also have the +# k14s tools available (ytt, kapp, etc) in case they want to do more deploys. +if [[ "$(id -u)" == "0" ]]; then + # Downgrade to a non-root user to run the tests. We don't want them reading the + # environment of any parent process, e.g. by reading from /proc. This user account + # was created in the Dockerfile of the container image used to run this script in CI. + # It is okay if $GCP_JSON_KEY is empty or unset, either way we've avoided sharing the + # credential with the subprocess. + if [[ -n $(su testrunner -c "echo $GCP_JSON_KEY") ]]; then + echo "Tried to obscure the GCP_JSON_KEY secret from the testrunner user but it didn't work!" 
+ exit 1 + fi + # This should not be necessary, but something strange started happening after upgrading to Concourse v7.7.0 + # where sometimes the owner and group IDs of these directories are wrong inside the container on Concourse. + # Attempting the following chown as a workaround, which should change the owner/group of the files back to + # what they were in the container image. + chown -R testrunner:testrunner /home/testrunner + echo "Downgrading to user testrunner and running: ${test_command}" + # su without "-" keeps the parent environment variables, but we've already deleted the credential variables. + su testrunner -c "$test_command" +else + # Already non-root, so just run as yourself. + echo "Running: ${test_command}" + eval "$test_command" +fi diff --git a/pipelines/shared-tasks/run-integration-tests/task.yml b/pipelines/shared-tasks/run-integration-tests/task.yml new file mode 100644 index 000000000..c1607e7cc --- /dev/null +++ b/pipelines/shared-tasks/run-integration-tests/task.yml @@ -0,0 +1,31 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: integration-test-env-vars + - name: kubeconfig + - name: ci-test-image + optional: true +outputs: + - name: test-output +params: + GCP_ZONE: + GCP_PROJECT: + GCP_USERNAME: + GCP_JSON_KEY: + PINNIPED_SUPERVISOR_NAMESPACE: + PINNIPED_SUPERVISOR_NODEPORT_SERVICE: + PINNIPED_SUPERVISOR_HTTPS_HOST_PORT: + TEST_RUN_REGEX: + + # Only set one of these to "yes": + START_GCLOUD_PROXY: + START_KUBECTL_PROXY: +run: + path: pinniped-ci/pipelines/shared-tasks/run-integration-tests/task.sh +caches: + - path: cache diff --git a/pipelines/shared-tasks/run-kubectl-uninstall/task.sh b/pipelines/shared-tasks/run-kubectl-uninstall/task.sh new file mode 100755 index 000000000..b29419f2d --- /dev/null +++ b/pipelines/shared-tasks/run-kubectl-uninstall/task.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euxo pipefail + +export KUBECONFIG="$PWD/cluster-pool/metadata" + +# uninstall concierge +echo "deleting concierge from the cluster..." +kubectl delete -f deployment-yamls/install-pinniped-concierge-resources.yaml +kubectl wait --for=delete --timeout=60s -n pinniped-concierge deployments/pinniped-concierge +kubectl delete -f deployment-yamls/install-pinniped-concierge-crds.yaml +kubectl wait --for=delete --timeout=60s crd -l app=pinniped-concierge + +# uninstall local user authenticator +echo "deleting local user authenticator from the cluster..." +kubectl delete -f deployment-yamls/install-local-user-authenticator.yaml +kubectl wait --for=delete --timeout=60s -n local-user-authenticator deployments/local-user-authenticator + +# uninstall supervisor +echo "deleting supervisor from the cluster..." +kubectl delete -f deployment-yamls/install-pinniped-supervisor.yaml +kubectl wait --for=delete --timeout=60s -n pinniped-supervisor deployments/pinniped-supervisor + diff --git a/pipelines/shared-tasks/run-kubectl-uninstall/task.yaml b/pipelines/shared-tasks/run-kubectl-uninstall/task.yaml new file mode 100644 index 000000000..56562a2db --- /dev/null +++ b/pipelines/shared-tasks/run-kubectl-uninstall/task.yaml @@ -0,0 +1,12 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: deployment-yamls + - name: cluster-pool +run: + path: pinniped-ci/pipelines/shared-tasks/run-kubectl-uninstall/task.sh \ No newline at end of file diff --git a/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh b/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh new file mode 100755 index 000000000..ef4d65ec1 --- /dev/null +++ b/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh @@ -0,0 +1,134 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script can be used in CI and on a developer's workstation. +# It assumes that the current working directory is the top of +# our main source code repo. + +# The goal of this test is to demonstrate that it is possible +# to perform a clean uninstall by deleting all resources that have +# a custom label after installing the apps into a preexisting +# namespace. + +# Print for debugging +kubectl config current-context +kubectl version +kubectl cluster-info + +before=/tmp/everything_in_cluster_before_installing_apps.json +after_install=/tmp/everything_in_cluster_after_installing_apps.json +after_delete=/tmp/everything_in_cluster_after_deleting_apps.json + +function kapp_inspect() { + set -x + kapp inspect -a 'label:' --json \ + --column 'kind,name,namespace' \ + --filter '{"not":{"resource":{"kinds":["Event","EndpointSlice"]}}}' | + jq .Tables[0].Rows >"$1" + { set +x; } 2>/dev/null +} + +namespace=preexisting-namespace + +cat </dev/null +ytt --file . \ + --data-value "app_name=$concierge_app_name" \ + --data-value "into_namespace=$namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" | + kapp deploy --yes --app "$concierge_app_name" --diff-changes --file - +popd >/dev/null + +echo "Deploying the Supervisor app to the cluster..." +pushd deploy/supervisor >/dev/null +ytt --file . \ + --data-value "app_name=$supervisor_app_name" \ + --data-value "into_namespace=$namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" | + kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file - +popd >/dev/null + +# Create any additional configs that will cause controllers to create additional resources. +# Note that this resource will be auto-deleted when the FederationDomain CRD is deleted. +cat </dev/null + +echo "Sleeping 30..." +sleep 30 # Give a little time for things to finish cascading deletes. + +kapp_inspect $after_delete + +echo "Performing diff of before install state vs. current..." +if ! diff "$before" "$after_delete"; then + echo "Test failed! App uninstall left garbage behind." + exit 1 +else + echo "Test passed!" +fi diff --git a/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh b/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh new file mode 100755 index 000000000..b9cbcc322 --- /dev/null +++ b/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +# This script can be used in CI and on a developer's workstation. +# It assumes that the current working directory is the top of +# our main source code repo. + +# Print for debugging +kubectl config current-context +kubectl version +kubectl cluster-info + +before=/tmp/everything_in_cluster_before_installing_apps.json +after_install=/tmp/everything_in_cluster_after_installing_apps.json +after_delete=/tmp/everything_in_cluster_after_deleting_apps.json + +function kapp_inspect() { + set -x + kapp inspect -a 'label:' --json \ + --column 'kind,name,namespace' \ + --filter '{"not":{"resource":{"kinds":["Event","EndpointSlice"]}}}' | + jq .Tables[0].Rows >"$1" + { set +x; } 2>/dev/null +} + +# Wait for the cluster to finish creating all of its namespaces. +echo "Sleeping 30..." +sleep 30 + +# When using a kind cluster, there is a resource that takes ~40s to appear. Wait for it. +if kubectl get namespaces -o name | grep -q local-path-storage; then + echo -n "Waiting for local-path-storage" + foundLocalPathStorage="0" + for i in $(seq 1 120); do + foundLocalPathStorage=$(kubectl get pods -n local-path-storage -o name | wc -l | tr -d ' ') + if [[ "$foundLocalPathStorage" != "0" ]]; then + break + fi + echo -n "." + sleep 1 + done + if [[ "$foundLocalPathStorage" != "0" ]]; then + echo " found" + else + echo " NOT found" + echo "ERROR: Timed out waiting for local-path-storage" + exit 1 + fi +fi + +# Wait for anything else left to be ready as well. +echo "Sleeping another 30..." +sleep 30 + +kapp_inspect $before + +concierge_app_name=pinniped-concierge +concierge_namespace=pinniped-concierge +supervisor_app_name=pinniped-supervisor +supervisor_namespace=pinniped-supervisor +local_user_authenticator_app_name=pinniped-local-user-authenticator + +echo "Deploying the Concierge app to the cluster..." +pushd deploy/concierge >/dev/null +ytt --file . \ + --data-value "app_name=$concierge_app_name" \ + --data-value "namespace=$concierge_namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" | + kapp deploy --yes --app "$concierge_app_name" --diff-changes --file - +popd >/dev/null + +echo "Deploying the Supervisor app to the cluster..." +pushd deploy/supervisor >/dev/null +ytt --file . \ + --data-value "app_name=$supervisor_app_name" \ + --data-value "namespace=$supervisor_namespace" \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" | + kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file - +popd >/dev/null + +echo "Deploying the local-user-authenticator app to the cluster..." +pushd deploy/local-user-authenticator >/dev/null +ytt --file . \ + --data-value "image_repo=$IMAGE_REPO" \ + --data-value "image_digest=${IMAGE_DIGEST:-}" \ + --data-value "image_tag=${IMAGE_TAG:-}" | + kapp deploy --yes --app "$local_user_authenticator_app_name" --diff-changes --file - +popd >/dev/null + +echo "Sleeping 30..." +sleep 30 # Give a little time for controllers to run, etc. + +kapp_inspect $after_install + +echo "Deleting apps from the cluster..." +set -x +kapp delete --app "$concierge_app_name" --wait-timeout 2m --yes +kapp delete --app "$supervisor_app_name" --wait-timeout 2m --yes +kapp delete --app "$local_user_authenticator_app_name" --wait-timeout 2m --yes +{ set +x; } 2>/dev/null + +echo "Sleeping 30..." 
+sleep 30 # Give a little time for things to finish cascading deletes. + +kapp_inspect $after_delete + +echo "Performing diff of before install state vs. current..." +if ! diff "$before" "$after_delete"; then + echo "Test failed! App uninstall left garbage behind." + exit 1 +else + echo "Test passed!" +fi diff --git a/pipelines/shared-tasks/run-uninstall-test/task.sh b/pipelines/shared-tasks/run-uninstall-test/task.sh new file mode 100755 index 000000000..31b46435b --- /dev/null +++ b/pipelines/shared-tasks/run-uninstall-test/task.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +export KUBECONFIG="$PWD/cluster-pool/metadata" + +# See https://github.com/concourse/registry-image-resource#in-fetch-the-images-rootfs-and-metadata +export IMAGE_DIGEST=$(cat ci-build-image/digest) +export IMAGE_REPO="$(cat ci-build-image/repository)" + +# Get an absolute path to the test script. +TEST_SCRIPT="$PWD/$TEST_SCRIPT" + +pushd pinniped >/dev/null +"$TEST_SCRIPT" +popd >/dev/null diff --git a/pipelines/shared-tasks/run-uninstall-test/task.yml b/pipelines/shared-tasks/run-uninstall-test/task.yml new file mode 100644 index 000000000..8f8d1dc6d --- /dev/null +++ b/pipelines/shared-tasks/run-uninstall-test/task.yml @@ -0,0 +1,15 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: ci-build-image + - name: cluster-pool +outputs: +params: + TEST_SCRIPT: +run: + path: pinniped-ci/pipelines/shared-tasks/run-uninstall-test/task.sh diff --git a/pipelines/shared-tasks/run-unit-tests/task.sh b/pipelines/shared-tasks/run-unit-tests/task.sh new file mode 100755 index 000000000..6364b8ae2 --- /dev/null +++ b/pipelines/shared-tasks/run-unit-tests/task.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail +go version + +COVERAGE_OUTPUT="$PWD/unit-test-coverage/coverage.txt" +export KUBE_CACHE_MUTATION_DETECTOR=true +export KUBE_PANIC_WATCH_DECODE_ERROR=true + +export GOCACHE="$PWD/cache/gocache" +export GOMODCACHE="$PWD/cache/gomodcache" + +cd pinniped +go test -short -race -coverprofile "${COVERAGE_OUTPUT}" -covermode atomic ./... diff --git a/pipelines/shared-tasks/run-unit-tests/task.yml b/pipelines/shared-tasks/run-unit-tests/task.yml new file mode 100644 index 000000000..201c871e1 --- /dev/null +++ b/pipelines/shared-tasks/run-unit-tests/task.yml @@ -0,0 +1,20 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +inputs: + - name: pinniped + - name: pinniped-ci +outputs: + - name: unit-test-coverage +params: +run: + path: pinniped-ci/pipelines/shared-tasks/run-unit-tests/task.sh +caches: + - path: cache diff --git a/pipelines/shared-tasks/run-verify-codegen/task.sh b/pipelines/shared-tasks/run-verify-codegen/task.sh new file mode 100755 index 000000000..56ff89a09 --- /dev/null +++ b/pipelines/shared-tasks/run-verify-codegen/task.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+go version
+
+cd pinniped
+
+versions_file="./hack/lib/kube-versions.txt"
+
+if [[ ! -f "$versions_file" ]]; then
+  echo "could not find $versions_file"
+  exit 1
+fi
+
+if ! grep -q -F "${KUBE_MINOR_VERSION}" "$versions_file"; then
+  echo "WARNING: Could not find minor version ${KUBE_MINOR_VERSION} in $versions_file"
+  echo "WARNING: This should only happen if this version was recently added or removed by a PR but this job has not been updated yet."
+  echo "WARNING: Once the PR has been merged to main, please remember to add or remove the appropriate tasks from this job."
+  echo "WARNING: Skipping codegen verification for this Kube minor version!!"
+  exit 0
+fi
+
+KUBE_VERSION="$(grep -F "${KUBE_MINOR_VERSION}" "$versions_file")"
+
+echo "Using patch version $KUBE_VERSION for codegen..."
+
+CONTAINED=1 ./hack/lib/verify-codegen.sh "${KUBE_VERSION}"
diff --git a/pipelines/shared-tasks/run-verify-codegen/task.yml b/pipelines/shared-tasks/run-verify-codegen/task.yml
new file mode 100644
index 000000000..b778377c1
--- /dev/null
+++ b/pipelines/shared-tasks/run-verify-codegen/task.yml
@@ -0,0 +1,13 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+---
+platform: linux
+inputs:
+  - name: pinniped
+  - name: pinniped-ci
+outputs:
+params:
+  KUBE_MINOR_VERSION:
+run:
+  path: pinniped-ci/pipelines/shared-tasks/run-verify-codegen/task.sh
diff --git a/pipelines/shared-tasks/run-verify-go-generate/task.sh b/pipelines/shared-tasks/run-verify-go-generate/task.sh
new file mode 100755
index 000000000..56ddc205e
--- /dev/null
+++ b/pipelines/shared-tasks/run-verify-go-generate/task.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+go version
+
+cd pinniped
+
+echo "Running 'go generate ./...'"
+go generate ./...
+echo
+
+diffs=$(git --no-pager diff)
+if [[ "$diffs" == "" ]]; then
+  echo "Running 'go generate ./...' did not cause any diffs. Done."
+  exit 0
+fi
+
+echo "Running 'go generate ./...' caused the following diffs:"
+echo
+echo "$diffs"
+echo
+echo "Please resolve these diffs, for example by running 'go generate ./...' and committing the changes."
+
+exit 1
diff --git a/pipelines/shared-tasks/run-verify-go-generate/task.yml b/pipelines/shared-tasks/run-verify-go-generate/task.yml
new file mode 100644
index 000000000..1e9303421
--- /dev/null
+++ b/pipelines/shared-tasks/run-verify-go-generate/task.yml
@@ -0,0 +1,17 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+---
+platform: linux
+image_resource:
+  type: registry-image
+  source:
+    repository: golang
+    tag: '1.23.2'
+inputs:
+  - name: pinniped
+  - name: pinniped-ci
+outputs:
+params:
+run:
+  path: pinniped-ci/pipelines/shared-tasks/run-verify-go-generate/task.sh
diff --git a/pipelines/shared-tasks/run-verify-go-mod-tidy/task.sh b/pipelines/shared-tasks/run-verify-go-mod-tidy/task.sh
new file mode 100755
index 000000000..41b90d28c
--- /dev/null
+++ b/pipelines/shared-tasks/run-verify-go-mod-tidy/task.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +go version + +cd pinniped + +echo "Running 'module.sh tidy'" +./hack/module.sh tidy +echo + +diffs=$(git --no-pager diff) +if [[ "$diffs" == "" ]]; then + echo "Running 'module.sh tidy' did not cause any diffs. Done." + exit 0 +fi + +echo "Running 'module.sh tidy' caused the following diffs:" +echo +echo "$diffs" +echo +echo "Please resolve these diffs, for example by running 'module.sh tidy' and committing the changes." + +exit 1 diff --git a/pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml b/pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml new file mode 100644 index 000000000..257de3890 --- /dev/null +++ b/pipelines/shared-tasks/run-verify-go-mod-tidy/task.yml @@ -0,0 +1,17 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +inputs: + - name: pinniped + - name: pinniped-ci +outputs: +params: +run: + path: pinniped-ci/pipelines/shared-tasks/run-verify-go-mod-tidy/task.sh diff --git a/pipelines/shared-tasks/run-verify-lint/task.sh b/pipelines/shared-tasks/run-verify-lint/task.sh new file mode 100755 index 000000000..3356ef33c --- /dev/null +++ b/pipelines/shared-tasks/run-verify-lint/task.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +cd pinniped + +if [[ "${SKIP_INSTALL_GOLANGCI_LINT:-false}" != "true" ]]; then + golangci_lint_version=$(cat ./hack/lib/lint-version.txt) + curl -sfLo /tmp/golangci-lint.tar.gz \ + https://github.com/golangci/golangci-lint/releases/download/v${golangci_lint_version}/golangci-lint-${golangci_lint_version}-linux-amd64.tar.gz + + tar -C /tmp --strip-components=1 -xzvf /tmp/golangci-lint.tar.gz + + mv /tmp/golangci-lint /usr/local/bin/golangci-lint + chmod +x /usr/local/bin/golangci-lint +fi + +if grep --extended-regexp '\.Focus\(' --include '*_test.go' --recursive .; then + echo "ERROR: Found focused unit test(s) committed to git. This is almost certainly a mistake." + exit 1 +fi + +./hack/module.sh lint + +echo "finished" diff --git a/pipelines/shared-tasks/run-verify-lint/task.yml b/pipelines/shared-tasks/run-verify-lint/task.yml new file mode 100644 index 000000000..378b86ad4 --- /dev/null +++ b/pipelines/shared-tasks/run-verify-lint/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +inputs: + - name: pinniped + - name: pinniped-ci +outputs: +params: + SKIP_INSTALL_GOLANGCI_LINT: "false" +run: + path: pinniped-ci/pipelines/shared-tasks/run-verify-lint/task.sh diff --git a/pipelines/shared-tasks/scan-image-trivy/task.yml b/pipelines/shared-tasks/scan-image-trivy/task.yml new file mode 100644 index 000000000..3da828d90 --- /dev/null +++ b/pipelines/shared-tasks/scan-image-trivy/task.yml @@ -0,0 +1,37 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+
+---
+platform: linux
+image_resource:
+  type: registry-image
+  source:
+    repository: ghcr.io/aquasecurity/trivy
+inputs:
+  - name: image
+outputs:
+params:
+  GITHUB_TOKEN:
+  # For format see https://aquasecurity.github.io/trivy/v0.24.2/vulnerability/examples/filter/#by-vulnerability-ids
+  IGNORE_VULNERABILITY_IDS: ""
+run:
+  path: ash
+  args:
+    - -c
+    - |
+      set -euo pipefail
+
+      cat <<EOF >.trivyignore
+      ${IGNORE_VULNERABILITY_IDS}
+      EOF
+
+      echo ".trivyignore file contents:"
+      cat .trivyignore
+      echo
+
+      trivy image \
+        --input=image/image.tar \
+        --exit-code=1 \
+        --severity=HIGH,CRITICAL \
+        --ignore-unfixed \
+        --timeout=10m0s
diff --git a/pipelines/shared-tasks/template-deployment-yamls/task.sh b/pipelines/shared-tasks/template-deployment-yamls/task.sh
new file mode 100755
index 000000000..f00522bfa
--- /dev/null
+++ b/pipelines/shared-tasks/template-deployment-yamls/task.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+pinniped_version="v$(cat release-semver/version)"
+
+# Use an image_tag with both the actual tag (human-readable) and the digest (to statically pin the contents).
+# When both are specified, the image tag ends up acting like a comment to help the user understand what version
+# they have installed, but does not affect which image is pulled.
+image_digest="$(cat ci-build-image/digest)"
+image_tag="${pinniped_version}@${image_digest}"
+echo "Using image_repo=${IMAGE_REPO} with image_tag=${image_tag}"
+
+# Note that this is assuming that all required options in the values.yaml files have good defaults.
+echo "Templating install-pinniped-concierge.yaml..."
+pushd pinniped/deploy/concierge >/dev/null
+ytt --file . --data-value "image_repo=${IMAGE_REPO}" --data-value "image_tag=${image_tag}" >../../../deployment-yamls/install-pinniped-concierge.yaml
+popd >/dev/null
+
+# Create a subset of the Concierge YAML containing only the CRDs, the namespace, and the ServiceAccount (for use with kubectl apply and kubectl delete).
+echo "Templating install-pinniped-concierge-crds.yaml..."
+yq eval 'select(.kind == "CustomResourceDefinition" or .kind == "Namespace" or .kind == "ServiceAccount")' deployment-yamls/install-pinniped-concierge.yaml >deployment-yamls/install-pinniped-concierge-crds.yaml
+
+# Create a subset with everything that isn't in the other yaml file (for kubectl apply and kubectl delete)
+echo "Templating install-pinniped-concierge-resources.yaml"
+yq eval 'select(.kind != "CustomResourceDefinition" and .kind != "Namespace" and .kind != "ServiceAccount")' deployment-yamls/install-pinniped-concierge.yaml >deployment-yamls/install-pinniped-concierge-resources.yaml
+
+echo "Templating install-pinniped-supervisor.yaml..."
+pushd pinniped/deploy/supervisor >/dev/null
+ytt --file . --data-value "image_repo=${IMAGE_REPO}" --data-value "image_tag=${image_tag}" >../../../deployment-yamls/install-pinniped-supervisor.yaml
+popd >/dev/null
+
+echo "Templating install-local-user-authenticator.yaml..."
+pushd pinniped/deploy/local-user-authenticator >/dev/null
+ytt --file . 
--data-value "image_repo=${IMAGE_REPO}" --data-value "image_tag=${image_tag}" >../../../deployment-yamls/install-local-user-authenticator.yaml +popd >/dev/null diff --git a/pipelines/shared-tasks/template-deployment-yamls/task.yml b/pipelines/shared-tasks/template-deployment-yamls/task.yml new file mode 100644 index 000000000..f10e5b00a --- /dev/null +++ b/pipelines/shared-tasks/template-deployment-yamls/task.yml @@ -0,0 +1,16 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +inputs: + - name: pinniped + - name: pinniped-ci + - name: release-semver + - name: ci-build-image +outputs: + - name: deployment-yamls +params: + IMAGE_REPO: +run: + path: pinniped-ci/pipelines/shared-tasks/template-deployment-yamls/task.sh diff --git a/pipelines/shared-tasks/update-homebrew-formula/task.sh b/pipelines/shared-tasks/update-homebrew-formula/task.sh new file mode 100755 index 000000000..871b2a977 --- /dev/null +++ b/pipelines/shared-tasks/update-homebrew-formula/task.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -xeuo pipefail + +# Get current revision from 'revision: "abc123"'. +current_revision="$(awk '/revision:/ { gsub(/"/, "", $2); print($2); }' homebrew-pinniped-in/pinniped-cli.rb)" + +# Get current version components from 'tag: "vx.y.z",'. +current_version="$(awk '/tag:/ { sub(/"v/, "", $2); sub(/",/, "", $2); print($2); }' homebrew-pinniped-in/pinniped-cli.rb)" + +new_tag="$(cat github-release/tag)" +new_revision="$(cat github-release/commit_sha)" + +# Get new version components from 'vx.y.z'. +new_version="$(echo "$new_tag" | sed -e 's/^v//')" + +# Update formula, if necessary. +cp -a homebrew-pinniped-in/* homebrew-pinniped-in/.git homebrew-pinniped-out +if [[ "$current_revision" != "$new_revision" ]]; then + sed \ + -e "s/$current_version/$new_version/" \ + -e "s/$current_revision/$new_revision/" \ + homebrew-pinniped-in/pinniped-cli.rb \ + > homebrew-pinniped-out/pinniped-cli.rb + + cd homebrew-pinniped-out + apt update >/dev/null + apt install git -y >/dev/null + git config user.email "pinniped-ci-bot@users.noreply.github.com" + git config user.name "Pinny" + git commit -a -m "pinniped-cli.rb: update to $new_version" +fi diff --git a/pipelines/shared-tasks/update-homebrew-formula/task.yml b/pipelines/shared-tasks/update-homebrew-formula/task.yml new file mode 100644 index 000000000..9ebe36b94 --- /dev/null +++ b/pipelines/shared-tasks/update-homebrew-formula/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: debian + tag: 10.8 +inputs: + - name: pinniped-ci + - name: github-release + - name: homebrew-pinniped-in +outputs: + - name: homebrew-pinniped-out +run: + path: pinniped-ci/pipelines/shared-tasks/update-homebrew-formula/task.sh diff --git a/pipelines/shared-tasks/update-version-and-cli-docs/task.sh b/pipelines/shared-tasks/update-version-and-cli-docs/task.sh new file mode 100755 index 000000000..9a49784fb --- /dev/null +++ b/pipelines/shared-tasks/update-version-and-cli-docs/task.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +pinniped_tag="$(cat github-final-release/tag)" + +# Copy everything to output. +git clone ./pinniped-in ./pinniped-out + +# The target file within the Pinniped repo. +clidoc="site/content/docs/reference/cli.md" + +# Run the hidden pinniped CLI command for this latest release. +chmod +x github-final-release/pinniped-cli-linux-amd64 +github-final-release/pinniped-cli-linux-amd64 generate-markdown-help >"pinniped-out/$clidoc" + +configdoc="site/config.yaml" + +kube_version="$(cat ./pinniped-in/hack/lib/kube-versions.txt | grep -v '^#' | head -1 | cut -d"." -f1-2)" +if ! echo "$kube_version" | grep -Eq '^[0-9]+\.[0-9]+$'; then + echo "bad version format, should be X.Y: $kube_version" + exit 1 +fi + +echo "Installing yq..." +curl --retry-connrefused --retry 5 -fLo /usr/local/bin/yq https://github.com/mikefarah/yq/releases/download/v4.40.4/yq_linux_amd64 +chmod +x /usr/local/bin/yq + +# cd to the output repo. +cd pinniped-out + +# Edit the config.yaml file in the output repo. +pinniped_tag="$pinniped_tag" yq eval '.params.latest_version = env(pinniped_tag)' --inplace "$configdoc" +kube_version="$kube_version" yq eval '.params.latest_codegen_version = env(kube_version)' --inplace "$configdoc" + +# Prepare to commit in the output repo. +git config user.email "pinniped-ci-bot@users.noreply.github.com" +git config user.name "Pinny" + +# Only add the files that we think should have changed, just in case other files changed somehow. +git add "$clidoc" +git add "$configdoc" + +# Print the current status to the log. +git status + +# Did we just stage any changes? +staged=$(git --no-pager diff --staged) +if [[ "$staged" == "" ]]; then + # Nothing to commit. + echo "No changes to $clidoc or $configdoc found. Skipping git commit." +else + # Show diff for the log. + echo "Found changes for $clidoc or $configdoc:" + echo + echo "$staged" + echo + # Commit. + echo "Committing changes." + git commit -m "Updated versions in docs for $pinniped_tag release" +fi diff --git a/pipelines/shared-tasks/update-version-and-cli-docs/task.yml b/pipelines/shared-tasks/update-version-and-cli-docs/task.yml new file mode 100644 index 000000000..70ea25bd7 --- /dev/null +++ b/pipelines/shared-tasks/update-version-and-cli-docs/task.yml @@ -0,0 +1,18 @@ +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +platform: linux +image_resource: + type: registry-image + source: + repository: golang + tag: '1.23.2' +inputs: + - name: pinniped-ci + - name: github-final-release + - name: pinniped-in +outputs: + - name: pinniped-out +run: + path: pinniped-ci/pipelines/shared-tasks/update-version-and-cli-docs/task.sh diff --git a/pipelines/shared-tasks/upload-test-coverage/task.sh b/pipelines/shared-tasks/upload-test-coverage/task.sh new file mode 100755 index 000000000..8a83e3e65 --- /dev/null +++ b/pipelines/shared-tasks/upload-test-coverage/task.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+COVERAGE_OUTPUT="$PWD/unit-test-coverage/coverage.txt"
+cd pinniped
+codecov -t ${CODECOV_TOKEN} -f "${COVERAGE_OUTPUT}"
diff --git a/pipelines/shared-tasks/upload-test-coverage/task.yml b/pipelines/shared-tasks/upload-test-coverage/task.yml
new file mode 100644
index 000000000..ad90ebc51
--- /dev/null
+++ b/pipelines/shared-tasks/upload-test-coverage/task.yml
@@ -0,0 +1,13 @@
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+---
+platform: linux
+inputs:
+  - name: pinniped
+  - name: pinniped-ci
+  - name: unit-test-coverage
+params:
+  CODECOV_TOKEN:
+run:
+  path: pinniped-ci/pipelines/shared-tasks/upload-test-coverage/task.sh
diff --git a/pipelines/update-all-pipelines.sh b/pipelines/update-all-pipelines.sh
new file mode 100755
index 000000000..14f5998ec
--- /dev/null
+++ b/pipelines/update-all-pipelines.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+set -eu
+
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+failed_scripts=()
+
+for update_pipeline_script in $(find "$script_dir" -name update-pipeline.sh); do
+  echo "Running $update_pipeline_script..."
+  set +e
+  $update_pipeline_script
+  if [[ $? -ne 0 ]]; then
+    failed_scripts+=("$update_pipeline_script")
+  fi
+  set -e
+  echo
+done
+
+for failed_script in ${failed_scripts[@]:-}; do
+  echo "FAILED: ${failed_script}"
+done
+
+if [ ${#failed_scripts[@]} -ne 0 ]; then
+  exit 1
+fi
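A possible usage sketch (hedged; whatever fly/Concourse login each individual update-pipeline.sh script expects is not shown in this patch): run the aggregator script above from the repo root to refresh every pipeline at once, or run a single pipeline's own update-pipeline.sh directly.

    ./pipelines/update-all-pipelines.sh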