Move CI to a new internal Concourse

This commit is contained in:
Ryan Richard
2025-06-20 16:01:03 -07:00
parent 80e5eb5b56
commit d5ff2f4447
29 changed files with 326 additions and 338 deletions

View File

@@ -31,7 +31,7 @@ The release checklist is committed to this repo as an [issue template](https://g
## Pipelines
Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD.
Our Concourse can be found at [ci.pinniped.dev](https://ci.pinniped.dev).
We are currently running our Concourse on a network that can only be reached from inside the corporate network at [ci.pinniped.broadcom.net](https://ci.pinniped.broadcom.net).
The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible.

View File

@@ -1,11 +1,11 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Some global fly config.
#
export FLY_CLI=/usr/local/bin/fly
export CONCOURSE_URL=https://ci.pinniped.dev
export CONCOURSE_URL=https://ci.pinniped.broadcom.net
export CONCOURSE_TEAM=main
export CONCOURSE_TARGET=pinniped
export ROOT_DIR

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
@@ -16,6 +16,11 @@ if [[ ! -f "$FLY_CLI" ]]; then
chmod 755 "$FLY_CLI"
fi
if $FLY_CLI targets | grep ^"$CONCOURSE_TARGET" | grep -q 'https://ci\.pinniped\.dev'; then
# The user has the old ci.pinniped.dev target. Remove it so we can replace it.
$FLY_CLI delete-target --target "$CONCOURSE_TARGET"
fi
if ! $FLY_CLI targets | tr -s ' ' | cut -f1 -d ' ' | grep -q "$CONCOURSE_TARGET"; then
# Create the target if needed
$FLY_CLI --target "$CONCOURSE_TARGET" login \

View File

@@ -25,11 +25,13 @@ script must be used to auto-generate some values and store them in a new secret
This script only needs to be run once.
1. Create a github oauth client as described in https://concourse-ci.org/github-auth.html.
The callback URI should be set to `https://ci.pinniped.dev/sky/issuer/callback`.
The callback URI should be set to `https://ci.pinniped.broadcom.net/sky/issuer/callback`.
Take note of the client ID and client secret for use in the next step.
2. Run `GITHUB_CLIENT_ID=<your_client_id> GITHUB_CLIENT_SECRET=<your_client_secret> ./bootstrap-secrets.sh`.
This will create a secret in the GCP Secrets Manager which includes the GitHub client info
along with some auto-generated secrets.
3. If you need to change the GitHub client's ID or secret later, edit the secret in GCP Secrets Manager,
and then redeploy the web deployment.
## Web Deployment
@@ -56,5 +58,5 @@ To upgrade each deployment to a new version of Concourse:
back to its default number of replicas.
1. [infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh)
2. [infra/concourse-install/deploy-concourse-internal-workers.sh](./concourse-install/deploy-concourse-internal-workers.sh)
3. Commit and push those script changes.
3. Commit and push those script changes.
4. Trigger the CI jobs to scale the internal workers back to the desired number as needed.

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
@@ -66,7 +66,7 @@ CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
WEB_IP_ADDRESS=$(yq eval '.web-ip.value' "$TERRAFORM_OUTPUT_FILE")
WEB_HOSTNAME=$(yq eval '.web-hostname.value' "$TERRAFORM_OUTPUT_FILE")
WEB_HOSTNAME="ci.pinniped.broadcom.net"
DB_IP_ADDRESS=$(yq eval '.database-ip.value' "$TERRAFORM_OUTPUT_FILE")
DB_USERNAME=$(yq eval '.database-username.value' "$TERRAFORM_OUTPUT_FILE")
DB_PASSWORD=$(yq eval '.database-password.value' "$TERRAFORM_OUTPUT_FILE")
@@ -83,9 +83,18 @@ chmod 0600 "$KUBECONFIG"
BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml"
gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE"
# Download the TLS cert for ci.pinniped.broadcom.net which was manually added as a secret.
TLS_SECRETS_FILE="$DEPLOY_TEMP_DIR/tls-cert.yaml"
gcloud secrets versions access latest --secret="ci-pinniped-broadcom-net-tls-cert" --project "$PROJECT" >"$TLS_SECRETS_FILE"
TLS_CERT="$(yq eval '."cert.pem"' "$TLS_SECRETS_FILE")"
TLS_KEY="$(yq eval '."key.pem"' "$TLS_SECRETS_FILE")"
# Dump out the cluster info for diagnostic purposes.
kubectl cluster-info
# Configure ip-masq-agent to allow the pods to reach the private IP of the Cloud SQL server.
kubectl apply -f "$script_dir/web/ip-masq-agent-configmap.yaml"
# Some of the configuration options used below were inspired by how HushHouse runs on GKE.
# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/hush-house/values.yaml
@@ -111,6 +120,8 @@ helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \
--set secrets.postgresCaCert="$DB_CA_CERT" \
--set secrets.postgresClientCert="$DB_CLIENT_CERT" \
--set secrets.postgresClientKey="$DB_CLIENT_KEY" \
--set secrets.webTlsCert="$TLS_CERT" \
--set secrets.webTlsKey="$TLS_KEY" \
--post-renderer "$script_dir/web/ytt-helm-postrender-web.sh"
# By default, it will not be possible for the autoscaler to scale down to one node.

View File

@@ -1,4 +1,4 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-worker".
@@ -13,7 +13,7 @@ postgresql:
worker:
# In an effort to save money, default to 1 worker.
replicas: 1
nodeSelector: { cloud.google.com/gke-nodepool: workers-2 } # the name of the nodepool from terraform
nodeSelector: { cloud.google.com/gke-nodepool: workers-1 } # the name of the nodepool from terraform
hardAntiAffinity: true
minAvailable: 0
terminationGracePeriodSeconds: 3600
@@ -28,7 +28,7 @@ worker:
# searches using the more commonly used names for those units, e.g. searching "29061248 KiB to GiB".
#
# Limit to using all available CPUs and most of the available memory in our e2-standard-8 VM nodes.
# According to the "Allocatable" section of the "kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-2" output,
# According to the "Allocatable" section of the "kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-1" output,
# each node has 29061248 Ki, which is equal to 27.7149658203 Gi of memory allocatable,
# and each node has 7910m cpu allocatable.
#

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
@@ -16,10 +16,10 @@ fi
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
ZONE="us-west1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
NODEPOOL="workers-1"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
@@ -14,13 +14,19 @@ if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
exit 1
fi
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Define some env vars
source "$script_dir/../../hack/fly-helpers.sh"
# Setup and login if needed
"$script_dir/../../hack/setup-fly.sh"
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
ZONE="us-west1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
TARGET="pinniped"
NODEPOOL="workers-1"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \
@@ -79,10 +85,7 @@ kubectl get nodes \
echo
echo "Current fly workers..."
if ! fly --target "$TARGET" status >/dev/null; then
fly --target "$TARGET" login
fi
fly --target "$TARGET" workers
$FLY_CLI --target "$CONCOURSE_TARGET" workers
echo ""
echo "Note: If the number of pods, nodes, and fly workers are not all the same,"

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
@@ -16,10 +16,10 @@ fi
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
ZONE="us-west1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
NODEPOOL="workers-1"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \

View File

@@ -0,0 +1,12 @@
# see internal doc https://bsg-confluence.broadcom.net/pages/viewpage.action?pageId=689720737
# Overrides the configuration of GKE's ip-masq-agent DaemonSet (which reads the
# "ip-masq-agent" ConfigMap in kube-system). Applied by the web deploy script
# before installing Concourse, so that pods can reach the Cloud SQL server's
# private IP without being masqueraded.
apiVersion: v1
kind: ConfigMap
metadata:
name: ip-masq-agent
namespace: kube-system
data:
# CIDRs listed under nonMasqueradeCIDRs keep their pod source IP (no SNAT).
# 240.0.0.0/4 is needed to allow the pod to reach the Cloud SQL server's private IP.
config: |
nonMasqueradeCIDRs:
- 240.0.0.0/4
# How often the agent re-reads this config from the ConfigMap.
resyncInterval: 60s

View File

@@ -1,4 +1,4 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-web".
@@ -27,8 +27,12 @@ web:
service:
api:
type: LoadBalancer
annotations:
networking.gke.io/load-balancer-type: "Internal"
workerGateway:
type: LoadBalancer
annotations:
networking.gke.io/load-balancer-type: "Internal"
# The first node in the generic-1 nodepool (using e2-highcpu-8 VM) has lots of GKE and Kubernetes pods running on it.
# According to the "allocatable" section of the "kubectl get node -o yaml" output, the first node has
# 7910m cpu and 6179084 Ki memory (which is about 5.893 Gi).
@@ -67,7 +71,10 @@ concourse:
# Because it was created in the org, it should have permissions to read team memberships during a login.
# The client ID and client secret are stored in the bootstrap secret in the Secrets Manager
# (see infra/README.md for more info about the bootstrap secret).
team: vmware-tanzu:pinniped-owners
# TODO: this needs to change to be the team in the vmware org. Also need to change the clientID and clientSecret in the concourse-install-bootstrap GCP secret for one in the vmware org.
# team: vmware-tanzu:pinniped-owners
# Temporarily just list which specific users are admins instead.
user: cfryanr,joshuatcasey
github:
enabled: true
bindPort: 80
@@ -94,9 +101,6 @@ concourse:
enabled: true
kubernetes:
keepNamespaces: true
letsEncrypt:
enabled: true
acmeURL: "https://acme-v02.api.letsencrypt.org/directory"
tls:
enabled: true
bindPort: 443

View File

@@ -2,63 +2,60 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/google" {
version = "5.11.0"
constraints = "~> 5.0"
version = "6.40.0"
constraints = "~> 6.0"
hashes = [
"h1:Ezg3fsY84CB/2P00ZwQEECuIfJd6UUYs5tIptN2kzsE=",
"h1:FV7t+G3+rJD3aN5Yr+FY8/cDG+FKhFCt8XvLJkqCcY8=",
"zh:444815a900947de3cb4e3aac48bf8cd98009130c110e3cee1e72698536046fee",
"zh:45ca22a2f44fe67f9ff71528dcd93493281e34bff7791f5eb24c86e76f32956d",
"zh:53e2e33824743e9e620454438de803de10572bd79ce16034abfc91ab1877be7a",
"zh:5eb699830a07320f896a3da7cdee169ab5fa356a6d38858b8b9337f1e4e30904",
"zh:6837cd8d9d63503e138ec3ebf52f850ca786824a3b0d5b9dfecec303f1656ca6",
"zh:7adde1fe2fc8966812bcbfeb24580cbb53f2f5301bd793eaa70ad753ba6b2d3c",
"zh:92052fd7ec776cd221f19db4624ae4ed1550c95c2984c9f3b6c54cea8896812b",
"zh:b0305aab81220b7d5711225224f5baad8fc6f5dd3a8199073966af8a151e2932",
"zh:e7b5aa624d89664803dd545f261261806b7f6607c19f6ceaf61f9011b0e02e63",
"h1:wCQBpao7//BaEDQLdmqfcHlTqABT7BeeKdPJrf8V21w=",
"zh:0c304517a2a26f78d058491a2041088dcd4dec9207219ca75a644e734e8394a8",
"zh:2df309e86e0d2edc65099e0e47bc9bc91172dce62e59d579dc5132337719d7f8",
"zh:4dfb3c5775dcae2f93f3e9affe52a2987aba76b35844883b188d236f5fb485d0",
"zh:5943c1fe00bbd63c5be3813c16ba225ca10b1d694e8ead0e8fc4ebd54e9d0b9c",
"zh:6ed84e95400f4e27b32fa56832ea47a350cbe581fbae76f5ddabf98f18f44f02",
"zh:77bccedaf8fd1807a8020baf422897e5487f5f182b13ee29a6e8c58024ee22be",
"zh:9e486f71a714f10cd0d0c0df04d4a8f2cd5c33518131a214b11f3624c683ea10",
"zh:c4598d6c6595e8a1cdd637ffc9af4381cb1cb856f9c14ea5dcc675378b01cca6",
"zh:dcba35d7cd1793b6ca2ef63ccd1737ce669b31d14161f0a0f2e3aa8d0d5c8793",
"zh:ed661f2c233bcd56360731f7f21dca8a94f58ec27f4e3b468d27711938812146",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fbc04244e1f666ce0320b4eb0efb9cae460a5d688fc039637c8fe745665c19e5",
"zh:ff3553298929629ae2ad77000b3e050394e2f00c04e90a24268e3dfe6a6342c4",
"zh:fe09c7cb7a448aab121bad8ca857acbf33e00bbc0c2b25824c71ff3be2e629e4",
]
}
provider "registry.terraform.io/hashicorp/google-beta" {
version = "5.11.0"
constraints = "~> 5.0"
version = "6.40.0"
constraints = "~> 6.0"
hashes = [
"h1:izjzT8NnaePEXKbLQa+D4gw7HUYvK7NgIL3TJ23rjZk=",
"h1:teaW5i4Za+IHUuYSg3mRwJwVdLwKbND9UdCwG4MBvkY=",
"zh:0efa82e6fe2c83bd5280c3009db1c3acc9cdad3c9419b6ec721fbefc9f832449",
"zh:371df01e4f38b828195d115c9a8bebddebec4d34e9ef74cf3a79161da08e44b2",
"zh:5089967c420c5e4a4ba0d4c8c6ca344c7bb2476ec928f8319856260eacded369",
"zh:798a65c79386d356d6a097de680f4ece8982daae1cb0e10d6c53b383efef45f0",
"zh:90178911ac0e624c69a54a992fb3425ef09fdfb3e34b496ad7b6e168e80d4e0c",
"zh:b59c60f8479b8f0c8e91a93a4e707ce6d17c8e50e2f5afaf1d9a03c03cfedbf8",
"zh:c7f946282d80223ab3a6b284c22e4b53ffcd7b1a02449bb95a350007f30c87dc",
"zh:cd60e76987c2fdce2c84219eaff9390cd135f88aa9a27bc4d79a8fd4a8d09622",
"zh:de06bfa0393206c0253ebdea70821cb3b08ef87d5d4844be3ae463abfb4e1884",
"zh:de494bad600cca78986ce63d1018f5dbc1a1fcc2d4c41c94c15d5346f2b0dd1e",
"h1:R5yc207FnSQvJxNJwVD6xo0wwXPWI+CsgxXak+skiBs=",
"zh:003d3bfd2a39a950e7e5865e5b74a630594710a21990f892c3fb4c9193f532b0",
"zh:0f1e455cc73e288c8e047dd4587bc0ec7389855d4a949c853adcbf0a4aa19bb2",
"zh:12be1e25e2c51c8fb8dee0f4ed3bb43706b073027a895c6794c2755cbbc05a18",
"zh:3688208f155ea04dbfa3ba08d761cd3ae4ba342d8e5fdb65a659f1d72a8d8fc7",
"zh:4a71281ca84e3ab028a89935779b7cc6417ec9a54da5233a52fa5a062235fc61",
"zh:5c4798d3265d1768c18b8376663e1642c0ad5c554f6670633938b570eee4f6b8",
"zh:64e8d57530352b87480f22efd3cf7c4bca40e8c8fb60118615af761f3c480d6b",
"zh:7a6ebb211ea05acab41bd9f0039155e618f783bc0462d708a7e6c30827dcf644",
"zh:978524cb2a1ceab019232f66e29eed5b4bbc70ba71837c824935a139b86010d4",
"zh:9cad3dbf1b98ae30a5c27b10c7a6c85ebce9fb3332a65ac868e3499664883d26",
"zh:f0da73f9d9d53d499b69f11421a56fd48ba6aff98b33ba1fe2bf4c4cf0f917f1",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f97a8b6e83e0083dcb42a87e8e418ab33f12d641f9cdfdc92d154ba7fd7398fb",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
version = "3.7.2"
hashes = [
"h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=",
"h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=",
"zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
"zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
"zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
"zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
"zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
"zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
"zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
"zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
"zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
"zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
"zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
]
}

View File

@@ -7,22 +7,40 @@ NOTE: Do not manually edit these resources using the Google Cloud UI, API, or CL
Instead, please update the `.tf` files and follow the below steps again.
To run Terraform to create or update the infrastructure:
1. Install the `gcloud` CLI and authenticate as yourself, if you haven't already.
2. Use `gcloud auth application-default login` if you haven't already. This is not optional.
3. Install terraform if you haven't already. Use brew or brew install tfenv and then use tfenv.
At the time of writing this README, we were using Terraform v1.6.6.
4. cd into this directory: `cd infra/terraform/gcloud`
5. Run `terraform init`, if you haven't already for this directory.
6. Run `terraform fmt`.
7. Run `terraform validate`.
8. Run `TF_VAR_project=$PINNIPED_GCP_PROJECT terraform apply`.
1. If running for the first time ever, log in to the GCP Console for the project and
create the GCS storage bucket where terraform will save its state (see [gcp.tf](gcp.tf) for the bucket name).
Creating the bucket in one region (see [variables.tf](variables.tf) for the region name)
with otherwise default options should suffice.
2. Install the `gcloud` CLI and authenticate as yourself using `gcloud auth login`, if you haven't already.
3. Use `gcloud auth application-default login` if you haven't already. This is not optional. If you forget this step,
terraform will complain that it cannot read the state from the GCP bucket file.
4. Install terraform if you haven't already. Use brew to install terraform,
or use `brew install tfenv` and then use tfenv to install Terraform.
At the time of last updating this README, we were using Terraform v1.12.2.
5. cd into this directory: `cd infra/terraform/gcloud`
6. Run `TF_VAR_project=$PINNIPED_GCP_PROJECT terraform init`, if you haven't already for this directory.
This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT`
whose value is the name of the GCP project.
7. Run `terraform fmt`.
8. Run `terraform validate`.
9. Run
`TF_VAR_project=$PINNIPED_GCP_PROJECT TF_VAR_sharedVPCProject=$VPC_PROJECT TF_VAR_networkName=$VPC_NAME TF_VAR_concourseSubnetName=$SUBNET_NAME terraform plan`.
This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT`
whose value is the name of the GCP project, along with `VPC_PROJECT` which is the name
of another GCP project which is sharing a VPC network to our project, `VPC_NAME` which is
the name of that shared VPC, and `SUBNET_NAME` which is the name of a subnet from that
shared VPC that we want to give to our Concourse GKE cluster.
This command is a dry-run which will print what the `apply` command would perform.
10. If you are happy with the output of `terraform plan`, then run
`TF_VAR_project=$PINNIPED_GCP_PROJECT TF_VAR_sharedVPCProject=$VPC_PROJECT TF_VAR_networkName=$VPC_NAME TF_VAR_concourseSubnetName=$SUBNET_NAME terraform apply`
to really create/update/delete the resources.
If you do not need to run `terraform apply` because someone else has already done that,
then you still need to follow the above directions up to and including running `terraform init`
to set up terraform on your computer.
To delete the entire Concourse deployment and all its related cloud infrastructure,
use `terraform destroy`. There is no way to undo this action. This will also delete the Cloud SQL
database which contains all CI job history.
To delete the entire Concourse deployment and all its related cloud infrastructure, use `terraform destroy`.
You may need to use `terraform apply` to set `deletion_protection=false` on some resources first (see Terraform docs).
There is no way to undo `terraform destroy`. This will also delete the Cloud SQL database which contains all CI job
history.

View File

@@ -1,26 +1,26 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Use our pre-existing DNS zone.
data "google_dns_managed_zone" "main" {
name = var.dns-zone
# "data" reads a pre-existing resource without trying to manage its state.
# This subnet is shared with us from another GCP project.
data "google_compute_subnetwork" "existing_subnet_for_concourse" {
project = var.sharedVPCProject
name = var.concourseSubnetName
}
# Reserved external static IPv4 address for the `web` instances.
# This is needed so that we can have a static IP for `ci.pinniped.dev`.
# Reserved internal static IPv4 address for the `web` instances.
# This is needed so that we can have a static IP for `ci.pinniped.broadcom.net`.
resource "google_compute_address" "main" {
name = "${var.subdomain}-${var.dns-zone}"
}
# Make a DNS A record for our subdomain to point at our new static IP.
resource "google_dns_record_set" "main" {
name = "${var.subdomain}.${data.google_dns_managed_zone.main.dns_name}"
type = "A"
ttl = 300
managed_zone = data.google_dns_managed_zone.main.name
rrdatas = [
google_compute_address.main.address,
]
name = "ci-pinniped-dev"
description = "static IP address reserved for Concourse web interface"
subnetwork = data.google_compute_subnetwork.existing_subnet_for_concourse.id
address_type = "INTERNAL"
# Allow it to be shared by multiple load balancers (each with different ports).
# We will have one for web and one for web-worker-gateway.
purpose = "SHARED_LOADBALANCER_VIP"
# Manually picked an IP from the range that did not cause an error when entered
# into GCP's "VPC Network / IP address / Reserve internal static IP" UI for this subnet.
address = "10.31.141.90"
}

View File

@@ -1,10 +1,6 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "ip" {
value = google_compute_address.main.address
}
output "hostname" {
value = trimsuffix(google_dns_record_set.main.name, ".")
}

View File

@@ -1,12 +1,12 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "dns-zone" {
description = "Name of the DNS zone"
variable "sharedVPCProject" {
description = "Name of the GCP project which contains the shared VPC."
type = string
}
variable "subdomain" {
description = "Subdomain under the DNS zone to register"
variable "concourseSubnetName" {
description = "Name of the GCP subnet to use for concourse."
type = string
}

View File

@@ -1,32 +1,32 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
module "vpc" {
source = "./vpc"
name = var.name
region = var.region
vms-cidr = "10.10.0.0/16"
pods-cidr = "10.11.0.0/16"
services-cidr = "10.12.0.0/16"
# "data" reads a pre-existing resource without trying to manage its state.
data "google_compute_network" "existing_network" {
project = var.sharedVPCProject
name = var.networkName
}
resource "google_service_account" "default" {
account_id = "${var.name}-sa"
display_name = "GKE Node SA for ${var.name}"
# This subnet is shared with us from another GCP project.
data "google_compute_subnetwork" "existing_subnet" {
project = var.sharedVPCProject
name = var.subnetName
}
data "google_service_account" "default" {
account_id = "terraform"
}
# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster
resource "google_container_cluster" "main" {
# Allow "terraform destroy" for this cluster.
deletion_protection = false
# deletion_protection = false
name = var.name
location = var.zone
network = module.vpc.name
subnetwork = module.vpc.subnet-name
network = data.google_compute_network.existing_network.id
subnetwork = data.google_compute_subnetwork.existing_subnet.id
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. This allows node pools to be added and removed without recreating the cluster.
@@ -34,11 +34,24 @@ resource "google_container_cluster" "main" {
remove_default_node_pool = true
initial_node_count = 1
min_master_version = "1.30.4-gke.1348000"
min_master_version = "1.32.2-gke.1297002"
# Settings for a private cluster.
# See internal doc https://bsg-confluence.broadcom.net/pages/viewpage.action?pageId=689720737
networking_mode = "VPC_NATIVE"
private_cluster_config {
enable_private_endpoint = true
enable_private_nodes = true
}
master_authorized_networks_config {
cidr_blocks {
cidr_block = "10.0.0.0/8"
display_name = "corp internal networks"
}
}
ip_allocation_policy {
cluster_secondary_range_name = module.vpc.pods-range-name
services_secondary_range_name = module.vpc.services-range-name
cluster_secondary_range_name = "pods"
services_secondary_range_name = "services"
}
addons_config {
@@ -84,6 +97,8 @@ resource "google_container_node_pool" "main" {
cluster = google_container_cluster.main.name
name = each.key
max_pods_per_node = 64
autoscaling {
min_node_count = each.value.min
max_node_count = each.value.max
@@ -110,11 +125,13 @@ resource "google_container_node_pool" "main" {
disable-legacy-endpoints = "true"
}
# Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
service_account = google_service_account.default.email
oauth_scopes = [
service_account = data.google_service_account.default.email
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
# Tag to attach appropriate firewall rules.
tags = ["gke-broadcom"]
}
timeouts {

View File

@@ -1,10 +1,6 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "vpc-uri" {
value = module.vpc.uri
}
output "cluster-name" {
value = google_container_cluster.main.name
}

View File

@@ -1,4 +1,4 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
@@ -11,11 +11,6 @@ variable "zone" {
description = "The zone where the cluster should live."
}
variable "region" {
default = ""
description = "The region in which the cluster should be located at."
}
variable "project" {
description = "The Google GCP project to host the resources."
}
@@ -23,3 +18,18 @@ variable "project" {
variable "node-pools" {
description = "A list of node pool configurations to create and assign to the cluster."
}
variable "sharedVPCProject" {
description = "Name of the GCP project which contains the shared VPC."
type = string
}
variable "networkName" {
description = "Name of the shared VPC network to use for the cluster."
type = string
}
variable "subnetName" {
description = "Name of the GCP subnet to use for the cluster."
type = string
}

View File

@@ -1,69 +0,0 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
resource "google_compute_network" "main" {
name = var.name
auto_create_subnetworks = "false"
}
resource "google_compute_subnetwork" "main" {
name = "${var.name}-sn-1"
ip_cidr_range = var.vms-cidr
network = google_compute_network.main.name
region = var.region
secondary_ip_range = [
{
range_name = var.pods-range-name
ip_cidr_range = var.pods-cidr
},
{
range_name = var.services-range-name
ip_cidr_range = var.services-cidr
}
]
}
resource "google_compute_firewall" "internal-ingress" {
name = "${var.name}-internal"
network = google_compute_network.main.name
direction = "INGRESS"
source_ranges = [
var.vms-cidr,
var.pods-cidr,
var.services-cidr,
]
allow {
protocol = "icmp"
}
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
}
resource "google_compute_firewall" "external-ingress" {
name = "${var.name}-external"
network = google_compute_network.main.name
direction = "INGRESS"
allow {
protocol = "icmp"
}
allow {
protocol = "tcp"
ports = ["22"]
}
source_ranges = ["0.0.0.0/0"]
}

View File

@@ -1,22 +0,0 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "name" {
value = google_compute_network.main.name
}
output "subnet-name" {
value = google_compute_subnetwork.main.name
}
output "pods-range-name" {
value = var.pods-range-name
}
output "services-range-name" {
value = var.services-range-name
}
output "uri" {
value = google_compute_network.main.self_link
}

View File

@@ -1,32 +0,0 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
description = "TODO"
}
variable "region" {
description = "TODO"
}
variable "vms-cidr" {
description = "TODO"
}
variable "pods-cidr" {
description = "TODO"
}
variable "pods-range-name" {
default = "pods-range"
description = "TODO"
}
variable "services-cidr" {
description = "TODO"
}
variable "services-range-name" {
default = "services-range"
description = "TODO"
}

View File

@@ -1,4 +1,4 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# A piece of randomization that gets consumed by the
@@ -10,7 +10,29 @@ resource "random_id" "instance-name" {
byte_length = 4
}
# "data" reads a pre-existing resource without trying to manage its state.
data "google_compute_network" "private_network" {
provider = google-beta
project = var.sharedVPCProject
name = var.networkName
}
# This API needs to be enabled in our project before creating our Cloud SQL instance,
# or else we get error "googleapi: Error 400: Invalid request: Incorrect Service Networking config
# for instance: xxx:xxx:SERVICE_NETWORKING_NOT_ENABLED., invalid".
# See https://stackoverflow.com/a/66537918.
resource "google_project_service" "project" {
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}
resource "google_sql_database_instance" "main" {
provider = google-beta
# Allow "terraform destroy" for this db.
# deletion_protection = false
name = "${var.name}-${random_id.instance-name.hex}"
region = var.region
database_version = "POSTGRES_15"
@@ -20,6 +42,7 @@ resource "google_sql_database_instance" "main" {
disk_autoresize = true
disk_type = "PD_SSD"
tier = "db-custom-${var.cpus}-${var.memory_mb}"
edition = "ENTERPRISE" # cheaper than ENTERPRISE_PLUS
database_flags {
name = "log_min_duration_statement"
@@ -32,13 +55,14 @@ resource "google_sql_database_instance" "main" {
}
ip_configuration {
ipv4_enabled = "true"
require_ssl = "true"
# Disable assignment of a public IP address
ipv4_enabled = false
authorized_networks {
name = "all"
value = "0.0.0.0/0"
}
ssl_mode = "ENCRYPTED_ONLY"
private_network = data.google_compute_network.private_network.self_link
enable_private_path_for_google_cloud_services = true
}
backup_configuration {
@@ -68,8 +92,8 @@ resource "random_string" "password" {
# The Concourse ATC's database login. The diff residue here carried both the old
# `password` and new `password_wo` attributes plus a duplicated `instance` line,
# which is invalid HCL (duplicate attribute definitions); keep only the new
# write-only password attribute, which avoids persisting the secret in state.
resource "google_sql_user" "user" {
  name        = "atc"
  instance    = google_sql_database_instance.main.name
  password_wo = random_string.password.result
}
resource "google_sql_ssl_cert" "cert" {

View File

@@ -1,4 +1,4 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "username" {

View File

@@ -1,9 +1,9 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
  default = ""
  # Duplicate `description` lines (old + new diff halves) collapsed to one — HCL
  # forbids defining the same attribute twice in a block.
  description = "The name of the CloudSQL instance to create (ps.: a random ID is appended to this name)."
}
variable "memory_mb" {
@@ -18,20 +18,25 @@ variable "cpus" {
variable "zone" {
  default = ""
  # Duplicate `description` lines (old + new diff halves) collapsed to one.
  description = "The zone where this instance is supposed to be created at (e.g., us-central1-a)."
}
variable "region" {
default = ""
description = "The region where the instance is supposed to be created at (e.g., us-central1)"
}
variable "disk_size_gb" {
default = ""
description = "The disk size in GB's (e.g. 10)"
description = "The region where the instance is supposed to be created at (e.g., us-central1)."
}
variable "max_connections" {
  default = ""
  # Duplicate `description` lines (old + new diff halves) collapsed to one.
  description = "The max number of connections allowed by postgres."
}
# Host project of the shared VPC; required so the db can attach a private IP there.
variable "sharedVPCProject" {
description = "Name of the GCP project which contains the shared VPC."
type = string
}
# Shared VPC network the Cloud SQL instance joins via private services access.
variable "networkName" {
description = "Name of the shared VPC network to use for the db."
type = string
}

View File

@@ -1,17 +1,17 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Provider pins and remote state. The diff residue carried both the pre-move
# ("~> 5", tanzu-user-authentication-terraform-state/pinniped-concourse-jan2024)
# and post-move values as duplicate attributes, which is invalid HCL; keep the
# post-move values.
terraform {
  required_providers {
    google      = "~> 6"
    google-beta = "~> 6"
  }
  backend "gcs" {
    # By not providing credentials, you will use your current identity from the gcloud CLI.
    # credentials = "gcp.json"
    bucket = "pinniped-ci-terraform-state"
    prefix = "pinniped-concourse"
  }
}

View File

@@ -1,23 +1,26 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The static IP and related DNS entry.
# Create the static IP.
# Create the static IP. NOTE(review): the `dns-zone`/`subdomain` arguments were
# interleaved diff leftovers referencing variables removed elsewhere in this
# change (replaced by sharedVPCProject/concourseSubnetName); dropped here —
# confirm against the address module's expected inputs.
module "address" {
  source              = "./address"
  sharedVPCProject    = var.sharedVPCProject
  concourseSubnetName = var.concourseSubnetName
}
# Instantiates the GKE Kubernetes cluster.
# Create the GKE Kubernetes cluster.
module "cluster" {
source = "./cluster"
name = "pinniped-concourse"
project = var.project
region = var.region
zone = var.zone
sharedVPCProject = var.sharedVPCProject
networkName = var.networkName
subnetName = var.concourseSubnetName
node-pools = {
"generic-1" = {
@@ -30,10 +33,10 @@ module "cluster" {
max = 2
min = 1
preemptible = false
version = "1.30.4-gke.1348000"
version = "1.32.2-gke.1297002"
},
"workers-2" = {
"workers-1" = {
auto-upgrade = true
disk-size = "100"
disk-type = "pd-ssd"
@@ -43,7 +46,7 @@ module "cluster" {
max = 5
min = 1
preemptible = false
version = "1.30.4-gke.1348000"
version = "1.32.2-gke.1297002"
},
}
}
@@ -52,10 +55,14 @@ module "cluster" {
# Provision the Cloud SQL database for Concourse. Duplicate `name`/`region`/`zone`
# argument lines (old + new diff halves) collapsed — HCL forbids repeating an
# argument within a block.
module "database" {
  source           = "./database"
  name             = "pinniped-concourse"
  region           = var.region
  zone             = var.zone
  sharedVPCProject = var.sharedVPCProject
  networkName      = var.networkName
  cpus             = "4"
  memory_mb        = "7680"
  max_connections  = "300"
}

View File

@@ -1,14 +1,22 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Echo back the deployment coordinates so downstream tooling can read them
# from `terraform output`.
output "project" {
value = var.project
}
output "region" {
value = var.region
}
output "zone" {
value = var.zone
}
# Static IP and DNS name fronting the Concourse web UI.
output "web-ip" {
value = module.address.ip
}
output "web-hostname" {
value = module.address.hostname
}
output "database-ip" {
value = module.database.ip
}
@@ -37,18 +45,6 @@ output "database-private-key" {
value = module.database.private-key
}
# NOTE(review): this span also contained second definitions of the "project",
# "region", and "zone" outputs — duplicates of the ones declared at the top of
# this file, which Terraform rejects as duplicate output definitions. Removed;
# only "cluster-name" remains.
output "cluster-name" {
  value = module.cluster.cluster-name
}

View File

@@ -1,28 +1,36 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "project" {
  # Duplicate `description` lines (old + new diff halves) collapsed to one.
  description = "The Google GCP project to host the resources."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_project for all terraform commands.
}
variable "region" {
  # Duplicate `description`/`default` lines collapsed; kept the post-move
  # default (us-west1) and fixed the grammar of the description.
  description = "The cloud provider region where the resources are created."
  default     = "us-west1"
}
variable "zone" {
  # Duplicate `description`/`default` lines collapsed; kept the post-move
  # default (us-west1-c).
  description = "The cloud provider zone where the resources are created."
  default     = "us-west1-c"
}
# NOTE(review): this span interleaved the removed `dns-zone` variable with the
# added `sharedVPCProject` variable (diff halves without +/- markers); kept the
# added declaration — confirm `dns-zone` has no remaining references.
variable "sharedVPCProject" {
  description = "Name of the GCP project which contains the shared VPC."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_sharedVPCProject for all terraform commands.
}
# NOTE(review): this span interleaved the removed `subdomain` variable with the
# added `networkName` variable (diff halves without +/- markers); kept the
# added declaration — confirm `subdomain` has no remaining references.
variable "networkName" {
  description = "Name of the shared VPC network."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_networkName for all terraform commands.
}
# Subnet (within the shared VPC) that hosts the Concourse deployment.
variable "concourseSubnetName" {
description = "Name of the GCP subnet to use for concourse."
type = string
# Please provide the value of this variable by setting the env var TF_VAR_concourseSubnetName for all terraform commands.
}