initial commit on ci branch: migrates code from private repo

Ryan Richard
2024-10-29 12:24:30 -07:00
parent a3d64bef62
commit 11bd69cf2d
194 changed files with 16873 additions and 16 deletions

21
.gitignore vendored

@@ -1,15 +1,6 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
.idea
.terraform
*.tfstate.*
*.tfstate
kubeconfig.yaml
.DS_Store

602
AD-SETUP.md Normal file

@@ -0,0 +1,602 @@
# Creating an Active Directory server on Google Cloud for Pinniped integration tests
This document describes the steps that were taken to create our test AD server, which is used by the integration tests.
The integration tests use LDAPS and StartTLS to connect to the AD server.
## Create a Windows Server VM and configure it as an AD Domain Controller
The steps in this section were mostly inspired by
https://cloud.google.com/architecture/deploy-an-active-directory-forest-on-compute-engine.
On your Mac, create a VPC, a subnet, firewall rules, an admin password, a reserved static IP, and the VM itself:
```shell
# Login as yourself.
gcloud auth login
# Set some variables.
project="REDACTED" # Change this to be the actual project name before running these commands.
region="us-central1"
zone="us-central1-b"
vpc_name="ad"
# Create VPC.
gcloud compute networks create ${vpc_name} \
--project ${project} \
--description "VPC network to deploy Active Directory" \
--subnet-mode custom
# Create subnet.
# The Google tutorial says to "enable Private Google Access so that Windows can activate without internet access."
gcloud compute networks subnets create domain-controllers \
--project ${project} --region ${region} \
--network ${vpc_name} \
--range "10.0.0.0/28" \
--enable-private-ip-google-access
# Create a firewall rule to allow RDP. Find out what your public IP address is by going to https://whatismyipaddress.com.
# Copy/paste your IPv4 address into this rule. Replace the X.X.X.X placeholder address shown here with your real IP.
gcloud compute firewall-rules create allow-rdp-ingress-to-addc \
--project ${project} \
--direction INGRESS \
--action allow \
--rules tcp:3389 \
--source-ranges "X.X.X.X/32" \
--target-tags ad-domaincontroller \
--network ${vpc_name} \
--priority 10000
# Allow LDAPS (port 636) from the whole internet.
gcloud compute firewall-rules create allow-ldaps-ingress-to-addc \
--project ${project} \
--direction INGRESS \
--action allow \
--rules tcp:636 \
--source-ranges "0.0.0.0/0" \
--target-tags ad-domaincontroller \
--network ${vpc_name} \
--priority 10000
# Allow LDAP (port 389) from the whole internet, to allow the integration tests to use StartTLS.
gcloud compute firewall-rules create allow-ldap-ingress-to-addc \
--project ${project} \
--direction INGRESS \
--action allow \
--rules tcp:389 \
--source-ranges "0.0.0.0/0" \
--target-tags ad-domaincontroller \
--network ${vpc_name} \
--priority 10000
# Reserve a static public IP address for the domain controller VM.
addressOfDc1=$(gcloud compute addresses create ad-domain-controller \
--project ${project} --region ${region} \
--format="value(address)")
# Create an admin password for the Administrator user on Windows, and save it to secrets manager.
password="$(openssl rand -hex 8)-$(openssl rand -hex 8)"
echo -n "$password" > password.tmp
gcloud secrets create active-directory-dc1-password \
--project ${project} \
--data-file password.tmp
rm password.tmp
# This creates a service account called ad-domaincontroller@PROJECT_NAME.iam.gserviceaccount.com
# (where PROJECT_NAME is the actual GCP project name) and saves the account's email address
# in the variable $dcServiceAccount.
dcServiceAccount=$(gcloud iam service-accounts create ad-domaincontroller \
--project ${project} \
--display-name "AD Domain Controller VM Service Account" \
--format "value(email)")
# Allow the new service account to temporarily read the Windows admin password from secret manager.
# The following `date` command might only work on MacOS. It prints the time like this: 2024-10-23T19:20:36Z
one_hour_from_now=$(TZ=UTC date -v "+1H" +"%Y-%m-%dT%H:%M:%SZ")
gcloud secrets add-iam-policy-binding active-directory-dc1-password \
--project ${project} \
"--member=serviceAccount:$dcServiceAccount" \
--role=roles/secretmanager.secretAccessor \
--condition="title=Expires after 1h,expression=request.time < timestamp('$one_hour_from_now')"
# Optional: list all bindings to see the binding that you just created.
gcloud secrets get-iam-policy active-directory-dc1-password \
--project ${project}
# Create a powershell startup script in a local file.
cat <<"EOF" > dc-startup.ps1
$ErrorActionPreference = "Stop"
#
# Only run the script if the VM is not a domain controller already.
#
if ((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 2) {
exit
}
#
# Read configuration from metadata.
#
Import-Module "${Env:ProgramFiles}\Google\Compute Engine\sysprep\gce_base.psm1"
Write-Host "Reading metadata..."
$ActiveDirectoryDnsDomain = Get-MetaData -Property "attributes/ActiveDirectoryDnsDomain" -instance_only
$ActiveDirectoryNetbiosDomain = Get-MetaData -Property "attributes/ActiveDirectoryNetbiosDomain" -instance_only
$ProjectId = Get-MetaData -Property "project-id" -project_only
$AccessToken = (Get-MetaData -Property "service-accounts/default/token" | ConvertFrom-Json).access_token
#
# Read the DSRM password from secret manager.
#
Write-Host "Reading secret from secret manager..."
$Secret = (Invoke-RestMethod `
-Headers @{
"Metadata-Flavor" = "Google";
"x-goog-user-project" = $ProjectId;
"Authorization" = "Bearer $AccessToken"} `
-Uri "https://secretmanager.googleapis.com/v1/projects/$ProjectId/secrets/active-directory-dc1-password/versions/latest:access")
$DsrmPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Secret.payload.data))
$DsrmPassword = ConvertTo-SecureString -AsPlainText $DsrmPassword -force
#
# Promote.
#
Write-Host "Setting administrator password..."
Set-LocalUser -Name Administrator -Password $DsrmPassword
Write-Host "Creating a new forest $ActiveDirectoryDnsDomain ($ActiveDirectoryNetbiosDomain)..."
Install-ADDSForest `
-DomainName $ActiveDirectoryDnsDomain `
-DomainNetbiosName $ActiveDirectoryNetbiosDomain `
-SafeModeAdministratorPassword $DsrmPassword `
-DomainMode Win2008R2 `
-ForestMode Win2008R2 `
-InstallDns `
-CreateDnsDelegation:$False `
-NoRebootOnCompletion:$True `
-Confirm:$false
#
# Configure DNS.
#
Write-Host "Configuring DNS settings..."
Get-Netadapter| Disable-NetAdapterBinding -ComponentID ms_tcpip6
Set-DnsClientServerAddress `
-InterfaceIndex (Get-NetAdapter -Name Ethernet).InterfaceIndex `
-ServerAddresses 127.0.0.1
#
# Enable LSA protection.
#
New-ItemProperty `
-Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa" `
-Name "RunAsPPL" `
-Value 1 `
-PropertyType DWord
Write-Host "Restarting to apply all settings..."
Restart-Computer
EOF
# Create a domain controller VM.
# E2 is the cheapest VM family. e2-medium has 2 vCPUs (shared with other customers) and 4 GB of memory.
# See https://cloud.google.com/compute/docs/general-purpose-machines#e2-shared-core.
# When we originally set up this VM, we started it as n2-standard-2. After we finished setting up
# everything as shown in this guide, we stopped the VM, changed its type to e2-medium, and started
# it again. It might work fine to create the VM as e2-medium from the beginning, but note that we
# did not actually test that.
gcloud compute instances create active-directory-dc1 \
--project ${project} \
--zone ${zone} \
--image-family windows-2022 \
--image-project windows-cloud \
--machine-type e2-medium \
--tags ad-domaincontroller \
--metadata "ActiveDirectoryDnsDomain=activedirectory.test.pinniped.dev,ActiveDirectoryNetbiosDomain=pinniped-ad,sysprep-specialize-script-ps1=Install-WindowsFeature AD-Domain-Services -IncludeManagementTools; Install-WindowsFeature DNS,disable-account-manager=true" \
--metadata-from-file windows-startup-script-ps1=dc-startup.ps1 \
--address ${addressOfDc1} \
--subnet=domain-controllers \
--service-account "$dcServiceAccount" \
--scopes cloud-platform \
--shielded-integrity-monitoring \
--shielded-secure-boot \
--shielded-vtpm
# Monitor the initialization process of the first domain controller by viewing its serial port output.
# It should install the sysprep stuff, reboot, run our startup script, and then reboot again.
gcloud compute instances tail-serial-port-output active-directory-dc1 \
--project ${project} \
--zone ${zone}
# Use CTRL-C to cancel tailing the output.
```
## Update DNS
Update the Cloud DNS entry for `activedirectory.test.pinniped.dev.` to be an "A" record pointing to the
public static IP of the VM. This is easier to do in the Cloud DNS UI in your browser.
It would take many gcloud CLI commands to accomplish the same task.
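If you do want to script it, here is a minimal sketch using `gcloud dns`. The managed zone name below is a
placeholder (an assumption), so list your zones first to find the real name. This also assumes the `${project}`
and `${addressOfDc1}` variables from the earlier shell session are still set.
```shell
# Find the managed zone that serves test.pinniped.dev.
gcloud dns managed-zones list --project ${project}
# Point the A record at the domain controller VM's reserved static IP.
# Use `create` instead of `update` if the record does not exist yet.
gcloud dns record-sets update "activedirectory.test.pinniped.dev." \
  --project ${project} \
  --zone "REPLACE_WITH_REAL_ZONE_NAME" \
  --type A --ttl 300 \
  --rrdatas "${addressOfDc1}"
```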
## Configure test users and groups
Make sure you have an RDP client installed. On a Mac, you can install Microsoft's RDP client from the App Store
(it was recently renamed "Windows App").
Note: To copy/paste in the RDP client, you may need to use CTRL-C/CTRL-V if CMD-C/CMD-V don't work.
RDP into the Windows VM. To connect, use `activedirectory.test.pinniped.dev` as the name of the server,
the username `Administrator`, and the password from the `active-directory-dc1-password` entry in Secrets Manager.
You can ignore the RDP certificate error.
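If you need to retrieve that password, you can read it directly from Secret Manager on your Mac (this uses the
secret name created earlier in this guide):
```shell
# Print the Windows Administrator password that was saved to Secret Manager earlier in this guide.
gcloud secrets versions access latest --secret active-directory-dc1-password --project ${project}
```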
In your RDP session, open Powershell. Then run the following commands to add some users and groups,
change the password policy, and grant some permissions.
Before running the commands, replace the redacted passwords as follows:
- The value for `REDACTED_BIND_USER_PASSWORD` can be found at `aws-ad-bind-account-password` in the `concourse-secrets` secret
- The value for `REDACTED_PINNY_USER_PASSWORD` can be found at `aws-ad-user-password` in the `concourse-secrets` secret
- The value for `REDACTED_DEACTIVATED_USER_PASSWORD` can be found at `aws-ad-deactivated-user-password` in the `concourse-secrets` secret
```shell
New-ADOrganizationalUnit -Name "pinniped-ad" `
-ProtectedFromAccidentalDeletion $false
New-ADOrganizationalUnit -Name "Users" `
-Path "OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
-ProtectedFromAccidentalDeletion $false
New-ADOrganizationalUnit -Name "test-users" `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
-Description "integration tests will create and delete ephemeral users here" `
-ProtectedFromAccidentalDeletion $false
# Print all OUs to validate that they were created.
Get-ADOrganizationalUnit -Filter *
New-ADUser -Name "Bind User" -SamAccountName "bind-user" -GivenName "Bind" -Surname "User" -DisplayName "Bind User" `
-UserPrincipalName "bind-user@activedirectory.test.pinniped.dev" `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
-AccountPassword (ConvertTo-SecureString "REDACTED_BIND_USER_PASSWORD" -AsPlainText -Force) `
-Enabled $true
# Note that the value of EmailAddress is not a real email address, but that's okay.
New-ADUser -Name "Pinny Seal" -SamAccountName "pinny" -GivenName "Pinny" -Surname "Seal" -DisplayName "Pinny Seal" `
-UserPrincipalName "pinny@activedirectory.test.pinniped.dev" `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
-EmailAddress "tanzu-user-authentication@groups.vmware.com" `
-AccountPassword (ConvertTo-SecureString "REDACTED_PINNY_USER_PASSWORD" -AsPlainText -Force) `
-Enabled $true
New-ADUser -Name "Deactivated User" -SamAccountName "deactivated-user" -GivenName "Deactivated" -Surname "User" -DisplayName "Deactivated User" `
-UserPrincipalName "deactivated-user@activedirectory.test.pinniped.dev" `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
-AccountPassword (ConvertTo-SecureString "REDACTED_DEACTIVATED_USER_PASSWORD" -AsPlainText -Force) `
-Enabled $false
# Take note of the pinny account's ObjectGUID. You will need to edit the concourse-secrets secret later to update this GUID value.
# This value should look something like "288188dd-ab76-4f61-b6e4-c72e081502c5".
Get-ADUser pinny -Properties * | Select SamaccountName,ObjectGUID
# Print all users to validate that they were created.
Get-ADUser -Filter *
New-ADGroup -Name "Marine Mammals" -SamAccountName "Marine Mammals" -DisplayName "Marine Mammals" `
-GroupCategory Security -GroupScope Global `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
Add-ADGroupMember -Identity "Marine Mammals" -Members "pinny"
New-ADGroup -Name "Mammals" -SamAccountName "Mammals" -DisplayName "Mammals" `
-GroupCategory Security -GroupScope Global `
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
Add-ADGroupMember -Identity "Mammals" -Members "Marine Mammals"
# Change the default password policy. There are some integration tests that rely on this.
# This is the equivalent of doing this in the Windows "Active Directory Administrative Center" UI:
# check "enforce account lockout policy", give it 20 failed attempts and a 15-minute reset, then
# uncheck "enforce minimum password age" so we can change the password immediately upon creating a user.
Set-ADDefaultDomainPasswordPolicy -Identity "activedirectory.test.pinniped.dev" `
-LockoutThreshold 20 -LockoutDuration "00:15:00" -LockoutObservationWindow "00:15:00" `
-MinPasswordAge 0
# Print the policy to validate that it was updated.
Get-ADDefaultDomainPasswordPolicy
# We need to allow the bind-user to create/delete/edit users and groups within the test-users OU, because several
# integration tests want to create/delete/edit ephemeral test users and groups.
# These access control steps were inspired by https://the-itguy.de/delegate-access-in-active-directory-with-powershell/.
# This is intended to be the equivalent of using the UI to assign permissions like this: right click on "test-users",
# select Delegate Control, select "bind-user" as the user, select "create, delete and manage user accounts" and
# "reset user passwords" as the tasks to delegate.
function New-ADDGuidMap
{
$rootdse = Get-ADRootDSE
$guidmap = @{ }
$GuidMapParams = @{
SearchBase = ($rootdse.SchemaNamingContext)
LDAPFilter = "(schemaidguid=*)"
Properties = ("lDAPDisplayName", "schemaIDGUID")
}
Get-ADObject @GuidMapParams | ForEach-Object { $guidmap[$_.lDAPDisplayName] = [System.GUID]$_.schemaIDGUID }
return $guidmap
}
$GuidMap = New-ADDGuidMap
$BindUserSID = New-Object System.Security.Principal.SecurityIdentifier (Get-ADUser "bind-user").SID
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$ace1 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["user"]
$ace2 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["user"], "All"
$ace3 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["group"]
$ace4 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["group"], "All"
$acl.AddAccessRule($ace1)
$acl.AddAccessRule($ace2)
$acl.AddAccessRule($ace3)
$acl.AddAccessRule($ace4)
Set-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" -AclObject $acl
# Print the access control rules that were just applied.
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$acl.Access | Where-Object { $_.IdentityReference -eq "pinniped-ad\bind-user" }
```
If you would like to see these OUs, users, and groups in the UI, you can open the "Active Directory Users and Computers"
app in your RDP session.
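As a quick alternative to the UI, you can also verify the memberships from the same Powershell session.
This is just a sketch using the standard Active Directory Powershell cmdlets:
```shell
# Expect to see "Marine Mammals" among pinny's direct group memberships.
Get-ADPrincipalGroupMembership -Identity "pinny" | Select-Object Name
# Expect to see "Marine Mammals" listed as a member of the nested "Mammals" group.
Get-ADGroupMember -Identity "Mammals" | Select-Object Name
```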
## Configure a CA and a serving certificate for LDAPS
Now we need to create and configure a TLS serving certificate for LDAPS.
The certificate needs to include two hostnames. One of the hostnames is the name that the AD server
thinks is its own hostname (`active-directory-dc1.activedirectory.test.pinniped.dev`).
This is how the AD server will decide to use this cert for the LDAPS port.
The other hostname is the one that clients will use when making connections from the outside
(`activedirectory.test.pinniped.dev`) so they can validate the server certificate.
The steps here were inspired by https://gist.github.com/magnetikonline/0ccdabfec58eb1929c997d22e7341e45.
On your Mac:
```shell
# On your Mac: Create a self-signed CA public/private keypair.
openssl req -x509 -newkey rsa:4096 \
-keyout ad-ca.key -out ad-ca.crt \
-sha256 -days 36500 -nodes \
-subj "/C=US/ST=California/L=San Francisco/O=Pinniped/OU=Pinniped CI/CN=Pinniped AD CA"
# Copy the public key to your clipboard.
cat ad-ca.crt | pbcopy
```
In Powershell terminal:
```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the public
# key again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\ca.crt"
# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\ca.crt"
# Import root certificate into trusted store of domain controller in your Powershell terminal:
Import-Certificate -FilePath "C:\users\administrator\desktop\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root
```
If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
and look in the folder called "Trusted Root Certification Authorities\Certificates".
If the UI was already open, click the refresh button.
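Alternatively, a quick check from the same Powershell terminal (a sketch):
```shell
# Expect to see one certificate whose subject includes "Pinniped AD CA".
Get-ChildItem Cert:\LocalMachine\Root | Where-Object { $_.Subject -like "*Pinniped AD CA*" }
```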
Copy the following file contents to your clipboard:
```shell
[Version]
Signature="$Windows NT$"
[NewRequest]
Subject = "CN=activedirectory.test.pinniped.dev"
KeySpec = 1
KeyLength = 2048
Exportable = TRUE
MachineKeySet = TRUE
SMIME = FALSE
PrivateKeyArchive = FALSE
UserProtected = FALSE
UseExistingKeySet = FALSE
ProviderName = "Microsoft RSA SChannel Cryptographic Provider"
ProviderType = 12
RequestType = PKCS10
KeyUsage = 0xa0
[EnhancedKeyUsageExtension]
OID = 1.3.6.1.5.5.7.3.1 ; Server Authentication
[Extensions]
2.5.29.17 = "{text}"
_continue_ = "DNS=activedirectory.test.pinniped.dev"
_continue_ = "DNS=active-directory-dc1.activedirectory.test.pinniped.dev"
```
In Powershell terminal:
```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to copy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\request.inf"
# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\request.inf"
# Create a CSR. This command will also generate a private key for the AD server and save it.
certreq -new "C:\users\administrator\desktop\request.inf" "C:\users\administrator\desktop\client.csr"
# Show the CSR.
type "C:\users\administrator\desktop\client.csr"
# Copy the content of this file to your clipboard.
Get-Content "C:\users\administrator\desktop\client.csr" | Set-Clipboard
```
On your Mac:
```shell
# On your Mac, use the CA to issue a serving cert based on the CSR.
pbpaste > client.csr
cat <<EOF > v3ext.txt
keyUsage=digitalSignature,keyEncipherment
extendedKeyUsage=serverAuth
subjectKeyIdentifier=hash
subjectAltName = @alt_names
[alt_names]
DNS.1 = activedirectory.test.pinniped.dev
DNS.2 = active-directory-dc1.activedirectory.test.pinniped.dev
EOF
# Create a cert from the CSR signed by the CA.
openssl x509 \
-req -days 36500 \
-in client.csr -CA ad-ca.crt -CAkey ad-ca.key -extfile v3ext.txt \
-set_serial 01 -out client.crt
# Inspect the generated certificate.
# Ensure the following X509v3 extensions are all present:
# Key Usage: Digital Signature, Key Encipherment
# Extended Key Usage: TLS Web Server Authentication
# Subject Key Identifier
# Subject Alternative Name with 2 DNS hostnames
# Authority Key Identifier
openssl x509 -in client.crt -text
# Copy the generated cert.
cat client.crt | pbcopy
```
In Powershell terminal:
```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\client.crt"
# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\client.crt"
# Add the serving certificate to Windows. This will also automatically associate it to the private key that you
# generated with the previous usage of certreq.
certreq -accept "C:\users\administrator\desktop\client.crt"
# If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
# and look in the folder called "Personal\Certificates". If the UI was already open, click the refresh button.
# Double click on the cert. Ensure that it says, "you have a private key that corresponds to this certificate".
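# As a scripted alternative to the UI check above (a sketch), list the cert and confirm that it has a private key:
Get-ChildItem Cert:\LocalMachine\My | Where-Object { $_.Subject -like "*activedirectory.test.pinniped.dev*" } | Format-List Subject, HasPrivateKey, NotAfter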
# Next, we need to reboot the VM for the cert to get picked up and used for serving incoming LDAPS connections.
# After showing you a warning dialog box, this should terminate your RDP session and stop the VM.
shutdown /s
```
Wait for the VM to stop, then start the VM again from your Mac:
```shell
gcloud compute instances start active-directory-dc1 --project ${project} --zone ${zone}
```
Wait for the VM to finish booting. Then we can confirm that LDAPS is working. On your Mac:
```shell
# Check that serving cert is being returned on the LDAPS port. This command should show the cert chain.
# It should also verify the server cert using our CA. The output should include "Verify return code: 0 (ok)".
openssl s_client -connect activedirectory.test.pinniped.dev:636 -showcerts -CAfile ad-ca.crt < /dev/null
# Unfortunately, the ldapsearch command that comes pre-installed on MacOS does not seem to respect
# the LDAPTLS_CACERT env variable. So it will not be able to validate the server certificates.
# As a workaround, we can use docker to run ldapsearch commands in a linux container.
# Test the regular LDAP port by issuing a query on your Mac. The -ZZ option asks it to use StartTLS.
# This should list all users. Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
ldapsearch -d8 -v -x -ZZ -H 'ldap://activedirectory.test.pinniped.dev' \
-D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
-w 'REDACTED_BIND_USER_PASSWORD' \
-b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
-s sub \
'(objectClass=user)' '*'
# Test the LDAPS port by issuing a query on your Mac. This should list all users.
# Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
ldapsearch -d8 -v -x -H 'ldaps://activedirectory.test.pinniped.dev' \
-D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
-w 'REDACTED_BIND_USER_PASSWORD' \
-b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
-s sub \
'(objectClass=user)' '*'
```
## Update the `concourse-secrets` secret in GCP Secrets Manager
On your Mac:
```shell
# Copy the CA's public cert.
cat ad-ca.crt | base64 | pbcopy
# cd to your local clone of the `ci` branch of the pinniped repo
cd pinniped-ci-branch
# Edit the secret.
./hack/edit-gcloud-secret.sh concourse-secrets
# This opens vim to edit the secret.
# Paste the cert as the value for `aws-ad-ca-data`.
# Also edit the value of `aws-ad-user-unique-id-attribute-value`. The value should be the ObjectGUID of the pinny
# user that you created in the steps above.
# Save your changes, exit vim, and when prompted say that you want to save this as the new version of concourse-secrets.
```
## Confirm that Active Directory integration tests can pass
Use these commands to run all the Active Directory integration tests on your Mac.
The `-run` filter is based on the tests as they existed at the time of writing this doc.
You can find AD tests by searching for `SkipTestWhenActiveDirectoryIsUnavailable`.
On your Mac:
```shell
# Login so we can read the secrets from GCP Secret Manager.
gcloud auth login
# cd to your local git clone
cd pinniped
# Compile and install onto a local kind cluster.
./hack/prepare-for-integration-tests.sh -c --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"
# Run all the tests that depend on AD.
source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration \
-run "/TestSupervisorLogin_Browser/active_directory|/TestE2EFullIntegration_Browser/with_Supervisor_ActiveDirectory|/TestActiveDirectoryIDPPhaseAndConditions_Parallel|/TestSupervisorWarnings_Browser/Active_Directory"
```
## Cleanup
On your Mac:
```shell
# Remove all bindings for the service account from the secret.
# The binding was only needed during the first boot of the VM.
gcloud secrets remove-iam-policy-binding active-directory-dc1-password \
--project ${project} \
--member "serviceAccount:${dcServiceAccount}" --role roles/secretmanager.secretAccessor \
--all
# Remove the firewall rule which allows incoming RDP connections.
# If you need to RDP to this AD VM in the future, then you will need to create
# a new firewall rule to allow it.
gcloud compute firewall-rules delete allow-rdp-ingress-to-addc \
--project ${project} \
--quiet
# Remove all temp files. It's okay to remove the private key for our CA because we
# created certs that are good for 100 years, as long as you have already added the
# public cert to the concourse-secrets secret. If we need to create a new AD VM, we
# can also create a new CA.
rm ad-ca.crt ad-ca.key client.crt client.csr v3ext.txt
```

1
CODE_OF_CONDUCT.md Normal file

@@ -0,0 +1 @@
Please see https://github.com/vmware-tanzu/pinniped/blob/main/CODE_OF_CONDUCT.md

1
CONTRIBUTING.md Normal file

@@ -0,0 +1 @@
Please see https://github.com/vmware-tanzu/pinniped/blob/main/CONTRIBUTING.md

202
LICENSE Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
MAINTAINERS.md Normal file

@@ -0,0 +1 @@
Please see https://github.com/vmware-tanzu/pinniped/blob/main/MAINTAINERS.md

181
README.md

@@ -1 +1,180 @@
# placeholder-name
# Pinniped's `ci` branch
This `ci` branch contains the CI/CD tooling for [Pinniped](https://github.com/vmware-tanzu/pinniped).
The documentation and code in this branch are mainly intended for the maintainers of Pinniped.
This branch is not intended to be merged to the `main` branch.
The code in the branch previously lived in a private repository. It was made public by moving
the code into the `ci` branch of the Pinniped repository in late 2024. The previous git history
for these files was not copied from the private repository at the time of this migration.
## Reporting an issue in this branch
Found a bug or would like to make an enhancement request?
Please report issues in [this repo](https://github.com/vmware-tanzu/pinniped).
## Reporting security vulnerabilities
Please follow the procedure described in [SECURITY.md](https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md).
## Creating a release
When the team is preparing to ship a release, a maintainer will create a new
GitHub [Issue](https://github.com/vmware-tanzu/pinniped/issues/new/choose) in this repo to
collaboratively track progress on the release checklist. As tasks are completed,
the team will check them off. When all the tasks are completed, the issue is closed.
The release checklist is committed to this repo as an [issue template](https://github.com/vmware-tanzu/pinniped/tree/main/.github/ISSUE_TEMPLATE/release_checklist.md).
## Pipelines
Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD.
Our Concourse can be found at [ci.pinniped.dev](https://ci.pinniped.dev).
The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible.
- `main`
This is the main pipeline that runs on merges to `main`. It builds, tests, and (when manually triggered) releases from main.
- `pull-requests`
This is a pipeline that triggers for each open pull request. It runs a smaller subset of the integration tests and validations than the `main` pipeline.
- `dockerfile-builders`
This pipeline builds a bunch of custom utility container images that are used in our CI and testing.
- `build-gi-cli` (a container image that includes the GitHub CLI)
- `build-github-pr-resource` (a [fork](https://github.com/pinniped-ci-bot/github-pr-resource) of `github-pr-resource` with support for gating PRs from untrusted users)
- `build-code-coverage-uploader` (uploading code coverage during unit tests)
- `build-eks-deployer-dockerfile` (deploying our app to EKS clusters)
- `build-k8s-app-deployer-dockerfile` (deploying our app to clusters)
- `build-pool-trigger-resource-dockerfile` (an updated implementation of the [pool-trigger-resource](https://github.com/cfmobile/pool-trigger-resource) for use in our CI)
- `build-integration-test-runner-dockerfile` (running our integration tests)
- `build-integration-test-runner-beta-dockerfile` (running our integration tests with the latest Chrome beta version)
- `build-deployment-yaml-formatter-dockerfile` (templating our deployment YAML during a release)
- `build-crane` (copy and tag container images during release)
- `build-k8s-code-generator-*` (running our Kubernetes code generation under different Kubernetes dependency versions)
- `build-test-dex` (a Dex used during tests)
- `build-test-cfssl` (a cfssl used during tests)
- `build-test-kubectl` (a kubectl used during tests)
- `build-test-forward-proxy` (a Squid forward proxy used during tests)
- `build-test-bitnami-ldap` (an OpenLDAP used during tests)
- `cleanup-aws`
This runs a script that runs [aws-nuke](https://github.com/rebuy-de/aws-nuke) against our test AWS account.
This was occasionally needed because [eksctl](https://eksctl.io/) sometimes fails and leaks AWS resources. These resources cost money and use up our AWS quota.
However, we seem to have worked around these issues and this pipeline has not been used for some time.
These jobs are only triggered manually. This is dangerous and should be used with care.
- `concourse-workers`
Deploys worker replicas on a long-lived GKE cluster that runs the Concourse workers, and can scale them up or down.
- `go-compatibility`
This pipeline runs nightly jobs that validate the compatibility of our code as a Go module in various contexts. We have jobs that test that our code compiles under older Go versions and that our CLI can be installed using `go install`.
- `security-scan`
This pipeline has nightly jobs that run security scans on our current main branch and most recently released artifacts.
The tools we use are:
- [sonatype-nexus-community/nancy](https://github.com/sonatype-nexus-community/nancy), which scans Go module versions.
- [aquasecurity/trivy](https://github.com/aquasecurity/trivy), which scans container images and Go binaries.
- [govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck), which scans Go code to find calls to known-vulnerable dependencies.
This pipeline also has a job called `all-golang-deps-updated` which automatically submits PRs to update all
direct dependencies in Pinniped's go.mod file, and update the Golang and distroless container images used in
Pinniped's Dockerfiles.
- `kind-node-builder`
A nightly build job which uses the latest version of kind to build the HEAD of Kubernetes' master branch as a container
image that can be used to deploy kind clusters. Other pipelines use this container image to install Pinniped and run
integration tests. This gives us insight into any compatibility problems with the next release of Kubernetes.
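To browse these pipelines from the command line, you can use Concourse's `fly` CLI. This is a sketch; the
target name `pinniped` is just an arbitrary local alias:
```bash
# Log in to our Concourse (opens a browser for authentication) and save a local target alias.
fly -t pinniped login -c https://ci.pinniped.dev
# List the pipelines that are visible to you.
fly -t pinniped pipelines
```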
## Deploying pipeline changes
After any shared tasks (`./pipelines/shared-tasks`) or helpers (`./pipelines/shared-helpers`) are edited,
the commits must be pushed to the `ci` branch of this repository to take effect.
After editing any CI secrets or pipeline definitions, a maintainer must run the corresponding
`./pipelines/$PIPELINE_NAME/update-pipeline.sh` script to apply the changes to Concourse.
To deploy _all_ pipelines, a maintainer can run `./pipelines/update-all-pipelines.sh`.
Don't forget to commit and push your changes after applying them!
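For example, after editing the `main` pipeline's definition (assuming the directory name matches the pipeline name,
following the pattern above):
```bash
# Apply just the updated main pipeline definition to Concourse.
./pipelines/main/update-pipeline.sh
# Or apply every pipeline.
./pipelines/update-all-pipelines.sh
```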
## Github webhooks for pipelines
Some pipelines use github [webhooks to trigger resource checks](https://concourse-ci.org/resources.html#schema.resource.webhook_token),
rather than the default of polling every minute, to make these pipelines more responsive and use fewer compute resources
for running checks. Refer to places where `webhook_token` is configured in various `pipeline.yml` files.
To make these webhooks work, they must be defined on the [GitHub repo's settings](https://github.com/vmware-tanzu/pinniped/settings/hooks).
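As a rough sketch, assuming Concourse's standard resource-check webhook endpoint, the webhook URL configured in
GitHub looks like the following (the team, pipeline, resource, and token values are placeholders). You can also
POST to it manually to trigger a check:
```bash
curl -X POST "https://ci.pinniped.dev/api/v1/teams/<team>/pipelines/<pipeline>/resources/<resource>/check/webhook?webhook_token=<token>"
```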
## Installing and operating Concourse
See [infra/README.md](./infra/README.md) for details about how Concourse was installed and how it can be operated.
## Acceptance environments
In addition to the many ephemeral Kubernetes clusters we use for testing, we also deploy a long-running acceptance environment:
a Google Kubernetes Engine (GKE) cluster named `gke-acceptance-cluster` in our GCP project, in the `us-central1-c` availability zone.
To access this cluster, download the kubeconfig to `gke-acceptance.yaml` by running:
```bash
KUBECONFIG=gke-acceptance.yaml gcloud container clusters get-credentials gke-acceptance-cluster --project "$PINNIPED_GCP_PROJECT" --zone us-central1-c
```
The above command assumes that you have already set `PINNIPED_GCP_PROJECT` to be the name of the GCP project.
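Once downloaded, use it like any other kubeconfig, for example:
```bash
# Confirm that you can reach the acceptance cluster.
KUBECONFIG=gke-acceptance.yaml kubectl get nodes
```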
## CI secrets
We use [Google Secret Manager](https://cloud.google.com/secret-manager) on GCP to store build/test/release secrets.
These secrets are only available to the maintainers.
Using the `gcloud secrets list` command or the [web console](https://console.cloud.google.com/security/secret-manager),
you can list the available secrets. The content of each secret is a YAML file with secret key/value pairs.
You can also use the `./hack/edit-gcloud-secret.sh <secretName>` script to edit or inspect each secret.
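For example:
```bash
# List the available CI secrets (assumes PINNIPED_GCP_PROJECT is set to the GCP project name, as above).
gcloud secrets list --project "$PINNIPED_GCP_PROJECT"
# View or edit the contents of one secret.
./hack/edit-gcloud-secret.sh concourse-secrets
```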
## Setting Up Active Directory Test Environment
To test the `ActiveDirectoryIdentityProvider` functionality, we have a long-running Active Directory Domain Controller
server instance in our GCP account. See [AD-SETUP.md](AD-SETUP.md) for details.
## Running integration tests on your laptop using AD
The relevant environment variables can be pulled from the secret manager via the `hack/get-active-directory-env-vars.sh` script.
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:
```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login
# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-active-directory-vars "$HOME/path/to/pinniped-ci-branch/hack/get-active-directory-env-vars.sh"
```
## Running integration tests on your laptop using GitHub
The relevant environment variables can be pulled from the secret manager via the `hack/get-github-env-vars.sh` script.
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:
```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login
# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-github-vars "$HOME/path/to/pinniped-ci-branch/hack/get-github-env-vars.sh"
```
## License
Pinniped is open source and licensed under Apache License Version 2.0. See [LICENSE](LICENSE).
Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.

1
SECURITY.md Normal file

@@ -0,0 +1 @@
Please see https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md


@@ -0,0 +1,14 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For running Go linters
FROM debian:12.7-slim AS builder
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
RUN curl -sfLo /tmp/codecov https://uploader.codecov.io/latest/linux/codecov
RUN chmod +x /tmp/codecov
FROM golang:1.23.2
RUN apt-get update -y && apt-get dist-upgrade -y
COPY --from=builder /tmp/codecov /usr/local/bin/codecov


@@ -0,0 +1,10 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM gcr.io/go-containerregistry/crane as crane
FROM mikefarah/yq:4.44.3 AS yq
FROM golang:1.23
COPY --from=yq /usr/bin/yq /usr/local/bin
COPY --from=crane /ko-app/crane /usr/local/bin
ENTRYPOINT ["bash"]


@@ -0,0 +1,16 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM mikefarah/yq:4.44.3 AS yq
FROM debian:12.7-slim
# Note: libdigest-sha-perl is to get shasum, which is used when installing Carvel tools below.
RUN apt-get update && apt-get install -y ca-certificates jq curl libdigest-sha-perl && rm -rf /var/lib/apt/lists/*
# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
# Install yq.
COPY --from=yq /usr/bin/yq /usr/local/bin/yq


@@ -0,0 +1,25 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For deploying an EKS cluster and setting it up to run our tests.
FROM weaveworks/eksctl:v0.193.0 AS eksctl
FROM mikefarah/yq:4.44.3 AS yq
FROM amazon/aws-cli:2.18.15
RUN yum update -y && yum install -y jq && yum install -y perl-Digest-SHA && yum clean all
COPY --from=eksctl /usr/local/bin /usr/local/bin
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
# Install aws-iam-authenticator.
# This gets installed automatically via eksctl, but currently it downloads v0.5.2,
# which will give us a v1alpha1 execcredential rather than a v1beta1 which we want.
# When this has changed, we can delete this:
# https://github.com/weaveworks/eksctl/blob/main/build/docker/Dockerfile#L49
RUN curl -sfL \
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
-o /usr/local/bin/aws-iam-authenticator \
&& chmod u+x /usr/local/bin/aws-iam-authenticator


@@ -0,0 +1,15 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For running the GitHub CLI.
FROM debian:12.7-slim AS builder
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
RUN curl \
-sfLo /tmp/gh.tar.gz \
https://github.com/cli/cli/releases/download/v2.40.0/gh_2.40.0_linux_amd64.tar.gz \
&& tar -C /tmp --strip-components=1 -xzvf /tmp/gh.tar.gz
FROM golang:1.23.2
COPY --from=builder /tmp/bin/gh /usr/local/bin/gh


@@ -0,0 +1,80 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For running the integration tests as a client to a k8s cluster
FROM mikefarah/yq:4.44.3 AS yq
# We need gcloud for running integration tests against GKE
# because the kubeconfig uses gcloud as an `auth-provider`.
# Use FROM gcloud-sdk instead of FROM golang because it's
# a lot easier to install Go than to install gcloud in the
# subsequent commands below.
FROM google/cloud-sdk:498.0.0-slim
# Install apache2-utils (for htpasswd to bcrypt passwords for the
# local-user-authenticator) and jq.
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
# Print version of gke-gcloud-auth-plugin
RUN gke-gcloud-auth-plugin --version
# Create a non-root user account that can be used to run the tests.
RUN useradd --create-home testrunner
# Install latest beta chrome.
RUN \
chown root:root /tmp && \
chmod 1777 /tmp && \
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
apt-get -y update && \
apt-get -y install google-chrome-beta
# Output Chrome version used
RUN google-chrome --version
# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/
ENV PATH /usr/local/go/bin:$PATH
RUN curl -fsSL https://go.dev/dl/go1.23.2.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
tar -C /usr/local -xzf /tmp/go.tar.gz && \
rm /tmp/go.tar.gz && \
go version
ENV GOPATH /go
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
# Install go tools gotestsum and test2json to record the test output in a nice format.
RUN go install gotest.tools/gotestsum@latest
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
mv test2json /usr/local/bin/test2json
# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
-o /bin/kubectl && chmod 0755 /bin/kubectl
# Install aws-iam-authenticator
RUN curl -fsSL \
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
-o /bin/aws-iam-authenticator \
&& chmod 0755 /bin/aws-iam-authenticator
# Install TMC CLI.
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
# | jq -r .versions[].linuxX64 \
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
# tmc version
# Install yq.
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
# install latest nmap
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
alien nmap-7.92-1.x86_64.rpm &&\
dpkg -i nmap_7.92-2_amd64.deb


@@ -0,0 +1,80 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For running the integration tests as a client to a k8s cluster
FROM mikefarah/yq:4.44.3 AS yq
# We need gcloud for running integration tests against GKE
# because the kubeconfig uses gcloud as an `auth-provider`.
# Use FROM gcloud-sdk instead of FROM golang because it's
# a lot easier to install Go than to install gcloud in the
# subsequent commands below.
FROM google/cloud-sdk:498.0.0-slim
# Install apache2-utils (for htpasswd to bcrypt passwords for the
# local-user-authenticator) and jq.
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
# Print version of gke-gcloud-auth-plugin
RUN gke-gcloud-auth-plugin --version
# Create a non-root user account that can be used to run the tests.
RUN useradd --create-home testrunner
# Install latest stable chrome.
RUN \
chown root:root /tmp && \
chmod 1777 /tmp && \
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
apt-get -y update && \
apt-get -y install google-chrome-stable
# Output Chrome version used
RUN google-chrome --version
# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/
ENV PATH /usr/local/go/bin:$PATH
RUN curl -fsSL https://go.dev/dl/go1.23.2.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
tar -C /usr/local -xzf /tmp/go.tar.gz && \
rm /tmp/go.tar.gz && \
go version
ENV GOPATH /go
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
# Install go tools gotestsum and test2json to record the test output in a nice format.
RUN go install gotest.tools/gotestsum@latest
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
mv test2json /usr/local/bin/test2json
# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
-o /bin/kubectl && chmod 0755 /bin/kubectl
# Install aws-iam-authenticator
RUN curl -fsSL \
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
-o /bin/aws-iam-authenticator \
&& chmod 0755 /bin/aws-iam-authenticator
# Install TMC CLI.
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
# | jq -r .versions[].linuxX64 \
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
# tmc version
# Install yq.
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
# install latest nmap
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
alien nmap-7.92-1.x86_64.rpm &&\
dpkg -i nmap_7.92-2_amd64.deb


@@ -0,0 +1,34 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# For deploying apps onto Kubernetes clusters (including GKE)
FROM google/cloud-sdk:498.0.0-slim
# Install apache2-utils (for htpasswd to bcrypt passwords for the
# local-user-authenticator) and jq.
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps dnsutils google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
# Print version of gke-gcloud-auth-plugin
RUN gke-gcloud-auth-plugin --version
# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
# Install latest kubectl.
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
-o /bin/kubectl && chmod u+x /bin/kubectl
# Install aws-iam-authenticator
RUN curl -sfL \
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
-o /bin/aws-iam-authenticator \
&& chmod u+x /bin/aws-iam-authenticator
# Install TMC CLI.
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
#RUN curl -sfL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
# | jq -r .versions[].linuxX64 \
# | xargs curl -sfL -o /bin/tmc && chmod +x /bin/tmc && \
# tmc version

View File

@@ -0,0 +1,20 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
ARG GO_VERSION
FROM golang:${GO_VERSION}
ARG GO_VERSION
ARG K8S_PKG_VERSION
ARG CONTROLLER_GEN_VERSION
ARG CRD_REF_DOCS_COMMIT_SHA
ENV GO_VERSION=$GO_VERSION
ENV K8S_PKG_VERSION=$K8S_PKG_VERSION
ENV CONTROLLER_GEN_VERSION=$CONTROLLER_GEN_VERSION
ENV CRD_REF_DOCS_COMMIT_SHA=$CRD_REF_DOCS_COMMIT_SHA
COPY setup.sh /codegen/
RUN /codegen/setup.sh

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [ -z "$GO_VERSION" ]; then
echo "missing GO_VERSION"
exit 1
fi
if [ -z "$K8S_PKG_VERSION" ]; then
echo "missing K8S_PKG_VERSION"
exit 1
fi
if [ -z "$CONTROLLER_GEN_VERSION" ]; then
echo "missing CONTROLLER_GEN_VERSION"
exit 1
fi
# Debugging output for CI...
echo "GO_VERSION: $GO_VERSION"
echo "K8S_PKG_VERSION: $K8S_PKG_VERSION"
echo "CONTROLLER_GEN_VERSION: $CONTROLLER_GEN_VERSION"
echo "CRD_REF_DOCS_COMMIT_SHA: $CRD_REF_DOCS_COMMIT_SHA"
apt-get update -y && apt-get dist-upgrade -y
cd /codegen/
cat <<EOF >tools.go
package tools
import (
_ "k8s.io/apimachinery/pkg/apis/meta/v1"
_ "k8s.io/api/core/v1"
_ "k8s.io/code-generator"
)
EOF
cat <<EOF >go.mod
module codegen
go 1.21
require (
k8s.io/apimachinery v$K8S_PKG_VERSION
k8s.io/code-generator v$K8S_PKG_VERSION
k8s.io/api v$K8S_PKG_VERSION
)
EOF
# Resolve dependencies and download the modules.
go mod tidy
go mod download
# Copy the downloaded source code of k8s.io/code-generator so we can "go install" all its commands.
rm -rf "$(go env GOPATH)/src"
mkdir -p "$(go env GOPATH)/src/k8s.io"
cp -pr "$(go env GOMODCACHE)/k8s.io/code-generator@v$K8S_PKG_VERSION" "$(go env GOPATH)/src/k8s.io/code-generator"
# Install the commands to $GOPATH/bin. Also sed the related shell scripts, but leave those in the src dir.
# Note that update-codegen.sh invokes these shell scripts at this src path.
# The sed is a dirty hack to avoid having the code-generator shell scripts run go install again.
# In version 0.23.0 the line inside the shell script that previously said "go install ..." started
# to instead say "GO111MODULE=on go install ..." so this sed is a little wrong, but still seems to work.
(cd "$(go env GOPATH)/src/k8s.io/code-generator" &&
go install -v ./cmd/... &&
sed -i -E -e 's/(go install.*)/# \1/g' ./*.sh)
if [[ ! -f "$(go env GOPATH)/bin/openapi-gen" ]]; then
# Starting in Kube 1.30, openapi-gen moved from k8s.io/code-generator to k8s.io/kube-openapi.
# Assuming that we are still in the /codegen directory, get the specific version of kube-openapi
# that is selected as an indirect dependency by the go.mod.
kube_openapi_version=$(go list -m k8s.io/kube-openapi | cut -f2 -d' ')
# Install that version of its openapi-gen command.
go install -v "k8s.io/kube-openapi/cmd/openapi-gen@$kube_openapi_version"
fi
go install -v sigs.k8s.io/controller-tools/cmd/controller-gen@v$CONTROLLER_GEN_VERSION
# We use a commit sha instead of a release semver because this project does not create
# releases very often. They seem to only release 1-2 times per year, but commit to
# main more often.
go install -v github.com/elastic/crd-ref-docs@$CRD_REF_DOCS_COMMIT_SHA
# List all the commands that we just installed.
echo "Installed the following commands to $(go env GOPATH)/bin:"
ls "$(go env GOPATH)/bin"

View File

@@ -0,0 +1,17 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# We would like to use https://github.com/cfmobile/pool-trigger-resource for our pool recycle jobs.
# Unfortunately, the pool-trigger-resource repo appears to be unmaintained. The most recent
# commit was six years ago. On the other hand, its implementation is a shell script which basically
# just calls some git commands, so it shouldn't need much maintenance if it works.
# This is an updated version of https://github.com/cfmobile/pool-trigger-resource/blob/master/Dockerfile
# to use newer versions of linux, jq, and git. The "assets" directory's source code is copied from
# https://github.com/cfmobile/pool-trigger-resource/tree/master/assets as of commit efefe018c88e937.
FROM debian:12.7-slim
RUN apt-get update && apt-get install -y ca-certificates jq git && rm -rf /var/lib/apt/lists/*
ADD assets/ /opt/resource/
RUN chmod +rx /opt/resource/*

View File

@@ -0,0 +1,219 @@
#!/bin/sh
# vim: set ft=sh
set -e
exec 3>&1 # make stdout available as fd 3 for the result
exec 1>&2 # redirect all output to stderr for logging
# shellcheck source=./common.sh
. "$(dirname "$0")"/common.sh
# for jq
PATH=/usr/local/bin:$PATH
payload=$TMPDIR/git-resource-request
cat > "$payload" <&0
uri=$(jq -r '.source.uri // ""' < "$payload")
branch=$(jq -r '.source.branch // ""' < "$payload")
pool_name=$(jq -r '.source.pool // ""' < "$payload")
ref=$(jq -r '.version.ref // ""' < "$payload")
if [ -z "$uri" ]; then
config_errors="${config_errors}invalid payload (missing uri)
"
fi
if [ -z "$branch" ]; then
config_errors="${config_errors}invalid payload (missing branch)
"
fi
if [ -z "$pool_name" ]; then
config_errors="${config_errors}invalid payload (missing pool)
"
fi
if [ -n "$config_errors" ]; then
echo "$config_errors"
exit 1
fi
###########
#
# end processing inputs
#
###########
###########
#
# start git setup
#
###########
load_pubkey "$payload"
destination=$TMPDIR/git-resource-repo-cache
if [ -d "$destination" ]; then
cd "$destination"
git fetch
git reset --hard FETCH_HEAD
else
branchflag=""
if [ -n "$branch" ]; then
branchflag="--branch $branch"
fi
git clone "$uri" $branchflag "$destination"
cd "$destination"
fi
git config user.name "CI Pool Trigger Resource"
git config user.email "ci-pool-trigger@localhost"
###########
#
# end git setup
#
###########
###########
#
# start calculating pending triggers
#
###########
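# Illustrative walkthrough of the tally math below (hypothetical numbers, added for clarity):
# if 3 lock files were added and 2 were removed under unclaimed/ since $ref, and the stored
# tallies were .pending-triggers=2 and .pending-removals=1, then pending_triggers becomes
# 2+3=5; the 2 removals consume the 1 stored pending removal plus 1 extra, so pending_removals
# resets to 0 and pending_triggers drops to 5-1=4.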
if [ -n "$ref" ] && git cat-file -e "$ref"; then
ref_exists_and_is_valid=yes
fi
if [ -e "$pool_name/.pending-triggers" ] && [ -e "$pool_name/.pending-removals" ]; then
tally_files_exist=yes
#check validity of tally files
fi
if [ -n "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
files_changed=$(git show --pretty="format:" --name-status -r "$ref"..HEAD -- "$pool_name"/unclaimed/)
set +e
added_items=$(echo "$files_changed" | grep "^A")
removed_items=$(echo "$files_changed" | grep "^D")
set -e
if [ -n "$added_items" ]; then
num_added_items=$(echo "$added_items" | wc -l)
else
num_added_items=0
fi
if [ -n "$removed_items" ]; then
num_removed_items=$(echo "$removed_items" | wc -l)
else
num_removed_items=0
fi
old_pending_triggers=$(cat "$pool_name"/.pending-triggers)
old_pending_removals=$(cat "$pool_name"/.pending-removals)
pending_triggers=$(( old_pending_triggers + num_added_items ))
if [ "$num_removed_items" -gt "$old_pending_removals" ]; then
extra_removals=$(( num_removed_items - old_pending_removals ))
pending_removals=0
pending_triggers=$(( pending_triggers - extra_removals ))
else
pending_removals=$(( old_pending_removals - num_removed_items ))
fi
else
pending_triggers=$(find "$pool_name"/unclaimed -not -path "*/\.*" -path "$pool_name/unclaimed/*"| wc -l)
pending_removals=0
fi
###########
#
# end calculating pending triggers
#
###########
###########
#
# start handling results
#
###########
if [ "$pending_triggers" -gt 0 ]; then
last_commit=$(git log -1 --pretty='format:%H')
result=$(echo "$last_commit" | jq -R '.' | jq -s "map({ref: .})")
else
result="[]"
fi
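# For reference (hypothetical sha), the jq pipeline above emits the check output shape that
# Concourse expects: [{"ref":"<full commit sha>"}], or [] when nothing is pending.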
###########
#
# end handling results
#
###########
###########
#
# start updating triggers
#
###########
if [ "$pending_triggers" -gt 0 ]; then
new_pending_triggers=$(( pending_triggers - 1 ))
new_pending_removals=$(( pending_removals + 1 ))
echo "$new_pending_triggers" > "$pool_name"/.pending-triggers
echo "$new_pending_removals" > "$pool_name"/.pending-removals
git add "$pool_name"/.pending*
commit_message="triggering build with pending triggers: $new_pending_triggers; pending removals: $new_pending_removals"
if [ -n "$ref_exists_and_is_valid" ] && [ -z "$tally_files_exist" ]; then
commit_message="$commit_message
.pending-triggers and/or .pending-removals are missing - re-initializing resource"
elif [ -z "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
commit_message="$commit_message
resource initialized with pre-existing .pending-triggers and .pending-removals - ignoring"
elif [ -z "$ref_exists_and_is_valid" ]; then
commit_message="$commit_message
initializing tally files"
fi
if [ -n "$added_items" ]; then
commit_message="$commit_message
additions:
$added_items"
fi
if [ -n "$removed_items" ]; then
commit_message="$commit_message
removals:
$removed_items"
fi
git commit --allow-empty -m "$commit_message"
git push
fi
###########
#
# end updating triggers
#
###########
echo "$result" >&3

View File

@@ -0,0 +1,28 @@
#!/bin/sh
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
export TMPDIR=${TMPDIR:-/tmp}
load_pubkey() {
local private_key_path=$TMPDIR/git-resource-private-key
(jq -r '.source.private_key // empty' < "$1") > "$private_key_path"
if [ -s "$private_key_path" ]; then
chmod 0600 "$private_key_path"
eval "$(ssh-agent)" >/dev/null 2>&1
trap 'kill $SSH_AGENT_PID' 0
ssh-add "$private_key_path" >/dev/null 2>&1
mkdir -p ~/.ssh
cat > ~/.ssh/config <<EOF
StrictHostKeyChecking no
LogLevel quiet
EOF
chmod 0600 ~/.ssh/config
fi
}

View File

@@ -0,0 +1,2 @@
#!/bin/sh
cat

View File

@@ -0,0 +1,2 @@
#!/bin/sh
cat

View File

@@ -0,0 +1,4 @@
# Copyright 2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM bitnami/openldap:2.6.8

View File

@@ -0,0 +1,28 @@
# Copyright 2021-2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The cfssl/cfssl container image on dockerhub is built poorly.
# For every arch, the image contains /bin/* binaries for amd64.
# Therefore, we cannot use bash on arm64 inside this container image.
# This was observed in cfssl/cfssl:v1.6.4.
# However, they do compile their own binaries for both arm64 and amd64,
# so we can just copy their binaries into a vanilla linux base image.
FROM cfssl/cfssl:v1.6.5 as cfssl
# We just need any basic unix with bash, but we can pick the same
# base image that they use, just in case they did any dynamic linking.
FROM golang:1.23
# Their Dockerfile https://github.com/cloudflare/cfssl/blob/master/Dockerfile
# calls their Makefile https://github.com/cloudflare/cfssl/blob/master/Makefile
# which builds several binaries. Copy them all.
COPY --from=cfssl /usr/bin/cf* /usr/local/bin
COPY --from=cfssl /usr/bin/mkbundle /usr/local/bin
COPY --from=cfssl /usr/bin/multirootca /usr/local/bin
# Their Dockerfile also populates this directory, so copy that too.
COPY --from=cfssl /etc/cfssl /etc/cfssl
# These lines are copied from the cfssl Dockerfile.
EXPOSE 8888
ENTRYPOINT ["cfssl"]
CMD ["--help"]

View File

@@ -0,0 +1,4 @@
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM ghcr.io/dexidp/dex:v2.41.1

View File

@@ -0,0 +1,13 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Use a runtime image based on Debian slim
FROM debian:12.7-slim
# Install Squid and drop in a very basic, open proxy configuration.
RUN apt-get update && apt-get install -y squid
COPY squid.conf /etc/squid/squid.conf
EXPOSE 3128
# Launch Squid as a foreground process.
CMD squid -N -C -n proxy -d1 2>&1

View File

@@ -0,0 +1,56 @@
## listen on TCP 3128
http_port 3128
## Prevent caching anything (pass through only)
cache deny all
## Allow all connections.
http_access allow all
## Where does Squid log to?
cache_store_log none
cache_log /dev/null
access_log daemon:/var/log/squid/access.log squid
access_log syslog:user.info squid
## When logging, web auditors want to see the full uri, even with the query terms
strip_query_terms off
## Keep 7 days of logs
logfile_rotate 7
## How much RAM, in MB, to use for cache? Default since squid 3.1 is 256 MB
cache_mem 8 MB
## Maximum size of individual objects to store in cache
maximum_object_size 1 MB
## Amount of data to buffer from server to client
read_ahead_gap 64 KB
## Number of file descriptors to support (default is 2**20 which takes up ~408 MB of memory)
max_filedescriptors 65536
## Drop X-Forwarded-For headers
forwarded_for delete
## Suppress sending squid version information
httpd_suppress_version_string on
## How long to wait when shutting down squid
shutdown_lifetime 10 seconds
## What hostname to display? (defaults to system hostname)
visible_hostname proxy
## Drop some response headers that Squid normally adds (just being paranoid here)
reply_header_access Server deny all
reply_header_access Via deny all
reply_header_access X-Cache deny all
reply_header_access X-Cache-Lookup deny all
reply_header_access X-Squid-Error deny all
## Drop denied connections with just a TCP reset (no error page that might leak info)
deny_info TCP_RESET all
dns_v4_first off

View File

@@ -0,0 +1,4 @@
# Copyright 2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM bitnami/kubectl:latest

38
hack/approve-and-merge.sh Executable file
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
repo=vmware-tanzu/pinniped
current_branch_name=$(git rev-parse --abbrev-ref HEAD)
if [[ "$current_branch_name" != "ci" ]]; then
echo "error: this script should only be used on the ci branch"
exit 1
fi
# Print the list of PRs to the screen.
PAGER='' gh pr list --base ci --repo $repo --limit 1000
# Exit if there are no PRs found.
count_prs=$(gh pr list --base ci --repo $repo --jq ". | length" --json "number")
if [[ "${count_prs}" == "0" ]]; then
exit 0
fi
read -p "Do you wish to approve and merge these PRs for the ci branch? y/n: " yn
case $yn in
[Yy]* );;
* ) exit 0;;
esac
gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \
| xargs -I{} gh pr review {} --approve
gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \
| xargs -I{} gh pr merge {} --merge --delete-branch
echo "now pulling the merged commits"
git pull --rebase --autostash

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if ! [ -x "$(command -v gcloud)" ]; then
echo 'Error: Google Cloud SDK (gcloud) is not installed (see https://cloud.google.com/sdk/docs/quickstarts).' >&2
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
# Create (or recreate) a GKE acceptance cluster.
# Pro tip: The GCP Console UI can help you build this command.
# The following fields were customized, and all of the others are left as the GCP Console's defaults:
# - Cluster name
# - Cluster version - newest at the time
# - Num nodes - sized smaller to be cheaper
# - Maintenance window start and recurrence - to avoid downtime during business hours
# - Issue client certificate - to make it possible to use an admin kubeconfig without the GKE auth plugin
gcloud container --project "$PINNIPED_GCP_PROJECT" clusters create "gke-acceptance-cluster" \
--zone "us-central1-c" --no-enable-basic-auth --cluster-version "1.30.4-gke.1348000" --release-channel "regular" \
--machine-type "e2-medium" \
--image-type "COS_CONTAINERD" --disk-type "pd-balanced" --disk-size "100" --metadata disable-legacy-endpoints=true \
--scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \
--num-nodes "1" \
--logging=SYSTEM,WORKLOAD --monitoring=SYSTEM,STORAGE,POD,DEPLOYMENT,STATEFULSET,DAEMONSET,HPA,CADVISOR,KUBELET \
--enable-ip-alias \
--network "projects/$PINNIPED_GCP_PROJECT/global/networks/default" \
--subnetwork "projects/$PINNIPED_GCP_PROJECT/regions/us-central1/subnetworks/default" \
--no-enable-intra-node-visibility \
--default-max-pods-per-node "110" \
--security-posture=standard --workload-vulnerability-scanning=disabled --no-enable-master-authorized-networks \
--addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver \
--enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0 \
--binauthz-evaluation-mode=DISABLED --enable-managed-prometheus --enable-shielded-nodes --node-locations "us-central1-c" \
--maintenance-window-start "2020-07-01T03:00:00Z" --maintenance-window-end "2020-07-01T11:00:00Z" \
--maintenance-window-recurrence "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA,SU" \
--issue-client-certificate

69
hack/edit-gcloud-secret.sh Executable file
View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -e
if [ -z "$1" ]; then
echo "usage: $0 SECRET_NAME"
exit 1
fi
set -u
if ! command -v yq &> /dev/null; then
echo "Please install the yq CLI"
exit 1
fi
if ! command -v delta &> /dev/null; then
echo "Please install the delta CLI (brew install git-delta)"
exit 1
fi
if ! command -v gcloud &> /dev/null; then
echo "Please install the gcloud CLI"
exit 1
fi
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
# Create a temporary directory for secrets, cleaned up at the end of this script.
trap 'rm -rf "$TEMP_DIR"' EXIT
TEMP_DIR=$(mktemp -d) || exit 1
# Grab the current version.
echo "Downloading the latest version of '$1'..."
gcloud secrets versions access latest --secret="$1" --project "$PINNIPED_GCP_PROJECT" > "$TEMP_DIR/$1.yaml"
# Use yq to format the YAML into a consistent style.
# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used.
# For now, we'll skip the pretty-printing.
# yq eval -i -P '.' "$TEMP_DIR/$1.yaml"
yq eval -i '.' "$TEMP_DIR/$1.yaml"
cp "$TEMP_DIR/$1.yaml" "$TEMP_DIR/$1-original.yaml"
# Invoke $EDITOR to modify the file.
${EDITOR:-vim} "$TEMP_DIR/$1.yaml"
# Format the output from the editor just as we did before the edit.
# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used.
# For now, we'll skip the pretty-printing.
# yq eval -i -P '.' "$TEMP_DIR/$1.yaml"
yq eval -i '.' "$TEMP_DIR/$1.yaml"
# Dump the diff using git-delta.
( cd "$TEMP_DIR" && delta "$1-original.yaml" "$1.yaml" || true )
read -p "Save as new version of '$1' [yN]: " -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
gcloud secrets versions add "$1" --data-file "$TEMP_DIR/$1.yaml" --project "$PINNIPED_GCP_PROJECT"
fi

58
hack/fly-helpers.sh Normal file
View File

@@ -0,0 +1,58 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Some global fly config.
#
export FLY_CLI=/usr/local/bin/fly
export CONCOURSE_URL=https://ci.pinniped.dev
export CONCOURSE_TEAM=main
export CONCOURSE_TARGET=pinniped
export ROOT_DIR
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."
#
# Some helper functions for the update-pipeline scripts to use.
#
function set_pipeline() {
# Ensure that fly is installed/upgraded/configured.
"$ROOT_DIR"/hack/setup-fly.sh
# Ensure that the user is authenticated with gcloud.
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\` and try again."
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
# Local vars.
local pipeline_name=$1
local pipeline_file=$2
local gcloud_project="$PINNIPED_GCP_PROJECT"
local gcloud_secret_name=concourse-secrets
# Create/update the pipeline.
$FLY_CLI --target "$CONCOURSE_TARGET" set-pipeline \
--pipeline "$pipeline_name" \
--config "$pipeline_file" \
--load-vars-from <(gcloud secrets versions access latest \
--secret="$gcloud_secret_name" \
--project "$gcloud_project")
}
function ensure_time_resource_has_at_least_one_version() {
local pipeline_name=$1
local resource_name=$2
# Force the specified time resource to have at least one version. Idempotent.
# For a new pipeline, a time resource will have no versions until the specified time has occurred.
# For example, a once-per-night time resource will have no versions until that time
# has passed on the first night.
$FLY_CLI --target "$CONCOURSE_TARGET" check-resource \
--resource "$pipeline_name/$resource_name" \
--from "time:2000-01-01T00:00:00Z" >/dev/null
}
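# Example usage from an update-pipeline script (illustrative; the pipeline name, config path,
# and resource name below are hypothetical placeholders):
#   script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
#   source "$script_dir/fly-helpers.sh"
#   set_pipeline "example-pipeline" "$ROOT_DIR/pipelines/example-pipeline/pipeline.yml"
#   ensure_time_resource_has_at_least_one_version "example-pipeline" "nightly"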

43
hack/get-aws-ad-env-vars.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# To be run before local integration tests.
# From the pinniped repo:
# hack/prepare-for-integration-tests.sh --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
function _get_concourse_secret {
gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1"
}
export PINNIPED_TEST_AD_HOST="$(_get_concourse_secret '.aws-ad-host')"
export PINNIPED_TEST_AD_DOMAIN="$(_get_concourse_secret '.aws-ad-domain')"
export PINNIPED_TEST_AD_BIND_ACCOUNT_USERNAME="$(_get_concourse_secret '.aws-ad-bind-account-username')"
export PINNIPED_TEST_AD_BIND_ACCOUNT_PASSWORD="$(_get_concourse_secret '.aws-ad-bind-account-password')"
export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME="objectGUID"
export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-unique-id-attribute-value')"
export PINNIPED_TEST_AD_USER_USER_PRINCIPAL_NAME="$(_get_concourse_secret '.aws-ad-user-userprincipalname')"
export PINNIPED_TEST_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-user-password')"
export PINNIPED_TEST_AD_LDAPS_CA_BUNDLE="$(_get_concourse_secret '.aws-ad-ca-data')"
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_DN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-dn')"
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_CN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-cn')"
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountnames')"
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountname-domainnames')"
export PINNIPED_TEST_DEACTIVATED_AD_USER_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-deactivated-user-samaccountname')"
export PINNIPED_TEST_DEACTIVATED_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-deactivated-user-password')"
export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_NAME="mail"
export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-email-attribute-value')"
export PINNIPED_TEST_AD_DEFAULTNAMINGCONTEXT_DN="$(_get_concourse_secret '.aws-ad-defaultnamingcontext')"
export PINNIPED_TEST_AD_USERS_DN="$(_get_concourse_secret '.aws-ad-users-dn')"
unset -f _get_concourse_secret

39
hack/get-github-env-vars.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# To be run before local integration tests.
# From the pinniped repo:
# hack/prepare-for-integration-tests.sh --get-github-vars "../pinniped-ci-branch/hack/get-github-env-vars.sh"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
function _get_concourse_secret {
gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1"
}
export PINNIPED_TEST_GITHUB_APP_CLIENT_ID="$(_get_concourse_secret '.github-app-client-id')"
export PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-app-client-secret')"
export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID="$(_get_concourse_secret '.github-oauth-app-client-id')"
export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-oauth-app-client-secret')"
export PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL="$(_get_concourse_secret '.github-oauth-app-allowed-callback-url')"
export PINNIPED_TEST_GITHUB_USER_USERNAME="$(_get_concourse_secret '.github-username')"
export PINNIPED_TEST_GITHUB_USER_PASSWORD="$(_get_concourse_secret '.github-password')"
export PINNIPED_TEST_GITHUB_USER_OTP_SECRET="$(_get_concourse_secret '.github-user-otp-secret')"
export PINNIPED_TEST_GITHUB_USERID="$(_get_concourse_secret '.github-userid')"
export PINNIPED_TEST_GITHUB_ORG="$(_get_concourse_secret '.github-org')"
export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES="$(_get_concourse_secret '.github-expected-team-names')"
export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS="$(_get_concourse_secret '.github-expected-team-slugs')"
unset -f _get_concourse_secret

20
hack/list-all-running-jobs.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Define some env vars
source "$script_dir/fly-helpers.sh"
# Setup and login if needed
"$ROOT_DIR"/hack/setup-fly.sh
# List all jobs that are currently running in CI.
# An empty result means that there are no jobs running.
for p in $($FLY_CLI --target "$CONCOURSE_TARGET" pipelines --json | jq -r ".[].name"); do
$FLY_CLI --target "$CONCOURSE_TARGET" jobs -p "$p" --json | jq -r ".[] | select(.next_build.status == \"started\") | (\"$p/\" + .name)"
done
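# Example output (hypothetical pipeline and job names): one "pipeline/job" pair per line, e.g.
#   pinniped/run-integration-tests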

37
hack/pinniped-pre-commit.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
pinniped_ci_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
pinniped_path="${1-$PWD}"
pinniped_ci_path="${2-$pinniped_ci_root}"
cd "$pinniped_path" || exit 1
if [[ ! -f "./hack/module.sh" ]]; then
echo "$pinniped_path does not appear to be the path to the source code repo directory"
exit 1
fi
if [[ ! -f "$pinniped_ci_path/hack/run-integration-tests.sh" ]]; then
echo "$pinniped_ci_path does not appear to be the path to the ci repo directory"
exit 1
fi
echo
echo "Running linters..."
./hack/module.sh lint
echo
echo "Running units..."
./hack/module.sh unittest
echo
echo "Running integrations..."
"$pinniped_ci_path"/hack/run-integration-tests.sh --from-clean-cluster
echo
echo "ALL TESTS PASSED"

View File

@@ -0,0 +1,139 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# This script can be used to prepare a kind cluster and deploy the app
# in preparation for running the uninstall test.
# It will also output instructions on how to run the uninstall test.
set -euo pipefail
help=no
skip_build=no
pinniped_ci_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
PARAMS=""
while (("$#")); do
case "$1" in
-h | --help)
help=yes
shift
;;
-s | --skip-build)
skip_build=yes
shift
;;
-*)
echo "Error: Unsupported flag $1" >&2
exit 1
;;
*)
PARAMS="$PARAMS $1"
shift
;;
esac
done
eval set -- "$PARAMS"
if [[ "$help" == "yes" ]]; then
me="$(basename "${BASH_SOURCE[0]}")"
echo "Usage:"
echo " $me [flags] [path/to/pinniped] [path/to/pinniped-ci-branch]"
echo
echo " path/to/pinniped default: \$PWD ($PWD)"
echo " path/to/pinniped-ci-branch default: the parent directory of this script ($pinniped_ci_root)"
echo
echo "Flags:"
echo " -h, --help: print this usage"
echo " -s, --skip-build: reuse the most recently built image of the app instead of building"
exit 1
fi
pinniped_path="${1-$PWD}"
pinniped_ci_path="${2-$pinniped_ci_root}"
if ! command -v kind >/dev/null; then
echo "Please install kind. e.g. 'brew install kind' for MacOS"
exit 1
fi
if ! command -v ytt >/dev/null; then
echo "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS"
exit 1
fi
if ! command -v kapp >/dev/null; then
echo "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
exit 1
fi
if ! command -v kubectl >/dev/null; then
echo "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
exit 1
fi
cd "$pinniped_path" || exit 1
if [[ ! -f Dockerfile || ! -d deploy ]]; then
echo "$pinniped_path does not appear to be the path to the source code repo directory"
exit 1
fi
if [[ ! -d "$pinniped_ci_path/pipelines/shared-helpers" ]]; then
echo "$pinniped_ci_path does not appear to be the path to the ci repo directory"
exit 1
fi
echo "Deleting running kind clusters to prepare a clean slate for the install+uninstall test..."
kind delete cluster --name pinniped
echo "Creating a kind cluster..."
kind create cluster --name pinniped
registry="docker.io"
repo="test/build"
registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
if [[ "$skip_build" == "yes" ]]; then
most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1)
if [[ -n "$most_recent_tag" ]]; then
tag="$most_recent_tag"
do_build=no
else
# Oops, there was no previous build. Need to build anyway.
do_build=yes
fi
else
do_build=yes
fi
registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
echo "Docker building the app..."
docker build . --tag "$registry_repo_tag"
fi
# Load it into the cluster
echo "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag" --name pinniped
cat <<EOF >/tmp/uninstall-test-env
# The following env vars should be set before running $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh
export IMAGE_REPO="$registry_repo"
export IMAGE_TAG="$tag"
EOF
echo "Done!"
echo
echo "Ready to run an uninstall test."
echo " cd $pinniped_path"
echo "Then either"
echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh"
echo "or"
echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh"
echo
echo "When you're finished, use 'kind delete cluster --name pinniped to tear down the cluster."

View File

@@ -0,0 +1,248 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Assuming that you have somehow got your hands on a remote GKE or kind cluster,
# and that you have an admin kubeconfig file for it,
# and that you have already built/pushed the Pinniped container image that you would like to test,
# then you can use this script to deploy in preparation for integration or manual testing.
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
#
# Handle argument parsing and help message
#
help=no
kubeconfig=""
image_tag=""
image_repo=""
pinniped_repo=""
cluster_type=""
image_digest=""
while (("$#")); do
case "$1" in
-h | --help)
help=yes
shift
;;
-k | --kubeconfig)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "-k|--kubeconfig requires a kubeconfig path to be specified"
exit 1
fi
kubeconfig=$1
shift
;;
-t | --image-tag)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "-t|--image-tag requires a tag to be specified"
exit 1
fi
image_tag=$1
shift
;;
-d | --image-digest)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--d|--image-digest requires a digest to be specified"
exit 1
fi
image_digest=$1
shift
;;
-r | --image-repo)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "-r|--image-repo requires an image repo to be specified"
exit 1
fi
image_repo=$1
shift
;;
-p | --pinniped-repo)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "-p|--pinniped-repo requires a path to the pinniped repo to be specified"
exit 1
fi
pinniped_repo=$1
shift
;;
-c | --cluster-type)
shift
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "-c|--cluster-type requires the type of the cluster to be specified"
exit 1
fi
cluster_type=$1
shift
;;
-*)
log_error "Unsupported flag $1" >&2
exit 1
;;
*)
log_error "Unsupported positional arg $1" >&2
exit 1
;;
esac
done
# Note that if you are using a remote kind cluster then it might be more convenient to use this public repo:
# ghcr.io/pinniped-ci-bot/manual-test-pinniped-images
# You can give yourself permission to push to that repo at:
# https://github.com/users/pinniped-ci-bot/packages/container/manual-test-pinniped-images/settings
default_image_repo="gcr.io/$PINNIPED_GCP_PROJECT/manual-test-pinniped-images"
default_image_tag="latest"
if [[ "$help" == "yes" ]]; then
me="$(basename "${BASH_SOURCE[0]}")"
log_note "Usage:"
log_note " $me [flags]"
log_note
log_note "Flags:"
log_note " -h, --help: print this usage"
log_note " -k, --kubeconfig: path to the kubeconfig for your cluster (required)"
log_note " -c, --cluster-type: the type of cluster targeted by the kubeconfig, either 'gke' or 'kind' (required)"
log_note " -r, --image-repo: image registry/repository for Pinniped server container image to deploy (default: $default_image_repo)"
log_note " -t, --image-tag: image tag for Pinniped server container image to deploy (default: $default_image_tag)"
log_note " -d, --image-digest: image digest for Pinniped server container image to deploy. Takes precedence over --image-tag."
log_note " -p, --pinniped-repo: path to pinniped git repo (default: a sibling directory called pinniped)"
exit 1
fi
if [[ "$kubeconfig" == "" ]]; then
log_error "no kubeconfig set. -k|--kubeconfig is a required option."
exit 1
fi
if [[ "$kubeconfig" != "/"* ]]; then
# If it looks like a relative path then make it an absolute path because we are going to pushd below.
kubeconfig="$(pwd)/$kubeconfig"
fi
if [[ ! -f "$kubeconfig" ]]; then
log_error "specified kubeconfig file does not exist: $kubeconfig"
exit 1
fi
if [[ "$cluster_type" != "gke" && "$cluster_type" != "kind" && "$cluster_type" != "aks" && "$cluster_type" != "eks" ]]; then
log_error "specified cluster type must be 'kind', 'eks', 'aks', or 'gke'. -c|--cluster-type is a required option."
exit 1
fi
if [[ "$pinniped_repo" == "" ]]; then
pinniped_repo="$ROOT/../pinniped"
log_note "no pinniped repo path set, defaulting to $pinniped_repo"
fi
if [[ ! (-d "$pinniped_repo" && -d "$pinniped_repo/deploy" && -d "$pinniped_repo/test/cluster_capabilities") ]]; then
log_error "$pinniped_repo does not appear to contain the pinniped source code repo"
fi
if [[ "$image_repo" == "" ]]; then
image_repo="$default_image_repo"
log_note "no image repo set, defaulting to $image_repo"
fi
if [[ "$image_tag" == "" ]]; then
image_tag="$default_image_tag"
log_note "no image tag set, defaulting to $image_tag"
fi
cluster_capabilities_path="$pinniped_repo/test/cluster_capabilities/$cluster_type.yaml"
if [[ ! -f "$cluster_capabilities_path" ]]; then
log_error "cluster type capabilities file does not exist: $cluster_capabilities_path"
exit 1
fi
check_dependency ytt "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS"
check_dependency kapp "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux."
check_dependency openssl "Please install openssl. Should be pre-installed on MacOS."
check_dependency nmap "Please install nmap. e.g. 'brew install nmap' for MacOS"
#
# Finished checking arguments and dependencies. Now actually do the work...
#
export KUBECONFIG="$kubeconfig"
export IMAGE_TAG="$image_tag"
export IMAGE_REPO="$image_repo"
if [[ "$image_digest" != "" ]]; then
export IMAGE_DIGEST="$image_digest"
fi
pushd "$pinniped_repo" >/dev/null
PINNIPED_TEST_CLUSTER_CAPABILITY_FILE="${cluster_capabilities_path}" \
DEPLOY_LOCAL_USER_AUTHENTICATOR=yes \
DEPLOY_TEST_TOOLS=yes \
CONCIERGE_APP_NAME="concierge" \
CONCIERGE_NAMESPACE="concierge" \
SUPERVISOR_APP_NAME="supervisor" \
SUPERVISOR_NAMESPACE="supervisor" \
USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR="yes" \
"$ROOT/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh"
popd >/dev/null
log_note
log_note "🚀 Ready to run integration tests! For example..."
case "$cluster_type" in
gke | aks | eks)
log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh"
;;
kind)
log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' START_GCLOUD_PROXY=yes GCP_PROJECT=$PINNIPED_GCP_PROJECT GCP_ZONE=us-central1-b $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh"
;;
*)
log_error "Huh? Should never get here."
;;
esac

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Create a VM called $instance_name with some reasonable compute power and disk.
echo "Creating VM with name $instance_name..."
gcloud compute instances create "$instance_name" \
--project="$project" --zone="$zone" \
--machine-type="e2-standard-8" \
--boot-disk-size="40GB" --boot-disk-type="pd-ssd" --boot-disk-device-name="$instance_name"
# Give a little time for the server to be ready.
while true; do
sleep 5
if ! "$here"/ssh.sh ls; then
echo "Waiting for VM to be accessible via ssh..."
else
echo "VM ready!"
break
fi
done
# Copy the deps script to the new VM.
echo "Copying deps.sh to $instance_name..."
gcloud compute scp "$here"/lib/deps.sh "$instance_user@$instance_name":/tmp \
--project="$project" --zone="$zone"
# Run the deps script on the new VM.
"$here"/ssh.sh /tmp/deps.sh

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
# Delete the instance forever. Will prompt for confirmation.
echo "Destroying VM $instance_name..."
gcloud compute instances delete "$instance_name" \
--delete-disks="all" \
--project="$project" --zone="$zone"

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -exuo pipefail
# Start in the user's home directory.
cd
# Install brew pre-reqs documented at https://docs.brew.sh/Homebrew-on-Linux#requirements
sudo apt-get update && sudo apt-get install build-essential procps curl file git -y
# Brew installer command from https://brew.sh. Note that CI=1 turns off an interactive prompt.
CI=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# The installer prints more instructions. It advises you to add brew to profile and install gcc.
echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >>$HOME/.profile
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
brew install gcc
# Install go.
brew install go
# On linux go really wants gcc5 to also be installed for some reason.
brew install gcc@5
# Get the Go linter.
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.1
# Install and configure zsh and plugins.
brew install zsh zsh-history-substring-search
brew install fasd fzf
/home/linuxbrew/.linuxbrew/opt/fzf/install --all --no-bash --no-fish
# Install https://ohmyz.sh
export PATH=$PATH:/home/linuxbrew/.linuxbrew/bin
CHSH=no RUNZSH=no KEEP_ZSHRC=yes sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
# Install some plugins.
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$HOME"/.oh-my-zsh/custom/themes/powerlevel10k
git clone https://github.com/zsh-users/zsh-autosuggestions "$HOME"/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/TamCore/autoupdate-oh-my-zsh-plugins "$HOME"/.oh-my-zsh/plugins/autoupdate
git clone https://github.com/zdharma-continuum/fast-syntax-highlighting.git "$HOME"/.oh-my-zsh/custom/plugins/fast-syntax-highlighting
# Get decent .zshrc and .p10k.zsh files.
curl -fsSL https://gist.githubusercontent.com/cfryanr/c84ca9e3fe519b5a7f07426ecc7e3a7c/raw >"$HOME"/.zshrc
curl -fsSL https://gist.githubusercontent.com/cfryanr/3e55b770b9be485bd8671377ce04a3f1/raw >"$HOME"/.p10k.zsh
# Change the user's default shell.
sudo chsh -s /home/linuxbrew/.linuxbrew/bin/zsh "$USER"
# Get some other useful config files.
curl -fsSL https://gist.githubusercontent.com/cfryanr/153e167a1f2c20934fbc4dc32bbec8f2/raw >"$HOME"/.gitconfig
curl -fsSL https://gist.githubusercontent.com/cfryanr/80ada8af9a78f08b368327401ea80b6c/raw >"$HOME"/.git-authors
# Install other useful packages.
brew tap homebrew/command-not-found
brew tap vmware-tanzu/carvel
brew install ytt kbld kapp imgpkg kwt vendir
brew install git git-duet/tap/git-duet pre-commit gh
brew install k9s kind kubectl kubectx stern
brew install exa acarl005/homebrew-formulas/ls-go ripgrep procs bat tokei git-delta dust fd httpie chroma
brew install watch htop wget
brew install jesseduffield/lazydocker/lazydocker ctop dive
brew install jq yq
brew install grip
brew install aws-iam-authenticator
brew install step cfssl
brew install nmap
sudo apt-get install apache2-utils rsync -y
# Install Chrome
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo apt install ./google-chrome-stable_current_amd64.deb -y
rm ./google-chrome-stable_current_amd64.deb
google-chrome --version
mkdir "$HOME"/bin
# Install docker according to procedure from https://docs.docker.com/engine/install/debian/
sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io -y
sudo usermod -aG docker "$USER"
sudo systemctl enable docker.service
sudo systemctl enable containerd.service
# Set up the Pinniped repo
mkdir workspace
pushd workspace
ssh-keyscan -H github.com >> $HOME/.ssh/known_hosts
# This assumes that you used `--ssh-flag=-A` when using `gcloud compute ssh` to log in to the host,
# which will forward your ssh identities.
git clone git@github.com:vmware-tanzu/pinniped.git
pushd pinniped
pre-commit install
popd
popd
set +x
echo
echo "Successfully installed deps!"

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Copyright 2022-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# This is similar to rsync.sh, but with the src and dest flipped at the end.
# It will copy all changes from the remote workstation back to your local machine (overwriting your local changes).
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"}
src_dir_parent=$(dirname "$SRC_DIR")
dest_dir="./workspace/pinniped"
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
config_file="/tmp/gcp-ssh-config"
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ ! -d "$SRC_DIR" ]]; then
echo "ERROR: $SRC_DIR does not exist"
exit 1
fi
# Get the ssh fingerprints of all the GCP VMs.
gcloud compute config-ssh --ssh-config-file="$config_file" \
--project="$project" >/dev/null
cd "$SRC_DIR"
local_commit=$(git rev-parse --short HEAD)
remote_commit=$("$here"/ssh.sh "cd $dest_dir; git rev-parse --short HEAD" 2>/dev/null | tr -dc '[:print:]')
if [[ -z "$local_commit" || -z "$remote_commit" ]]; then
echo "ERROR: Could not determine currently checked out git commit sha"
exit 1
fi
if [[ "$local_commit" != "$remote_commit" ]]; then
echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake."
echo "Local was $SRC_DIR at *${local_commit}*"
echo "Remote was ${instance_name}:${dest_dir} at *${remote_commit}*"
exit 1
fi
# Skip large files because they are probably compiled binaries.
# Also skip other common filenames that we wouldn't need to sync.
echo "Starting rsync from remote to local for $SRC_DIR..."
rsync \
--progress --delete --archive --compress --human-readable \
--max-size 200K \
--exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \
--rsh "ssh -F $config_file" \
"${instance_user}@${instance_name}.${zone}.${project}:$dest_dir" "$src_dir_parent"

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"}
dest_dir="./workspace"
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
config_file="/tmp/gcp-ssh-config"
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ ! -d "$SRC_DIR" ]]; then
echo "ERROR: $SRC_DIR does not exist"
exit 1
fi
# Get the ssh fingerprints of all the GCP VMs.
gcloud compute config-ssh --ssh-config-file="$config_file" \
--project="$project" >/dev/null
cd "$SRC_DIR"
local_commit=$(git rev-parse --short HEAD)
remote_commit=$("$here"/ssh.sh "cd $dest_dir/pinniped; git rev-parse --short HEAD" 2>/dev/null | tr -dc '[:print:]')
if [[ -z "$local_commit" || -z "$remote_commit" ]]; then
echo "ERROR: Could not determine currently checked out git commit sha"
exit 1
fi
if [[ "$local_commit" != "$remote_commit" ]]; then
echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake."
echo "Local was $SRC_DIR at *${local_commit}*"
echo "Remote was ${instance_name}:${dest_dir}/pinniped at *${remote_commit}*"
exit 1
fi
# Skip large files because they are probably compiled binaries.
# Also skip other common filenames that we wouldn't need to sync.
echo "Starting rsync for $SRC_DIR..."
rsync \
--progress --delete --archive --compress --human-readable \
--max-size 200K \
--exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \
--rsh "ssh -F $config_file" \
"$SRC_DIR" "${instance_user}@${instance_name}.${zone}.${project}:$dest_dir"

22
hack/remote-workstation/ssh.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
# Run ssh with identities forwarded so you can use them with git on the remote host.
# Optionally run an arbitrary command on the remote host.
# By default, start an interactive session.
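# Examples (illustrative):
#   ./ssh.sh                # start an interactive session on the VM
#   ./ssh.sh ls /tmp        # run a single command remotely and exit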
gcloud compute ssh --ssh-flag=-A "$instance_user@$instance_name" \
--project="$project" --zone="$zone" -- "$@"

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
# Start an instance which was previously stopped to save money.
echo "Starting VM $instance_name..."
gcloud compute instances start "$instance_name" \
--project="$project" --zone="$zone"

20
hack/remote-workstation/stop.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-central1-b"
# Stop the instance, to save money, in a way that it can be restarted.
echo "Stopping VM $instance_name..."
gcloud compute instances stop "$instance_name" \
--project="$project" --zone="$zone"

87
hack/run-integration-tests.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# This script will prepare to run the integration tests and then run them.
# It is a wrapper for prepare-for-integration-tests.sh to make it convenient
# to run the integration tests, potentially running them repeatedly.
set -euo pipefail
help=no
skip_build=no
delete_kind_cluster=no
PARAMS=""
while (("$#")); do
case "$1" in
-h | --help)
help=yes
shift
;;
-s | --skip-build)
skip_build=yes
shift
;;
-c | --from-clean-cluster)
delete_kind_cluster=yes
shift
;;
-*)
echo "Error: Unsupported flag $1" >&2
exit 1
;;
*)
PARAMS="$PARAMS $1"
shift
;;
esac
done
eval set -- "$PARAMS"
if [[ "$help" == "yes" ]]; then
me="$(basename "${BASH_SOURCE[0]}")"
echo "Usage:"
echo " $me [flags] [path/to/pinniped]"
echo
echo " path/to/pinniped default: \$PWD ($PWD)"
echo
echo "Flags:"
echo " -h, --help: print this usage"
echo " -s, --skip-build: reuse the most recently built image of the app instead of building"
echo " -c, --from-clean-cluster: delete and rebuild the kind cluster before running tests"
exit 1
fi
pinniped_path="${1-$PWD}"
cd "$pinniped_path" || exit 1
if [[ ! -f Dockerfile || ! -d deploy ]]; then
echo "$pinniped_path does not appear to be the path to the source code repo directory"
exit 1
fi
if ! command -v kind >/dev/null; then
echo "Please install kind. e.g. 'brew install kind' for MacOS"
exit 1
fi
if [[ "$delete_kind_cluster" == "yes" ]]; then
echo "Deleting running kind clusters to prepare a clean slate..."
"$pinniped_path"/hack/kind-down.sh
fi
if [[ "$skip_build" == "yes" ]]; then
"$pinniped_path"/hack/prepare-for-integration-tests.sh --skip-build
else
"$pinniped_path"/hack/prepare-for-integration-tests.sh
fi
source /tmp/integration-test-env
ulimit -n 512
echo
echo "Running integration tests..."
go test -race -v -count 1 -timeout 0 ./test/integration
echo "ALL INTEGRATION TESTS PASSED"

31
hack/setup-fly.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Define some env vars
source "$script_dir/fly-helpers.sh"
# Install the fly cli if needed
if [[ ! -f "$FLY_CLI" ]]; then
curl -fL "$CONCOURSE_URL/api/v1/cli?arch=amd64&platform=darwin" -o "$FLY_CLI"
chmod 755 "$FLY_CLI"
fi
if ! $FLY_CLI targets | tr -s ' ' | cut -f1 -d ' ' | grep -q "$CONCOURSE_TARGET"; then
# Create the target if needed
$FLY_CLI --target "$CONCOURSE_TARGET" login \
--team-name "$CONCOURSE_TEAM" --concourse-url "$CONCOURSE_URL"
else
# Login if needed
if ! $FLY_CLI --target "$CONCOURSE_TARGET" status; then
$FLY_CLI --target "$CONCOURSE_TARGET" login
fi
fi
# Upgrade fly if needed
$FLY_CLI --target "$CONCOURSE_TARGET" sync

60
infra/README.md Normal file
View File

@@ -0,0 +1,60 @@
# Installing and operating Concourse
Concourse is made up of a web deployment and a worker deployment.
## Terraform
We use Terraform to create and update the IaaS infrastructure on which we run all the Concourse components.
This infrastructure must be created before deploying the corresponding Concourse components.
### Infrastructure Providers
We use Google Cloud for the infrastructure.
### Running Terraform
See [infra/terraform/gcloud/README.md](./terraform/gcloud/README.md) for details of using Terraform
to create or update the Google Cloud infrastructure for Concourse. This infrastructure will be used
to run the web and internal workers.
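As a rough sketch, an update typically follows the standard Terraform workflow (illustrative commands only; the directory name is assumed from the link above, and the linked README remains authoritative):

```shell
cd infra/terraform/gcloud
terraform init    # download providers and set up the backend
terraform plan    # review the proposed infrastructure changes
terraform apply   # apply them to the Google Cloud project
```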
## Bootstrapping Secrets (after Terraform)
Before deploying Concourse for the first time, the
[infra/concourse-install/bootstrap-secrets.sh](./concourse-install/bootstrap-secrets.sh)
script must be used to auto-generate some values and store them in a new secret in the Secrets Manager.
This script only needs to be run once.
1. Create a GitHub OAuth client as described in https://concourse-ci.org/github-auth.html.
The callback URI should be set to `https://ci.pinniped.dev/sky/issuer/callback`.
Take note of the client ID and client secret for use in the next step.
2. Run `GITHUB_CLIENT_ID=<your_client_id> GITHUB_CLIENT_SECRET=<your_client_secret> ./bootstrap-secrets.sh`.
This will create a secret in the GCP Secrets Manager which includes the GitHub client info
along with some auto-generated secrets. The secret can be read back later, as shown below.
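A minimal sketch for reading it back, assuming `PINNIPED_GCP_PROJECT` is exported and you are logged in via `gcloud auth login`:
```shell
# Print the bootstrap secret that was generated and stored by bootstrap-secrets.sh.
# Its keys mirror the Concourse Helm chart's values.yaml inputs (plus our custom externalWorkerKey).
gcloud secrets versions access latest \
  --secret concourse-install-bootstrap \
  --project "$PINNIPED_GCP_PROJECT"
```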
## Web Deployment
The "brains" of Concourse is its web deployment. It can be created and updated by running the
[infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh)
script on your laptop.
## Worker Deployments
We run our workers on the same GKE cluster where we run the web component.
See [infra/concourse-install/*-internal-workers.sh](./concourse-install) for scripts to deploy/update the workers,
scale the workers, and view the workers.
These workers can also be scaled by the jobs in the `concourse-workers` pipeline.
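For example, the scale scripts can be run directly from your laptop. A minimal sketch, assuming you run them from `infra/concourse-install/`, have `gcloud`, `kubectl`, and `jq` installed, are logged in via `gcloud auth login`, and have exported `PINNIPED_GCP_PROJECT`:
```shell
# Add one worker replica (refuses to exceed the node pool's autoscaler maximum).
./scale-up-concourse-internal-workers.sh
# Remove one worker replica (refuses to go below the autoscaler minimum).
./scale-down-concourse-internal-workers.sh
```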
## Upgrading Concourse
To upgrade each deployment to a new version of Concourse:
1. If any infrastructure updates are needed, follow the terraform instructions again.
2. Change the version of the Helm chart in the source code of the script used to create each deployment,
and then run each script to upgrade the deployment (see the sketch after this list). Note that this will scale the internal workers deployment
back to its default number of replicas.
1. [infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh)
2. [infra/concourse-install/deploy-concourse-internal-workers.sh](./concourse-install/deploy-concourse-internal-workers.sh)
3. Commit and push those script changes.
4. Trigger the CI jobs to scale the internal workers back to the desired number as needed.
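As a rough sketch of step 2 above (assumes `gcloud auth login` has been run and `PINNIPED_GCP_PROJECT` is exported; always read the scripts themselves before upgrading):
```shell
# After editing the chart version (the --version flag passed to helm upgrade) in both scripts:
cd infra/concourse-install
./deploy-concourse-web.sh
./deploy-concourse-internal-workers.sh
# Remember that this resets the internal workers back to the default replica count,
# so scale them back up afterwards (e.g. via the concourse-workers pipeline jobs).
```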

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# Require two env vars.
if [[ -z "${GITHUB_CLIENT_ID:-}" ]]; then
echo "GITHUB_CLIENT_ID env var must be set"
exit 1
fi
if [[ -z "${GITHUB_CLIENT_SECRET:-}" ]]; then
echo "GITHUB_CLIENT_SECRET env var must be set"
exit 1
fi
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
# Check pre-reqs.
if ! command -v gcloud &>/dev/null; then
echo "Please install the gcloud CLI"
exit 1
fi
if ! command -v yq &>/dev/null; then
echo "Please install the yq CLI"
exit 1
fi
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
# Create a temporary directory for secrets, cleaned up at the end of this script.
trap 'rm -rf "$TEMP_DIR"' EXIT
TEMP_DIR=$(mktemp -d) || exit 1
# Create the three keys required to install the Concourse web component.
# See https://github.com/concourse/concourse-chart/tree/master#secrets
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t rsa -f /keys/session-signing-key
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/worker-key
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/host-key
# Create an extra keypair for our external workers so they can use a different private key
# to avoid sharing the private key of the internal workers to other Kubernetes clusters.
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/external-worker-key
# Create an encryption key for DB encryption at rest.
printf "%s" "$(openssl rand -base64 24)" >"$TEMP_DIR/encryption-key"
# Write a tmp yaml file which bundles together all of the secrets from above.
# The structure of the keys in this file matches the concourse helm chart's values.yaml inputs,
# except for .secrets.externalWorkerKey which is our own custom key.
SECRETS_FILE="$TEMP_DIR/secrets.yaml"
echo "# This secret is auto-generated by infra/concourse-install/bootstrap-secrets.sh" >"$SECRETS_FILE"
yq -i e ".secrets.hostKey = \"$(cat "$TEMP_DIR/host-key")\"" "$SECRETS_FILE" # TSA host key
yq -i e ".secrets.hostKeyPub = \"$(cat "$TEMP_DIR/host-key.pub")\"" "$SECRETS_FILE" # TSA host key pub
yq -i e ".secrets.sessionSigningKey = \"$(cat "$TEMP_DIR/session-signing-key")\"" "$SECRETS_FILE"
yq -i e ".secrets.workerKey = \"$(cat "$TEMP_DIR/worker-key")\"" "$SECRETS_FILE"
yq -i e ".secrets.externalWorkerKey = \"$(cat "$TEMP_DIR/external-worker-key")\"" "$SECRETS_FILE"
# Put both public keys into the workerKeyPub secret, one on each line.
yq -i e ".secrets.workerKeyPub = \"$(cat "$TEMP_DIR/worker-key.pub" "$TEMP_DIR/external-worker-key.pub")\"" "$SECRETS_FILE"
yq -i e ".secrets.encryptionKey = \"$(cat "$TEMP_DIR/encryption-key")\"" "$SECRETS_FILE"
yq -i e ".secrets.githubClientId = \"$GITHUB_CLIENT_ID\"" "$SECRETS_FILE"
yq -i e ".secrets.githubClientSecret = \"$GITHUB_CLIENT_SECRET\"" "$SECRETS_FILE"
# Save the tmp yaml file into the GCP Secrets Manager for later use.
gcloud secrets create concourse-install-bootstrap \
--data-file "$SECRETS_FILE" \
--project "$PINNIPED_GCP_PROJECT"

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# This script deletes the concourse worker from our GKE environment using Helm.
HELM_RELEASE_NAME="concourse-workers"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if ! command -v gcloud &>/dev/null; then
echo "Please install the gcloud CLI"
exit 1
fi
if ! command -v yq &>/dev/null; then
echo "Please install the yq CLI"
exit 1
fi
if ! command -v kubectl &>/dev/null; then
echo "Please install the kubectl CLI"
exit 1
fi
if ! command -v helm &>/dev/null; then
echo "Please install the helm CLI"
exit 1
fi
if ! command -v terraform &>/dev/null; then
echo "Please install the terraform CLI"
exit 1
fi
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
# Create a temporary directory for secrets, cleaned up at the end of this script.
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
# Get the output values from terraform.
pushd "$script_dir/../terraform/gcloud" >/dev/null
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
popd >/dev/null
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
# Download the admin kubeconfig for the cluster.
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
chmod 0600 "$KUBECONFIG"
# Dump out the cluster info for diagnostic purposes.
kubectl cluster-info
# Delete the helm chart.
helm uninstall -n concourse-worker "$HELM_RELEASE_NAME" \
--debug \
--wait

View File

@@ -0,0 +1,109 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# This script deploys the concourse worker component into our GKE environment using Helm
# and secrets from GCP and Terraform.
HELM_RELEASE_NAME="concourse-workers"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if ! command -v gcloud &>/dev/null; then
echo "Please install the gcloud CLI"
exit 1
fi
if ! command -v yq &>/dev/null; then
echo "Please install the yq CLI"
exit 1
fi
if ! command -v kubectl &>/dev/null; then
echo "Please install the kubectl CLI"
exit 1
fi
if ! command -v helm &>/dev/null; then
echo "Please install the helm CLI"
exit 1
fi
if ! command -v ytt &>/dev/null; then
echo "Please install the ytt CLI"
exit 1
fi
if ! command -v terraform &>/dev/null; then
echo "Please install the terraform CLI"
exit 1
fi
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
# Add/update the concourse helm repository.
helm repo add concourse https://concourse-charts.storage.googleapis.com/
helm repo update concourse
# Create a temporary directory for secrets, cleaned up at the end of this script.
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
# Get the output values from terraform.
pushd "$script_dir/../terraform/gcloud" >/dev/null
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
popd >/dev/null
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
# Download the admin kubeconfig for the cluster.
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
chmod 0600 "$KUBECONFIG"
# Download some secrets. These were created once by bootstrap-secrets.sh.
BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml"
gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE"
TSA_HOST_KEY_PUB=$(yq eval '.secrets.hostKeyPub' "$BOOTSTRAP_SECRETS_FILE")
WORKER_PRIVATE_KEY=$(yq eval '.secrets.workerKey' "$BOOTSTRAP_SECRETS_FILE")
# Dump out the cluster info for diagnostic purposes.
kubectl cluster-info
# Some of the configuration options used below were inspired by how HushHouse runs on GKE.
# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/workers/values.yaml
# Install/upgrade the helm chart.
# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml
# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart
# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file.
helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \
--version 17.3.1 \
--debug \
--install \
--wait \
--create-namespace \
--namespace concourse-worker \
--values "$script_dir/internal-workers/values-workers.yaml" \
--set concourse.worker.tsa.publicKey="$TSA_HOST_KEY_PUB" \
--set concourse.worker.tsa.workerPrivateKey="$WORKER_PRIVATE_KEY" \
--set secrets.workerKey="$WORKER_PRIVATE_KEY" \
--set secrets.hostKeyPub="$TSA_HOST_KEY_PUB" \
--post-renderer "$script_dir/internal-workers/ytt-helm-postrender-workers.sh"
# By default, it will not be possible for the autoscaler to scale down to one node.
# The autoscaler logs will show that the kube-dns pod cannot be moved. See
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
# for how to view and interpret the autoscaler logs.
# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error
# that we were getting for the kube-dns pod in the logs.
kubectl create poddisruptionbudget kube-dns-pdb \
--namespace=kube-system \
--selector k8s-app=kube-dns \
--max-unavailable 1 \
--dry-run=client -o yaml | kubectl apply -f -

View File

@@ -0,0 +1,120 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# This script deploys the concourse web component into our GKE environment using Helm
# and secrets from GCP and Terraform.
HELM_RELEASE_NAME="concourse-web"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if ! command -v gcloud &>/dev/null; then
echo "Please install the gcloud CLI"
exit 1
fi
if ! command -v yq &>/dev/null; then
echo "Please install the yq CLI"
exit 1
fi
if ! command -v kubectl &>/dev/null; then
echo "Please install the kubectl CLI"
exit 1
fi
if ! command -v helm &>/dev/null; then
echo "Please install the helm CLI"
exit 1
fi
if ! command -v ytt &>/dev/null; then
echo "Please install the ytt CLI"
exit 1
fi
if ! command -v terraform &>/dev/null; then
echo "Please install the terraform CLI"
exit 1
fi
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
echo "Please run \`gcloud auth login\`"
exit 1
fi
# Add/update the concourse helm repository.
helm repo add concourse https://concourse-charts.storage.googleapis.com/
helm repo update concourse
# Create a temporary directory for secrets, cleaned up at the end of this script.
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
# Get the output values from terraform.
pushd "$script_dir/../terraform/gcloud" >/dev/null
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
popd >/dev/null
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
WEB_IP_ADDRESS=$(yq eval '.web-ip.value' "$TERRAFORM_OUTPUT_FILE")
WEB_HOSTNAME=$(yq eval '.web-hostname.value' "$TERRAFORM_OUTPUT_FILE")
DB_IP_ADDRESS=$(yq eval '.database-ip.value' "$TERRAFORM_OUTPUT_FILE")
DB_USERNAME=$(yq eval '.database-username.value' "$TERRAFORM_OUTPUT_FILE")
DB_PASSWORD=$(yq eval '.database-password.value' "$TERRAFORM_OUTPUT_FILE")
DB_CA_CERT=$(yq eval '.database-ca-cert.value' "$TERRAFORM_OUTPUT_FILE")
DB_CLIENT_CERT=$(yq eval '.database-cert.value' "$TERRAFORM_OUTPUT_FILE")
DB_CLIENT_KEY=$(yq eval '.database-private-key.value' "$TERRAFORM_OUTPUT_FILE")
# Download the admin kubeconfig for the cluster.
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
chmod 0600 "$KUBECONFIG"
# Download some secrets. These were created once by bootstrap-secrets.sh.
BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml"
gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE"
# Dump out the cluster info for diagnostic purposes.
kubectl cluster-info
# Some of the configuration options used below were inspired by how HushHouse runs on GKE.
# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/hush-house/values.yaml
# Install/upgrade the helm chart.
# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml
# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart
# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file.
helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \
--version 17.3.1 \
--debug \
--install \
--wait \
--create-namespace \
--namespace concourse-web \
--values "$script_dir/web/values-web.yaml" \
--values "$BOOTSTRAP_SECRETS_FILE" \
--set web.service.api.loadBalancerIP="$WEB_IP_ADDRESS" \
--set web.service.workerGateway.loadBalancerIP="$WEB_IP_ADDRESS" \
--set concourse.web.externalUrl="https://$WEB_HOSTNAME" \
--set concourse.web.postgres.host="$DB_IP_ADDRESS" \
--set secrets.postgresUser="$DB_USERNAME" \
--set secrets.postgresPassword="$DB_PASSWORD" \
--set secrets.postgresCaCert="$DB_CA_CERT" \
--set secrets.postgresClientCert="$DB_CLIENT_CERT" \
--set secrets.postgresClientKey="$DB_CLIENT_KEY" \
--post-renderer "$script_dir/web/ytt-helm-postrender-web.sh"
# By default, it will not be possible for the autoscaler to scale down to one node.
# The autoscaler logs will show that the kube-dns pod cannot be moved. See
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
# for how to view and interpret the autoscaler logs.
# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error
# that we were getting for the kube-dns pod in the logs.
kubectl create poddisruptionbudget kube-dns-pdb \
--namespace=kube-system \
--selector k8s-app=kube-dns \
--max-unavailable 1 \
--dry-run=client -o yaml | kubectl apply -f -

View File

@@ -0,0 +1,24 @@
#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:overlay", "overlay")
#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS.
#! All containers must have requests equal to limits, including the initContainers.
#@overlay/match by=overlay.subset({"kind": "StatefulSet", "metadata":{"name":"concourse-worker"}}), expects=1
---
spec:
template:
spec:
initContainers:
- #@overlay/match by="name"
name: concourse-worker-init-rm
#@overlay/match missing_ok=True
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 1000m
memory: 1Gi

View File

@@ -0,0 +1,79 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Helps decide the name of the StatefulSet along with other resources and labels. Will be suffixed with "-worker".
fullnameOverride: concourse
web:
enabled: false
postgresql:
enabled: false
worker:
# In an effort to save money, default to 1 worker.
replicas: 1
nodeSelector: { cloud.google.com/gke-nodepool: workers-2 } # the name of the nodepool from terraform
hardAntiAffinity: true
minAvailable: 0
terminationGracePeriodSeconds: 3600
livenessProbe:
periodSeconds: 60
failureThreshold: 10
timeoutSeconds: 45
resources:
# Inspired by https://github.com/concourse/hush-house/blob/16f52e57c273282ebace68051b0fe9133dc3a04e/deployments/with-creds/workers/values.yaml#L30-L32
#
# Note that Kubernetes uses Ki (Kibibytes) and Gi (Gibibytes). You can do conversions by doing google
# searches using the more commonly used names for those units, e.g. searching "29061248 KiB to GiB".
#
# Limit to using all available CPUs and most of the available memory in our 8 vCPU / 32 GB worker VM nodes (the workers-2 node pool).
# According to the "Allocatable" section of the "kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-2" output,
# each node has 29061248 Ki, which is equal to 27.7149658203 Gi of memory allocatable,
# and each node has 7910m cpu allocatable.
#
# By making our requests equal to our limits, we should be assigned "Guaranteed" QoS.
# But we need to leave enough space for all other pods' requests too, because GKE runs several pods on each node automatically.
# The first node in the node pool has the most pods scheduled on it, so we will choose our values based on the first node
# by looking at its "Allocated resources" section of the describe output.
# CPU:
# - On the first node, the other pods' CPU requests total 1324m (16%).
# - The available CPU for our pod is 7910m allocatable - 1324m allocated = 6586m remaining.
# Memory:
# - On the first node, the other pods' memory requests total 1394740096 (bytes) (4%) = 1.298952937126 Gi.
# - The available memory for our pod is 27.7149658203 Gi - 1.298952937126 Gi = 26.4160128832 Gi.
# However, Google can change these values over time, so we need to leave a little extra room
# in case Google's pods take a little more later.
#
# In order for the pod to be assigned "Guaranteed" QoS, all the containers need to
# have requests equal to limits, so the initContainer also has similar settings applied
# by the init-container-overlay.yaml overlay.
limits:
cpu: 6480m
memory: 26Gi
requests:
cpu: 6480m
memory: 26Gi
persistence:
worker:
size: 375Gi
storageClass: premium-rwo
concourse:
worker:
# rebalanceInterval: 2h
baggageclaim:
driver: overlay
healthcheckTimeout: 40s
runtime: containerd
containerd:
# networkPool: "10.254.0.0/16"
# maxContainers is usually set to 250, but increasing it to see if we can squeeze more from each worker.
maxContainers: 300
restrictedNetworks:
- 169.254.169.254/32
tsa:
hosts:
# This service name must match the name decided by the web deployment
- concourse-web-worker-gateway.concourse-web.svc.cluster.local:2222

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ytt -f "$script_dir/init-container-overlay-workers.yaml" -f-

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \
"$GCP_USERNAME" \
--key-file <(echo "$GCP_JSON_KEY") \
--project "$PROJECT"
fi
trap 'rm -rf "$TEMP_DIR"' EXIT
TEMP_DIR=$(mktemp -d) || exit 1
# Download the admin kubeconfig for the GKE cluster created by terraform.
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE"
current=$(kubectl get statefulset "$STATEFULSET" \
--namespace "$NAMESPACE" \
--output=jsonpath="{.spec.replicas}" \
--kubeconfig="${KUBECONFIG}")
desired=$((current - 1))
echo "current scale=$current"
echo "desired scale=$desired"
minNodes=$(gcloud container clusters describe "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE" \
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount")
if [[ $desired -lt $minNodes ]]; then
echo "ERROR: will not scale below the cluster autoscaler limit of $minNodes for the node pool"
exit 1
fi
kubectl scale \
--current-replicas=$current \
--replicas=$desired \
--kubeconfig="${KUBECONFIG}" \
--namespace "$NAMESPACE" \
"statefulset/$STATEFULSET"

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
TARGET="pinniped"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \
"$GCP_USERNAME" \
--key-file <(echo "$GCP_JSON_KEY") \
--project "$PINNIPED_GCP_PROJECT"
fi
trap 'rm -rf "$TEMP_DIR"' EXIT
TEMP_DIR=$(mktemp -d) || exit 1
# Download the admin kubeconfig for the GKE cluster created by terraform.
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE"
current=$(kubectl get statefulset "$STATEFULSET" \
--namespace "$NAMESPACE" \
--output=jsonpath="{.spec.replicas}" \
--kubeconfig="${KUBECONFIG}")
minNodes=$(gcloud container clusters describe "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE" \
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount")
maxNodes=$(gcloud container clusters describe "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE" \
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount")
echo
echo "current scale=$current, min=$minNodes, max=$maxNodes"
echo
echo "Current pods..."
kubectl get pods \
--output wide \
--namespace "$NAMESPACE" \
--kubeconfig="${KUBECONFIG}"
echo
echo "Volumes usage for current pods..."
kubectl get pods \
--namespace "${NAMESPACE}" \
--kubeconfig="${KUBECONFIG}" \
--template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' \
| xargs -n1 -I {} bash -c "echo \"{}: \" && kubectl exec {} -n ${NAMESPACE} -c concourse-worker --kubeconfig ${KUBECONFIG} -- df -ah /concourse-work-dir | sed \"s|^| |\""
echo
echo "Current nodes in nodepool $NODEPOOL..."
kubectl get nodes \
-l cloud.google.com/gke-nodepool=$NODEPOOL \
--kubeconfig="${KUBECONFIG}"
echo
echo "Current fly workers..."
if ! fly --target "$TARGET" status >/dev/null; then
fly --target "$TARGET" login
fi
fly --target "$TARGET" workers
echo ""
echo "Note: If the number of pods, nodes, and fly workers are not all the same,"
echo "and some time has passed since you have changed the scale, then something may be wrong."

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
echo "PINNIPED_GCP_PROJECT env var must be set"
exit 1
fi
CLUSTER="pinniped-concourse"
PROJECT="$PINNIPED_GCP_PROJECT"
ZONE="us-central1-c"
STATEFULSET="concourse-worker"
NAMESPACE="concourse-worker"
NODEPOOL="workers-2"
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
gcloud auth activate-service-account \
"$GCP_USERNAME" \
--key-file <(echo "$GCP_JSON_KEY") \
--project "$PROJECT"
fi
trap 'rm -rf "$TEMP_DIR"' EXIT
TEMP_DIR=$(mktemp -d) || exit 1
# Download the admin kubeconfig for the GKE cluster created by terraform.
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
gcloud container clusters get-credentials "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE"
current=$(kubectl get statefulset "$STATEFULSET" \
--namespace "$NAMESPACE" \
--output=jsonpath="{.spec.replicas}" \
--kubeconfig="${KUBECONFIG}")
desired=$((current + 1))
echo "current scale=$current"
echo "desired scale=$desired"
maxNodes=$(gcloud container clusters describe "$CLUSTER" \
--project "$PROJECT" \
--zone "$ZONE" \
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount")
if [[ $desired -gt $maxNodes ]]; then
echo "ERROR: will not scale above the cluster autoscaler limit of $maxNodes for the node pool"
exit 1
fi
kubectl scale \
--current-replicas=$current \
--replicas=$desired \
--kubeconfig="${KUBECONFIG}" \
--namespace "$NAMESPACE" \
"statefulset/$STATEFULSET"

View File

@@ -0,0 +1,24 @@
#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:overlay", "overlay")
#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS.
#! All containers must have requests equal to limits, including the initContainers.
#@overlay/match by=overlay.subset({"kind": "Deployment", "metadata":{"name":"concourse-web"}}), expects=1
---
spec:
template:
spec:
initContainers:
- #@overlay/match by="name"
name: concourse-migration
#@overlay/match missing_ok=True
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 1000m
memory: 1Gi

View File

@@ -0,0 +1,119 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-web".
fullnameOverride: concourse
worker:
enabled: false
postgresql:
enabled: false
web:
# In an effort to save money, default to 1 web server.
replicas: 1
nodeSelector: { cloud.google.com/gke-nodepool: generic-1 } # the name of the nodepool from terraform
additionalAffinities:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: concourse-web # see comment on fullnameOverride above
release: concourse-web # this must be the same name as the helm release in deploy-concourse-web.sh
service:
api:
type: LoadBalancer
workerGateway:
type: LoadBalancer
# The first node in the generic-1 nodepool (using e2-highcpu-8 VM) has lots of GKE and Kubernetes pods running on it.
# According to the "allocatable" section of the "kubectl get node -o yaml" output, the first node has
# 7910m cpu and 6179084 Ki memory (which is about 5.893 Gi).
# The total requests from the GKE/Kube pods is 1017m cpu and 1046766976 (bytes) memory (which is about 0.975 Gi).
# The difference between the allocatable memory and the requested memory is 4.918 Gi, so we will request slightly
# less than that to leave a little headroom on the cluster in case some of these pods get upgraded and decide
# to request more in the future. Similarly, the cpu difference is 6893m.
resources:
requests:
cpu: 6400m
memory: 4.7Gi
limits:
cpu: 6400m
memory: 4.7Gi
strategy:
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
concourse:
web:
localAuth:
enabled: false
auth:
mainTeam:
localUser: ""
github:
# From https://concourse-ci.org/github-auth.html...
# "Note that the client must be created under an organization if you want to authorize users based on
# organization/team membership. In addition, the GitHub application must have at least read access on
# the organization's members. If the client is created under a personal account, only individual users
# can be authorized."
# We requested that the owner of the vmware-tanzu org create an OAuth client for us.
# Because it was created in the org, it should have permissions to read team memberships during a login.
# The client ID and client secret are stored in the bootstrap secret in the Secrets Manager
# (see infra/README.md for more info about the bootstrap secret).
team: vmware-tanzu:pinniped-owners
github:
enabled: true
bindPort: 80
clusterName: pinniped-ci
# containerPlacementStrategy: random
defaultDaysToRetainBuildLogs: 60
# enableAcrossStep: true
# enablePipelineInstances: true
# enableBuildAuditing: true
# enableContainerAuditing: true
# enableGlobalResources: true
# enableJobAuditing: true
# enablePipelineAuditing: true
# enableResourceAuditing: true
# enableSystemAuditing: true
# enableTeamAuditing: true
# enableVolumeAuditing: true
# enableWorkerAuditing: true
enableCacheStreamedVolumes: true
enableResourceCausality: true
enableRedactSecrets: true
baggageclaimResponseHeaderTimeout: 10m
encryption:
enabled: true
kubernetes:
keepNamespaces: true
letsEncrypt:
enabled: true
acmeURL: "https://acme-v02.api.letsencrypt.org/directory"
tls:
enabled: true
bindPort: 443
postgres:
database: atc
sslmode: verify-ca
gc:
# See https://concourse-ci.org/performance-tuning.html#concourse_gc_failed_grace_period.
# Defaults to 5 days. This means that when lots of jobs in a pipeline fail, all of those
# containers will stick around for 5 days, causing you to quickly reach the max containers
# per worker and start seeing orange jobs complaining that they cannot start containers.
# It's nice for debugging when you can hijack a container of a job that failed a long time
# ago, but it comes at the cost of needing more workers to hold on to those containers.
failedGracePeriod: 10m
# logLevel: debug
tsa:
# logLevel: debug
secrets:
localUsers: ""

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ytt -f "$script_dir/init-container-overlay-web.yaml" -f-

View File

@@ -0,0 +1,64 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/google" {
version = "5.11.0"
constraints = "~> 5.0"
hashes = [
"h1:Ezg3fsY84CB/2P00ZwQEECuIfJd6UUYs5tIptN2kzsE=",
"h1:FV7t+G3+rJD3aN5Yr+FY8/cDG+FKhFCt8XvLJkqCcY8=",
"zh:444815a900947de3cb4e3aac48bf8cd98009130c110e3cee1e72698536046fee",
"zh:45ca22a2f44fe67f9ff71528dcd93493281e34bff7791f5eb24c86e76f32956d",
"zh:53e2e33824743e9e620454438de803de10572bd79ce16034abfc91ab1877be7a",
"zh:5eb699830a07320f896a3da7cdee169ab5fa356a6d38858b8b9337f1e4e30904",
"zh:6837cd8d9d63503e138ec3ebf52f850ca786824a3b0d5b9dfecec303f1656ca6",
"zh:7adde1fe2fc8966812bcbfeb24580cbb53f2f5301bd793eaa70ad753ba6b2d3c",
"zh:92052fd7ec776cd221f19db4624ae4ed1550c95c2984c9f3b6c54cea8896812b",
"zh:b0305aab81220b7d5711225224f5baad8fc6f5dd3a8199073966af8a151e2932",
"zh:e7b5aa624d89664803dd545f261261806b7f6607c19f6ceaf61f9011b0e02e63",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fbc04244e1f666ce0320b4eb0efb9cae460a5d688fc039637c8fe745665c19e5",
"zh:ff3553298929629ae2ad77000b3e050394e2f00c04e90a24268e3dfe6a6342c4",
]
}
provider "registry.terraform.io/hashicorp/google-beta" {
version = "5.11.0"
constraints = "~> 5.0"
hashes = [
"h1:izjzT8NnaePEXKbLQa+D4gw7HUYvK7NgIL3TJ23rjZk=",
"h1:teaW5i4Za+IHUuYSg3mRwJwVdLwKbND9UdCwG4MBvkY=",
"zh:0efa82e6fe2c83bd5280c3009db1c3acc9cdad3c9419b6ec721fbefc9f832449",
"zh:371df01e4f38b828195d115c9a8bebddebec4d34e9ef74cf3a79161da08e44b2",
"zh:5089967c420c5e4a4ba0d4c8c6ca344c7bb2476ec928f8319856260eacded369",
"zh:798a65c79386d356d6a097de680f4ece8982daae1cb0e10d6c53b383efef45f0",
"zh:90178911ac0e624c69a54a992fb3425ef09fdfb3e34b496ad7b6e168e80d4e0c",
"zh:b59c60f8479b8f0c8e91a93a4e707ce6d17c8e50e2f5afaf1d9a03c03cfedbf8",
"zh:c7f946282d80223ab3a6b284c22e4b53ffcd7b1a02449bb95a350007f30c87dc",
"zh:cd60e76987c2fdce2c84219eaff9390cd135f88aa9a27bc4d79a8fd4a8d09622",
"zh:de06bfa0393206c0253ebdea70821cb3b08ef87d5d4844be3ae463abfb4e1884",
"zh:de494bad600cca78986ce63d1018f5dbc1a1fcc2d4c41c94c15d5346f2b0dd1e",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f97a8b6e83e0083dcb42a87e8e418ab33f12d641f9cdfdc92d154ba7fd7398fb",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=",
"h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

View File

@@ -0,0 +1,28 @@
# Terraform for Google Cloud Concourse Infrastructure
We used Terraform to create the infra needed for running our own Concourse.
This includes things like a GKE cluster, a static IP, a DNS entry, and a Postgres database.
NOTE: Do not manually edit these resources using the Google Cloud UI, API, or CLI.
Instead, please update the `.tf` files and follow the steps below again.
To run Terraform to create or update the infrastructure:
1. Install the `gcloud` CLI and authenticate as yourself, if you haven't already.
2. Use `gcloud auth application-default login` if you haven't already. This is not optional.
3. Install Terraform if you haven't already, either directly via brew, or by running `brew install tfenv` and then using tfenv to install Terraform.
At the time of writing this README, we were using Terraform v1.6.6.
4. cd into this directory: `cd infra/terraform/gcloud`
5. Run `terraform init`, if you haven't already for this directory.
6. Run `terraform fmt`.
7. Run `terraform validate`.
8. Run `TF_VAR_project=$PINNIPED_GCP_PROJECT terraform apply`.
This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT`
whose value is the name of the GCP project.
If you do not need to run `terraform apply` because someone else has already done that,
then you still need to follow the above directions up to and including running `terraform init`
to set up terraform on your computer.
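For convenience, here is the apply workflow from the steps above as a copy/paste sketch (assumes `PINNIPED_GCP_PROJECT` is exported and both `gcloud auth login` and `gcloud auth application-default login` have been run):
```shell
cd infra/terraform/gcloud
terraform init
terraform fmt
terraform validate
TF_VAR_project=$PINNIPED_GCP_PROJECT terraform apply
```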
To delete the entire Concourse deployment and all its related cloud infrastructure,
use `terraform destroy`. There is no way to undo this action. This will also delete the Cloud SQL
database which contains all CI job history.

View File

@@ -0,0 +1,26 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Use our pre-existing DNS zone.
data "google_dns_managed_zone" "main" {
name = var.dns-zone
}
# Reserved external static IPv4 address for the `web` instances.
# This is needed so that we can have a static IP for `ci.pinniped.dev`.
resource "google_compute_address" "main" {
name = "${var.subdomain}-${var.dns-zone}"
}
# Make a DNS A record for our subdomain to point at our new static IP.
resource "google_dns_record_set" "main" {
name = "${var.subdomain}.${data.google_dns_managed_zone.main.dns_name}"
type = "A"
ttl = 300
managed_zone = data.google_dns_managed_zone.main.name
rrdatas = [
google_compute_address.main.address,
]
}

View File

@@ -0,0 +1,10 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "ip" {
value = google_compute_address.main.address
}
output "hostname" {
value = trimsuffix(google_dns_record_set.main.name, ".")
}

View File

@@ -0,0 +1,12 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "dns-zone" {
description = "Name of the DNS zone"
type = string
}
variable "subdomain" {
description = "Subdomain under the DNS zone to register"
type = string
}

View File

@@ -0,0 +1,124 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
module "vpc" {
source = "./vpc"
name = var.name
region = var.region
vms-cidr = "10.10.0.0/16"
pods-cidr = "10.11.0.0/16"
services-cidr = "10.12.0.0/16"
}
resource "google_service_account" "default" {
account_id = "${var.name}-sa"
display_name = "GKE Node SA for ${var.name}"
}
# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster
resource "google_container_cluster" "main" {
# Allow "terraform destroy" for this cluster.
deletion_protection = false
name = var.name
location = var.zone
network = module.vpc.name
subnetwork = module.vpc.subnet-name
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. This allows node pools to be added and removed without recreating the cluster.
# So we create the smallest possible default node pool and immediately delete it.
remove_default_node_pool = true
initial_node_count = 1
min_master_version = "1.30.4-gke.1348000"
ip_allocation_policy {
cluster_secondary_range_name = module.vpc.pods-range-name
services_secondary_range_name = module.vpc.services-range-name
}
addons_config {
http_load_balancing {
disabled = false
}
horizontal_pod_autoscaling {
disabled = false
}
network_policy_config {
disabled = false
}
}
maintenance_policy {
daily_maintenance_window {
start_time = "03:00"
}
}
network_policy {
provider = "CALICO"
enabled = true
}
workload_identity_config {
workload_pool = "${var.project}.svc.id.goog"
}
cluster_autoscaling {
autoscaling_profile = "OPTIMIZE_UTILIZATION"
}
}
# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_node_pool
resource "google_container_node_pool" "main" {
provider = google-beta
for_each = var.node-pools
location = var.zone
cluster = google_container_cluster.main.name
name = each.key
autoscaling {
min_node_count = each.value.min
max_node_count = each.value.max
}
management {
auto_repair = true
auto_upgrade = each.value.auto-upgrade
}
node_config {
preemptible = each.value.preemptible
machine_type = each.value.machine-type
local_ssd_count = each.value.local-ssds
disk_size_gb = each.value.disk-size
disk_type = each.value.disk-type
image_type = each.value.image
workload_metadata_config {
mode = "GKE_METADATA"
}
metadata = {
disable-legacy-endpoints = "true"
}
# Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
service_account = google_service_account.default.email
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
}
timeouts {
create = "30m"
delete = "30m"
}
}

View File

@@ -0,0 +1,10 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "vpc-uri" {
value = module.vpc.uri
}
output "cluster-name" {
value = google_container_cluster.main.name
}

View File

@@ -0,0 +1,25 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
default = ""
description = "The name of the GKE cluster to be created."
}
variable "zone" {
default = ""
description = "The zone where the cluster should live."
}
variable "region" {
default = ""
description = "The region in which the cluster should be located at."
}
variable "project" {
description = "The Google GCP project to host the resources."
}
variable "node-pools" {
description = "A list of node pool configurations to create and assign to the cluster."
}

View File

@@ -0,0 +1,69 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
resource "google_compute_network" "main" {
name = var.name
auto_create_subnetworks = "false"
}
resource "google_compute_subnetwork" "main" {
name = "${var.name}-sn-1"
ip_cidr_range = var.vms-cidr
network = google_compute_network.main.name
region = var.region
secondary_ip_range = [
{
range_name = var.pods-range-name
ip_cidr_range = var.pods-cidr
},
{
range_name = var.services-range-name
ip_cidr_range = var.services-cidr
}
]
}
resource "google_compute_firewall" "internal-ingress" {
name = "${var.name}-internal"
network = google_compute_network.main.name
direction = "INGRESS"
source_ranges = [
var.vms-cidr,
var.pods-cidr,
var.services-cidr,
]
allow {
protocol = "icmp"
}
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
}
resource "google_compute_firewall" "external-ingress" {
name = "${var.name}-external"
network = google_compute_network.main.name
direction = "INGRESS"
allow {
protocol = "icmp"
}
allow {
protocol = "tcp"
ports = ["22"]
}
source_ranges = ["0.0.0.0/0"]
}

View File

@@ -0,0 +1,22 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "name" {
value = google_compute_network.main.name
}
output "subnet-name" {
value = google_compute_subnetwork.main.name
}
output "pods-range-name" {
value = var.pods-range-name
}
output "services-range-name" {
value = var.services-range-name
}
output "uri" {
value = google_compute_network.main.self_link
}

View File

@@ -0,0 +1,32 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
description = "TODO"
}
variable "region" {
description = "TODO"
}
variable "vms-cidr" {
description = "TODO"
}
variable "pods-cidr" {
description = "TODO"
}
variable "pods-range-name" {
default = "pods-range"
description = "TODO"
}
variable "services-cidr" {
description = "TODO"
}
variable "services-range-name" {
default = "services-range"
description = "TODO"
}

View File

@@ -0,0 +1,78 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# A piece of randomization that gets consumed by the
# `google_sql_database_instance` resources.
#
# This is needed in order to facilitate creating and recreating instances
# without waiting for the whole period that GCP requires before an instance name can be reused.
resource "random_id" "instance-name" {
byte_length = 4
}
resource "google_sql_database_instance" "main" {
name = "${var.name}-${random_id.instance-name.hex}"
region = var.region
database_version = "POSTGRES_15"
settings {
availability_type = "ZONAL"
disk_autoresize = true
disk_type = "PD_SSD"
tier = "db-custom-${var.cpus}-${var.memory_mb}"
database_flags {
name = "log_min_duration_statement"
value = "-1"
}
database_flags {
name = "max_connections"
value = var.max_connections
}
ip_configuration {
ipv4_enabled = "true"
require_ssl = "true"
authorized_networks {
name = "all"
value = "0.0.0.0/0"
}
}
backup_configuration {
enabled = true
start_time = "23:00"
}
location_preference {
zone = var.zone
}
}
}
resource "google_sql_database" "atc" {
name = "atc"
instance = google_sql_database_instance.main.name
charset = "UTF8"
collation = "en_US.UTF8"
}
resource "random_string" "password" {
length = 32
special = true
}
resource "google_sql_user" "user" {
name = "atc"
instance = google_sql_database_instance.main.name
password = random_string.password.result
}
resource "google_sql_ssl_cert" "cert" {
common_name = "atc"
instance = google_sql_database_instance.main.name
}

View File

@@ -0,0 +1,30 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "username" {
value = google_sql_user.user.name
}
output "password" {
sensitive = true
value = random_string.password.result
}
output "ip" {
value = google_sql_database_instance.main.ip_address[0].ip_address
}
output "ca-cert" {
sensitive = true
value = google_sql_database_instance.main.server_ca_cert[0].cert
}
output "cert" {
sensitive = true
value = google_sql_ssl_cert.cert.cert
}
output "private-key" {
sensitive = true
value = google_sql_ssl_cert.cert.private_key
}

View File

@@ -0,0 +1,37 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "name" {
default = ""
description = "The name of the CloudSQL instance to create (ps.: a random ID is appended to this name)"
}
variable "memory_mb" {
default = ""
description = "Number of MBs to assign to the CloudSQL instance."
}
variable "cpus" {
default = ""
description = "Number of CPUs to assign to the CloudSQL instance."
}
variable "zone" {
default = ""
description = "The zone where this instance is supposed to be created at (e.g., us-central1-a)"
}
variable "region" {
default = ""
description = "The region where the instance is supposed to be created at (e.g., us-central1)"
}
variable "disk_size_gb" {
default = ""
description = "The disk size in GB's (e.g. 10)"
}
variable "max_connections" {
default = ""
description = "The max number of connections allowed by postgres"
}

View File

@@ -0,0 +1,33 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
terraform {
required_providers {
google = "~> 5"
google-beta = "~> 5"
}
backend "gcs" {
# By not providing credentials, you will use your current identity from the gcloud CLI.
# credentials = "gcp.json"
bucket = "tanzu-user-authentication-terraform-state"
prefix = "pinniped-concourse-jan2024"
}
}
provider "google" {
# By not providing credentials, you will use your current identity from the gcloud CLI.
# credentials = "gcp.json"
project = var.project
region = var.region
zone = var.zone
}
# `google-beta` provides us access to GCP's beta APIs.
provider "google-beta" {
# By not providing credentials, you will use your current identity from the gcloud CLI.
# credentials = "gcp.json"
project = var.project
region = var.region
zone = var.zone
}

View File

@@ -0,0 +1,61 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The static IP and related DNS entry.
module "address" {
source = "./address"
dns-zone = var.dns-zone
subdomain = var.subdomain
}
# Instantiates the GKE Kubernetes cluster.
module "cluster" {
source = "./cluster"
name = "pinniped-concourse"
project = var.project
region = var.region
zone = var.zone
node-pools = {
"generic-1" = {
auto-upgrade = true
disk-size = "50"
disk-type = "pd-ssd"
image = "COS_CONTAINERD"
local-ssds = 0
machine-type = "e2-highcpu-8" # 8 vCPU and 4 GB memory
max = 2
min = 1
preemptible = false
version = "1.30.4-gke.1348000"
},
"workers-2" = {
auto-upgrade = true
disk-size = "100"
disk-type = "pd-ssd"
image = "UBUNTU_CONTAINERD"
local-ssds = 0
machine-type = "c3-standard-8" # 8 vCPU and 32 GB memory
max = 5
min = 1
preemptible = false
version = "1.30.4-gke.1348000"
},
}
}
# Creates the CloudSQL Postgres database to be used by the Concourse deployment.
module "database" {
source = "./database"
name = "pinniped-concourse"
cpus = "4"
memory_mb = "7680"
region = var.region
zone = var.zone
max_connections = "300"
}

View File

@@ -0,0 +1,54 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
output "web-ip" {
value = module.address.ip
}
output "web-hostname" {
value = module.address.hostname
}
output "database-ip" {
value = module.database.ip
}
output "database-ca-cert" {
sensitive = true
value = module.database.ca-cert
}
output "database-username" {
value = module.database.username
}
output "database-password" {
sensitive = true
value = module.database.password
}
output "database-cert" {
sensitive = true
value = module.database.cert
}
output "database-private-key" {
sensitive = true
value = module.database.private-key
}
output "project" {
value = var.project
}
output "region" {
value = var.region
}
output "zone" {
value = var.zone
}
output "cluster-name" {
value = module.cluster.cluster-name
}

View File

@@ -0,0 +1,28 @@
# Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
variable "project" {
description = "The Google GCP project to host the resources"
type = string
# Please provide the value of this variable by setting the env var TF_VAR_project for all terraform commands.
}
variable "region" {
description = "The cloud provider region where the resources created"
default = "us-central1"
}
variable "zone" {
description = "The cloud provider zone where the resources are created"
default = "us-central1-c"
}
variable "dns-zone" {
description = "The default DNS zone to use when creating subdomains"
default = "pinniped-dev"
}
variable "subdomain" {
description = "Subdomain under the DNS zone to register"
default = "ci"
}

View File

@@ -0,0 +1,68 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
display:
background_image: https://upload.wikimedia.org/wikipedia/commons/9/9d/Seal_cleaning_itself.jpg
resources:
- name: pinniped-ci
type: git
icon: github
source:
uri: git@github.com:vmware-tanzu/pinniped.git
branch: ci
private_key: ((source-repo-deploy-key))
jobs:
# Here is a recommendation for how to use these tasks to clean up our AWS
# environment.
#
# 1. Run dryrun-cleanup-aws and look at the listed resources to make sure you aren't
# deleting anything that you don't want to.
# 2. Run danger-danger-cleanup-aws to actually delete resources.
# 3. Run list-all-aws-resources to view ALL resources left in our AWS account.
# Consider if we want to add any of those resources to our cleanup task's config.
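  #
  # For example, these jobs can be triggered from the fly CLI. This is a sketch: the exact
  # pipeline name and fly target name depend on how this pipeline was set, so treat
  # "cleanup-aws" and "pinniped" below as assumptions.
  #   fly --target pinniped trigger-job --job cleanup-aws/dryrun-cleanup-aws --watch
  #   fly --target pinniped trigger-job --job cleanup-aws/danger-danger-cleanup-aws --watch
  #   fly --target pinniped trigger-job --job cleanup-aws/list-all-aws-resources --watch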
- name: danger-danger-cleanup-aws
public: false # hide logs
serial: true
plan:
- get: pinniped-ci
- task: cleanup-aws
file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
params:
AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
AWS_ROLE_ARN: ((aws-cleanup-role-arn))
REALLY_CLEANUP: "yes"
- name: dryrun-cleanup-aws
public: false # hide logs
serial: true
plan:
- get: pinniped-ci
- task: preview-cleanup-aws-without-actually-deleting-anything
file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
params:
AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
AWS_ROLE_ARN: ((aws-cleanup-role-arn))
- name: list-all-aws-resources
public: false # hide logs
serial: true
plan:
- get: pinniped-ci
- task: list-all-aws-resources
file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
params:
AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
AWS_ROLE_ARN: ((aws-cleanup-role-arn))
ALL_RESOURCES: "yes"

View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"

View File

@@ -0,0 +1,122 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
display:
background_image: https://cdn.pixabay.com/photo/2020/09/16/22/09/pool-5577567_1280.jpg
meta:
# GCP account info and which zone the workers should be created in and deleted from.
gke_admin_params: &gke_admin_params
INSTANCE_ZONE: us-west1-b
PINNIPED_GCP_PROJECT: ((gcp-project-name))
GCP_USERNAME: ((gke-cluster-developer-username))
GCP_JSON_KEY: ((gke-cluster-developer-json-key))
# GCP account info and which zone the workers should be created in and deleted from.
gcp_account_params: &gcp_account_params
INSTANCE_ZONE: us-central1-b
GCP_PROJECT: ((gcp-project-name))
GCP_USERNAME: ((gcp-instance-admin-username))
GCP_JSON_KEY: ((gcp-instance-admin-json-key))
resources:
- name: pinniped-ci
type: git
icon: github
source:
uri: git@github.com:vmware-tanzu/pinniped.git
branch: ci
private_key: ((source-repo-deploy-key))
- name: k8s-app-deployer-image
type: registry-image
icon: docker
source:
repository: ((ci-ghcr-registry))/k8s-app-deployer
username: ((ci-ghcr-pusher-username))
password: ((ci-ghcr-pusher-token))
tag: latest
- name: gcloud-image
type: registry-image
icon: docker
source:
repository: google/cloud-sdk
tag: slim
- name: hourly
type: time
icon: calendar-clock
source:
interval: 1h
# In an effort to save money, no longer automatically scale our workers up and down on a schedule.
# - name: end-of-business-day
# type: time
# icon: calendar-clock
# source:
# location: America/Los_Angeles
# start: 7:00 PM
# stop: 8:00 PM
# days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]
#
# - name: start-of-business-day
# type: time
# icon: calendar-clock
# source:
# location: America/New_York
# start: 5:30 AM
# stop: 6:30 AM
# days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]
jobs:
- name: scale-up-internal-workers
public: true # all logs are publicly visible
plan:
- in_parallel:
- get: pinniped-ci
- get: k8s-app-deployer-image
# - get: start-of-business-day
# trigger: true
- task: scale-up
timeout: 30m
file: pinniped-ci/pipelines/concourse-workers/scale-up-gke-replicas.yml
image: k8s-app-deployer-image
params:
<<: *gke_admin_params
- name: scale-down-internal-workers
public: true # all logs are publicly visible
plan:
- in_parallel:
- get: pinniped-ci
- get: k8s-app-deployer-image
# - get: end-of-business-day
# trigger: true
- task: scale-down
timeout: 30m
file: pinniped-ci/pipelines/concourse-workers/scale-down-gke-replicas.yml
image: k8s-app-deployer-image
params:
<<: *gke_admin_params
- name: remove-orphaned-vms
public: true # all logs are publicly visible
plan:
- in_parallel:
- get: pinniped-ci
- get: gcloud-image
- get: hourly
trigger: true
- task: remove-orphaned-kind-cluster-vms
attempts: 2
timeout: 25m
file: pinniped-ci/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml
image: gcloud-image
params:
<<: *gcp_account_params

View File

@@ -0,0 +1,13 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
---
platform: linux
inputs:
- name: pinniped-ci
params:
PINNIPED_GCP_PROJECT:
GCP_SERVICE_ACCOUNT:
GCP_JSON_KEY:
run:
path: pinniped-ci/infra/concourse-install/scale-down-concourse-internal-workers.sh

View File

@@ -0,0 +1,13 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
---
platform: linux
inputs:
- name: pinniped-ci
params:
PINNIPED_GCP_PROJECT:
GCP_SERVICE_ACCOUNT:
GCP_JSON_KEY:
run:
path: pinniped-ci/infra/concourse-install/scale-up-concourse-internal-workers.sh

View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" daily
# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
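All of the reconfigure scripts in this commit source hack/fly-helpers.sh, whose diff is not shown here. As a purely hypothetical sketch (only the helper names, FLY_CLI, and CONCOURSE_TARGET are taken from the scripts above; everything else is assumed and may differ from the real file), those helpers could wrap standard fly commands roughly like this:

```shell
#!/usr/bin/env bash
# Hypothetical sketch of hack/fly-helpers.sh; the real file is not shown in this commit.
set -euo pipefail

FLY_CLI="${FLY_CLI:-fly}"
CONCOURSE_TARGET="${CONCOURSE_TARGET:-pinniped}"

set_pipeline() {
  local pipeline="$1" config="$2"
  # Apply the pipeline config without prompting, then make sure it is not paused.
  "$FLY_CLI" --target "$CONCOURSE_TARGET" set-pipeline \
    --pipeline "$pipeline" --config "$config" --non-interactive
  "$FLY_CLI" --target "$CONCOURSE_TARGET" unpause-pipeline --pipeline "$pipeline"
}

ensure_time_resource_has_at_least_one_version() {
  local pipeline="$1" resource="$2"
  # Force a resource check so the time resource has at least one version; the real helper
  # may retry or pass --from, since a windowed time resource can have nothing to emit yet.
  "$FLY_CLI" --target "$CONCOURSE_TARGET" check-resource --resource "$pipeline/$resource"
}
```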


@@ -0,0 +1,146 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
display:
background_image: https://upload.wikimedia.org/wikipedia/commons/6/68/Mirounga_leonina.jpg
meta:
build_pinniped: &build_pinniped
config:
platform: linux
inputs:
- name: pinniped-source
run:
path: bash
args:
- "-c"
- |
set -exuo pipefail
go version
cd pinniped-source/
# compile all of our code
go build -o /dev/null ./...
# compile (but don't actually run) all of our tests
go test ./... -run=nothing
resources:
- name: daily
type: time
icon: calendar-clock
source:
location: America/Los_Angeles
start: 4:00 AM
stop: 5:00 AM
days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ]
- name: pinniped-source
type: git
icon: github
source:
uri: git@github.com:vmware-tanzu/pinniped.git
branch: main
private_key: ((source-repo-deploy-key))
- name: go-1.22-image
type: registry-image
icon: docker
source:
repository: docker.io/golang
tag: "1.22"
jobs:
- name: go-install-cli
public: true # all logs are publicly visible
serial: true
plan:
- get: daily
trigger: true
- task: go-install
config:
platform: linux
image_resource:
type: registry-image
source:
repository: docker.io/golang
run:
path: bash
args:
- "-c"
- |
set -exuo pipefail
go install -v go.pinniped.dev/cmd/pinniped@latest
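# If the 'go install' above succeeds, the published module path still resolves and the
# latest released CLI compiles with a current Go toolchain.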
# This job attempts to check whether it's possible to depend on our API client submodule.
# It creates a simple test application with go.mod and main.go files, then attempts to compile it.
#
# As of now, this is known to be broken, so we've decided to disable this job.
# - name: go-get-submodule
# serial: true
# plan:
# - get: daily
# trigger: true
# - task: go-get
# config:
# platform: linux
# image_resource:
# type: registry-image
# source:
# repository: docker.io/golang
# run:
# path: bash
# args:
# - "-c"
# - |
# set -euo pipefail
# mkdir /work
# cd /work
# cat << EOF > go.mod
# module testapp
# go 1.14
# require (
# go.pinniped.dev/generated/1.18/apis v0.0.0-00010101000000-000000000000
# go.pinniped.dev/generated/1.18/client v0.0.0-20200918195624-2d4d7e588a18
# )
# replace (
# go.pinniped.dev/generated/1.18/apis v0.0.0-00010101000000-000000000000 => go.pinniped.dev/generated/1.18/apis v0.0.0-20200918195624-2d4d7e588a18
# )
# EOF
# cat << EOF > main.go
# package main
# import (
# _ "go.pinniped.dev/generated/1.18/apis/idp/v1alpha1"
# _ "go.pinniped.dev/generated/1.18/client/clientset/versioned"
# )
# func main() {}
# EOF
# head -100 go.mod main.go
# set -x
# go mod download
# go build -o testapp main.go
- name: go-1.22-compatibility
public: true # all logs are publicly visible
serial: true
plan:
- in_parallel:
- get: daily
trigger: true
- get: pinniped-source
- get: go-1.22-image
- task: build
image: go-1.22-image
<<: *build_pinniped


@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" daily


@@ -0,0 +1,109 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
display:
background_image: https://upload.wikimedia.org/wikipedia/commons/2/2b/Grey_seal_animal_halichoerus_grypus.jpg
meta:
notify_on_failure: &notify_on_failure
on_failure:
put: gchat
timeout: 5m
params:
text: |
Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :(
${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME}
# GCP account info, plus the zone in which worker VMs should be created and deleted.
gcp_account_params: &gcp_account_params
INSTANCE_ZONE: us-central1-b
GCP_PROJECT: ((gcp-project-name))
GCP_USERNAME: ((gcp-instance-admin-username))
GCP_JSON_KEY: ((gcp-instance-admin-json-key))
resource_types:
- name: google-chat-notify-resource
type: docker-image
source:
repository: springio/google-chat-notify-resource
tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags
# We are only doing pulls of this resource type, but we add the username and password to avoid
# hitting a rate limit. Our free account is only allowed to have one access token, so we
# cannot make a read-only token for performing pulls.
username: getpinniped
password: ((getpinniped-dockerhub-image-push-access-token))
resources:
- name: gcloud-image
type: registry-image
icon: docker
source:
repository: google/cloud-sdk
tag: slim
- name: pinniped-ci
type: git
icon: github
source:
uri: git@github.com:vmware-tanzu/pinniped.git
branch: ci
private_key: ((source-repo-deploy-key))
- name: daily
type: time
icon: calendar-clock
source:
location: America/Los_Angeles
start: 1:00 AM
stop: 2:00 AM
days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ]
- name: gchat
type: google-chat-notify-resource
icon: chat-outline
source:
url: ((gchat-project-pinniped-bots-webhook-url))
jobs:
- name: build-kind-node-image-kube-main-latest
public: true # all logs are publicly visible
<<: *notify_on_failure
plan:
- in_parallel:
- get: pinniped-ci
- get: gcloud-image
- get: daily
trigger: true
- task: create-kind-node-builder-vm
timeout: 30m
file: pinniped-ci/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml
image: gcloud-image
params:
<<: *gcp_account_params
- task: build-kind-node-image
timeout: 90m
file: pinniped-ci/pipelines/shared-tasks/build-kind-node-image/task.yml
image: gcloud-image
input_mapping:
instance: create-kind-node-builder-vm-output
params:
PUSH_TO_IMAGE_REGISTRY: "ghcr.io"
PUSH_TO_IMAGE_REPO: "pinniped-ci-bot/kind-node-image"
DOCKER_USERNAME: ((ci-ghcr-pusher-username))
DOCKER_PASSWORD: ((ci-ghcr-pusher-token))
<<: *gcp_account_params
ensure:
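# This runs whether or not the build task above succeeds, so the kind-node builder VM is always cleaned up.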
task: remove-instance
attempts: 2
timeout: 20m
file: pinniped-ci/pipelines/shared-tasks/remove-gce-worker-vm/task.yml
image: gcloud-image
input_mapping:
concourse-worker-pool: create-kind-node-builder-vm-output
params:
<<: *gcp_account_params


@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" daily
# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"

pipelines/main/pipeline.yml (new file, 2788 lines added)

File diff suppressed because it is too large.


@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" weekdays
# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"

File diff suppressed because it is too large.


@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"


@@ -0,0 +1,267 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
display:
background_image: https://upload.wikimedia.org/wikipedia/commons/d/d0/KelpforestI2500ppx.JPG
meta:
notify_on_failure: &notify_on_failure
on_failure:
put: gchat
timeout: 5m
params:
text: |
Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :(
${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME}
resource_types:
# Try using the latest version of the registry-image resource because of this problem:
# https://vmware.slack.com/archives/C6TL2PMC7/p1702052766131149
- name: registry-image
type: registry-image
source:
repository: concourse/registry-image-resource
tag: latest
# We are only doing pulls of this resource type, but we add the username and password to avoid
# hitting a rate limit. Our free account is only allowed to have one access token, so we
# cannot make a read-only token for performing pulls.
username: getpinniped
password: ((getpinniped-dockerhub-image-push-access-token))
- name: google-chat-notify-resource
type: docker-image
source:
repository: springio/google-chat-notify-resource
tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags
# We are only doing pulls of this resource type, but we add the username and password to avoid
# hitting a rate limit. Our free account is only allowed to have one access token, so we
# cannot make a read-only token for performing pulls.
username: getpinniped
password: ((getpinniped-dockerhub-image-push-access-token))
resources:
- name: pinniped-source
type: git
icon: github
source:
uri: https://github.com/vmware-tanzu/pinniped.git
branch: main
- name: pinniped-ci
type: git
icon: github
source:
uri: git@github.com:vmware-tanzu/pinniped.git
branch: ci
private_key: ((source-repo-deploy-key))
- name: pinniped-latest-release-image
type: registry-image
icon: docker
source:
repository: ghcr.io/vmware-tanzu/pinniped/pinniped-server
tag: latest
- name: pinniped-latest-main-image
type: registry-image
icon: docker
source:
repository: ((ci-ghcr-registry))/ci-build
username: ((ci-ghcr-puller-username))
password: ((ci-ghcr-puller-token))
tag: latest
- name: gh-cli-image
type: registry-image
icon: docker
source:
repository: ((ci-ghcr-registry))/gh-cli
username: ((ci-ghcr-puller-username))
password: ((ci-ghcr-puller-token))
tag: latest
- name: golang-image
type: registry-image
icon: docker
source:
repository: docker.io/golang
- name: crane-image
type: registry-image
icon: docker
source:
repository: ((ci-ghcr-registry))/crane
username: ((ci-ghcr-puller-username))
password: ((ci-ghcr-puller-token))
- name: weekdays
type: time
icon: calendar-clock
source:
location: America/Los_Angeles
start: 6:00 AM
stop: 7:00 AM
days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]
- name: gchat
type: google-chat-notify-resource
icon: chat-outline
source:
url: ((gchat-project-pinniped-bots-webhook-url))
jobs:
- name: nancy-main
public: true # all logs are publicly visible
serial: true
plan:
- in_parallel:
- get: weekdays
trigger: true
- get: pinniped-source
- task: get-modules
config:
platform: linux
image_resource:
type: registry-image
source:
repository: docker.io/golang
inputs:
- name: pinniped-source
outputs:
- name: pinniped-modules
run:
dir: "pinniped-source"
path: sh
args:
- "-c"
- |
set -e
echo "Installing jq..."
( apt-get update -y && apt-get install -y jq ) > install.log 2>&1 || cat install.log
# Use 'go list' to find package dependencies, then select the associated module versions.
# See https://github.com/sonatype-nexus-community/nancy/issues/228 for details about why
# we can't just use 'go list -m -json all'.
echo "Listing Go module dependencies..."
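# The jq filter below keeps one entry per module path, drops packages that carry no Module
# field (standard-library packages), and emits only the module objects for nancy to read.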
go list -deps -json all | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' > ../pinniped-modules/modules.json
- task: scan
config:
platform: linux
image_resource:
type: registry-image
source:
repository: docker.io/sonatypecommunity/nancy
tag: alpine
inputs:
- name: pinniped-modules
run:
path: 'sh'
args:
- '-c'
- |
set -e
cat <<EOF > exclusions.txt
# Vulnerability exclusions for Nancy:
# https://github.com/sonatype-nexus-community/nancy#exclude-vulnerabilities
#
# When editing this, please add an `until=` tag on each entry so we remember to revisit
# and clean this file later.
# CVE-0000-00000 until=2022-01-01
#
# CVE-2020-8561 is in k8s.io/apiserver@v0.27.1,
# which is the latest version as of 2023-05-10.
# From the comments on this issue https://github.com/kubernetes/kubernetes/issues/104720
# it seems like the Kubernetes maintainers are never going to fix it.
# We omit the "until" date on the next line so this CVE is ignored indefinitely.
CVE-2020-8561
EOF
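# nancy sleuth reads the go list JSON from stdin and fails this task if it finds any
# vulnerability that is not listed in exclusions.txt.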
nancy sleuth --exclude-vulnerability-file=exclusions.txt < pinniped-modules/modules.json
- name: trivy-release
public: true # all logs are publicly visible
serial: true
plan:
- in_parallel:
- get: weekdays
trigger: true
- get: pinniped-latest-release-image
params:
format: oci
- get: pinniped-ci
- task: scan
file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml
params:
GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission))
input_mapping:
image: pinniped-latest-release-image
- name: trivy-main
public: true # all logs are publicly visible
serial: true
plan:
- in_parallel:
- get: weekdays
trigger: true
- get: pinniped-latest-main-image
params:
format: oci
- get: pinniped-ci
- task: scan
file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml
params:
GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission))
input_mapping:
image: pinniped-latest-main-image
- name: all-golang-deps-updated
public: true # all logs are publicly visible
<<: *notify_on_failure
serial: true
plan:
- in_parallel:
- get: weekdays
trigger: true
- get: pinniped-source
- get: pinniped-ci
- get: gh-cli-image
- get: crane-image
- get: golang-image
params:
skip_download: true
- task: check-golang-deps-updated
file: pinniped-ci/pipelines/shared-tasks/check-golang-deps-updated/task.yml
input_mapping:
pinniped-in: pinniped-source
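# Each task below consumes the previous task's pinniped-out output via input_mapping, so the
# Go and Dockerfile dependency bumps accumulate in one tree before the PR task pushes them.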
- task: check-dockerfile-deps-updated
image: crane-image
file: pinniped-ci/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml
input_mapping:
pinniped-in: pinniped-out # the output of the previous task
- task: create-or-update-pr
image: gh-cli-image
file: pinniped-ci/pipelines/shared-tasks/create-or-update-pr/task.yml
params:
DEPLOY_KEY: ((source-repo-deploy-key))
GH_TOKEN: ((ci-bot-access-token-with-public-repo-write-permission))
input_mapping:
pinniped: pinniped-out
- name: run-go-vuln-scan
public: true # all logs are publicly visible
plan:
- in_parallel:
- get: pinniped-source
trigger: true
- get: pinniped-ci
- task: run-go-vuln-scan
file: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.yml
input_mapping:
pinniped: pinniped-source
params:
BUILD_TAGS:


@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"
set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" weekdays
# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.