mirror of https://github.com/vmware-tanzu/pinniped.git
synced 2026-01-15 18:23:10 +00:00
Compare commits
856 Commits
.gitignore (vendored): 22 changed lines
@@ -1,15 +1,7 @@

```
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
.idea
.terraform
*.tfstate.*
*.tfstate
kubeconfig.yaml
.DS_Store
site/
```
.pre-commit-config.yaml (new file): 23 lines
@@ -0,0 +1,23 @@

```yaml
# This is a configuration for https://pre-commit.com/.
# On macOS, try `brew install pre-commit` and then run `pre-commit install`.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      # TODO: find a version of this to validate ytt templates?
      # - id: check-yaml
      #   args: ['--allow-multiple-documents']
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-merge-conflict
      - id: check-added-large-files
      - id: check-byte-order-marker
      - id: detect-private-key
      - id: mixed-line-ending
  - repo: local
    hooks:
      - id: validate-copyright-year
        name: Validate copyright year
        entry: hack/check-copyright-year.sh
        language: script
```
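The comments at the top of this file describe the intended workflow. A minimal usage sketch on a developer machine (assuming `pre-commit` is already installed, for example via `brew install pre-commit`):

```shell
# Install the git hook once per clone so every `git commit` runs the configured checks.
pre-commit install

# Optionally run all of the configured hooks against every file in the repo, not just staged changes.
pre-commit run --all-files
```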
AD-SETUP.md (new file): 603 lines
@@ -0,0 +1,603 @@

# Creating an Active Directory server on Google Cloud for Pinniped integration tests

This documents the steps that were taken to create our test AD server used by the integration tests.
The integration tests use LDAPS and StartTLS to connect to the AD server.

## Create a Windows Server VM and configure it as an AD Domain Controller

The steps in this section were mostly inspired by
https://cloud.google.com/architecture/deploy-an-active-directory-forest-on-compute-engine.

From your Mac, create a VPC, subnet, firewall rules, admin password, reserved static IP, and the VM itself.

On your Mac:

```shell
# Login as yourself.
gcloud auth login

# Set some variables.
project="REDACTED" # Change this to be the actual project name before running these commands.
region="us-west1"
zone="us-west1-c"
vpc_name="ad"

# Create VPC.
gcloud compute networks create ${vpc_name} \
  --project ${project} \
  --description "VPC network to deploy Active Directory" \
  --subnet-mode custom

# Create subnet.
# The google tutorial says to "enable Private Google Access so that Windows can activate without internet access."
gcloud compute networks subnets create domain-controllers \
  --project ${project} --region ${region} \
  --network ${vpc_name} \
  --range "10.0.0.0/28" \
  --enable-private-ip-google-access

# Create a firewall rule to allow RDP. Find out what your public IP address is by going to https://whatismyipaddress.com.
# Replace the X.X.X.X placeholder address shown here with your real IPv4 address.
my_ip=X.X.X.X
gcloud compute firewall-rules create allow-rdp-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:3389 \
  --source-ranges "${my_ip}/32" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Allow LDAPS (port 636) from the whole internet.
gcloud compute firewall-rules create allow-ldaps-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:636 \
  --source-ranges "0.0.0.0/0" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Allow LDAP (port 389) from the whole internet, to allow the integration tests to use StartTLS.
gcloud compute firewall-rules create allow-ldap-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:389 \
  --source-ranges "0.0.0.0/0" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Reserve a static public IP address for the domain controller VM.
addressOfDc1=$(gcloud compute addresses create ad-domain-controller \
  --project ${project} --region ${region} \
  --format="value(address)")

# Create an admin password for the Administrator user on Windows, and save it to secrets manager.
password="$(openssl rand -hex 8)-$(openssl rand -hex 8)"
echo -n "$password" > password.tmp
gcloud secrets create active-directory-dc1-password \
  --project ${project} \
  --data-file password.tmp
rm password.tmp

# This creates a service account called ad-domaincontroller@PROJECT_NAME.iam.gserviceaccount.com
# (where PROJECT_NAME is the actual GCP project name) and sets the account name to the
# variable $dcServiceAccount.
dcServiceAccount=$(gcloud iam service-accounts create ad-domaincontroller \
  --project ${project} \
  --display-name "AD Domain Controller VM Service Account" \
  --format "value(email)")

# Allow the new service account to temporarily read the Windows admin password from secret manager.
# The following `date` command might only work on MacOS. It prints the time like this: 2024-10-23T19:20:36Z
one_hour_from_now=$(TZ=UTC date -v "+1H" +"%Y-%m-%dT%H:%M:%SZ")
gcloud secrets add-iam-policy-binding active-directory-dc1-password \
  --project ${project} \
  "--member=serviceAccount:$dcServiceAccount" \
  --role=roles/secretmanager.secretAccessor \
  --condition="title=Expires after 1h,expression=request.time < timestamp('$one_hour_from_now')"

# Optional: list all bindings to see the binding that you just created.
gcloud secrets get-iam-policy active-directory-dc1-password \
  --project ${project}

# Create a powershell startup script in a local file.
cat <<"EOF" > dc-startup.ps1
$ErrorActionPreference = "Stop"

#
# Only run the script if the VM is not a domain controller already.
#
if ((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 2) {
    exit
}

#
# Read configuration from metadata.
#
Import-Module "${Env:ProgramFiles}\Google\Compute Engine\sysprep\gce_base.psm1"

Write-Host "Reading metadata..."
$ActiveDirectoryDnsDomain = Get-MetaData -Property "attributes/ActiveDirectoryDnsDomain" -instance_only
$ActiveDirectoryNetbiosDomain = Get-MetaData -Property "attributes/ActiveDirectoryNetbiosDomain" -instance_only
$ProjectId = Get-MetaData -Property "project-id" -project_only
$AccessToken = (Get-MetaData -Property "service-accounts/default/token" | ConvertFrom-Json).access_token

#
# Read the DSRM password from secret manager.
#
Write-Host "Reading secret from secret manager..."
$Secret = (Invoke-RestMethod `
  -Headers @{
    "Metadata-Flavor" = "Google";
    "x-goog-user-project" = $ProjectId;
    "Authorization" = "Bearer $AccessToken"} `
  -Uri "https://secretmanager.googleapis.com/v1/projects/$ProjectId/secrets/active-directory-dc1-password/versions/latest:access")
$DsrmPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Secret.payload.data))
$DsrmPassword = ConvertTo-SecureString -AsPlainText $DsrmPassword -force

#
# Promote.
#
Write-Host "Setting administrator password..."
Set-LocalUser -Name Administrator -Password $DsrmPassword

Write-Host "Creating a new forest $ActiveDirectoryDnsDomain ($ActiveDirectoryNetbiosDomain)..."
Install-ADDSForest `
  -DomainName $ActiveDirectoryDnsDomain `
  -DomainNetbiosName $ActiveDirectoryNetbiosDomain `
  -SafeModeAdministratorPassword $DsrmPassword `
  -DomainMode Win2008R2 `
  -ForestMode Win2008R2 `
  -InstallDns `
  -CreateDnsDelegation:$False `
  -NoRebootOnCompletion:$True `
  -Confirm:$false

#
# Configure DNS.
#
Write-Host "Configuring DNS settings..."
Get-NetAdapter | Disable-NetAdapterBinding -ComponentID ms_tcpip6
Set-DnsClientServerAddress `
  -InterfaceIndex (Get-NetAdapter -Name Ethernet).InterfaceIndex `
  -ServerAddresses 127.0.0.1

#
# Enable LSA protection.
#
New-ItemProperty `
  -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa" `
  -Name "RunAsPPL" `
  -Value 1 `
  -PropertyType DWord

Write-Host "Restarting to apply all settings..."
Restart-Computer
EOF

# Create a domain controller VM.
# E2 are the cheapest VMs. e2-medium has 2 vCPUs (shared with other customers) and 4 GB of memory.
# See https://cloud.google.com/compute/docs/general-purpose-machines#e2-shared-core.
# When we originally set up this VM, we actually started it as n2-standard-2 and after we
# finished setting up everything as shown in this guide, then we stopped the VM and changed its
# type to e2-medium and started the VM again. Maybe it would work fine to create it as
# e2-medium from the beginning, but note that we didn't actually test that.
gcloud compute instances create active-directory-dc1 \
  --project ${project} \
  --zone ${zone} \
  --image-family windows-2022 \
  --image-project windows-cloud \
  --machine-type e2-medium \
  --tags ad-domaincontroller \
  --metadata "ActiveDirectoryDnsDomain=activedirectory.test.pinniped.dev,ActiveDirectoryNetbiosDomain=pinniped-ad,sysprep-specialize-script-ps1=Install-WindowsFeature AD-Domain-Services -IncludeManagementTools; Install-WindowsFeature DNS,disable-account-manager=true" \
  --metadata-from-file windows-startup-script-ps1=dc-startup.ps1 \
  --address ${addressOfDc1} \
  --subnet=domain-controllers \
  --service-account "$dcServiceAccount" \
  --scopes cloud-platform \
  --shielded-integrity-monitoring \
  --shielded-secure-boot \
  --shielded-vtpm

# Monitor the initialization process of the first domain controller by viewing its serial port output.
# It should install the sysprep stuff, reboot, run our startup script, and then reboot again.
gcloud compute instances tail-serial-port-output active-directory-dc1 \
  --project ${project} \
  --zone ${zone}
# Use CTRL-C to cancel tailing the output.
```

## Update DNS

Update the Cloud DNS entry for `activedirectory.test.pinniped.dev.` to be an "A" record pointing to the
public static IP of the VM. This is easier to do in the Cloud DNS UI in your browser.
It would take many gcloud CLI commands to accomplish the same task.
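If you do want to script it, a rough sketch of the equivalent record change is below. The managed zone name (`pinniped-test-zone`) is an assumption, so look it up first with `gcloud dns managed-zones list`; `${project}` and `${addressOfDc1}` are the variables set in the earlier shell session.

```shell
zone_name="pinniped-test-zone"  # Assumed zone name; replace with the real managed zone.

# If an "A" record already exists for this name, remove the old value in the same transaction
# with `gcloud dns record-sets transaction remove` before adding the new one.
gcloud dns record-sets transaction start --project ${project} --zone ${zone_name}
gcloud dns record-sets transaction add "${addressOfDc1}" \
  --project ${project} --zone ${zone_name} \
  --name "activedirectory.test.pinniped.dev." --type A --ttl 300
gcloud dns record-sets transaction execute --project ${project} --zone ${zone_name}

# Verify that the name now resolves to the VM's static public IP.
dig +short activedirectory.test.pinniped.dev
```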
## Configure test users and groups

Make sure you have an RDP client installed. On a Mac, you can install RDP from the App Store.
It was recently renamed "Windows App".

Note: To copy/paste in the RDP client, you may need to use CTRL-C/CTRL-V if CMD-C/CMD-V don't work.

RDP into the Windows VM. To connect, use `activedirectory.test.pinniped.dev` as the name of the server,
the username `Administrator`, and the password from the `active-directory-dc1-password` entry in Secrets Manager.
You can ignore the RDP certificate error.

In your RDP session, open Powershell. Then run the following commands to add some users and groups,
change the password policy, and grant some permissions.

Before running the commands, replace the redacted passwords as follows:
- The value for `REDACTED_BIND_USER_PASSWORD` can be found at `aws-ad-bind-account-password` in the `concourse-secrets` secret
- The value for `REDACTED_PINNY_USER_PASSWORD` can be found at `aws-ad-user-password` in the `concourse-secrets` secret
- The value for `REDACTED_DEACTIVATED_USER_PASSWORD` can be found at `aws-ad-deactivated-user-password` in the `concourse-secrets` secret

```shell
New-ADOrganizationalUnit -Name "pinniped-ad" `
  -ProtectedFromAccidentalDeletion $false

New-ADOrganizationalUnit -Name "Users" `
  -Path "OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -ProtectedFromAccidentalDeletion $false

New-ADOrganizationalUnit -Name "test-users" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -Description "integration tests will create and delete ephemeral users here" `
  -ProtectedFromAccidentalDeletion $false

# Print all OUs to validate that they were created.
Get-ADOrganizationalUnit -Filter *

New-ADUser -Name "Bind User" -SamAccountName "bind-user" -GivenName "Bind" -Surname "User" -DisplayName "Bind User" `
  -UserPrincipalName "bind-user@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_BIND_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $true -PasswordNeverExpires $true

# Note that the value of EmailAddress is not a real email address, but that's okay.
New-ADUser -Name "Pinny Seal" -SamAccountName "pinny" -GivenName "Pinny" -Surname "Seal" -DisplayName "Pinny Seal" `
  -UserPrincipalName "pinny@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -EmailAddress "tanzu-user-authentication@groups.vmware.com" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_PINNY_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $true -PasswordNeverExpires $true

New-ADUser -Name "Deactivated User" -SamAccountName "deactivated-user" -GivenName "Deactivated" -Surname "User" -DisplayName "Deactivated User" `
  -UserPrincipalName "deactivated-user@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_DEACTIVATED_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $false -PasswordNeverExpires $true

# Take note of the pinny account's ObjectGUID. You will need to edit the concourse-secrets secret later to update this GUID value.
# This value should look something like "288188dd-ab76-4f61-b6e4-c72e081502c5".
Get-ADUser pinny -Properties * | Select SamaccountName,ObjectGUID

# Print all users to validate that they were created.
Get-ADUser -Filter *

New-ADGroup -Name "Marine Mammals" -SamAccountName "Marine Mammals" -DisplayName "Marine Mammals" `
  -GroupCategory Security -GroupScope Global `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"

Add-ADGroupMember -Identity "Marine Mammals" -Members "pinny"

New-ADGroup -Name "Mammals" -SamAccountName "Mammals" -DisplayName "Mammals" `
  -GroupCategory Security -GroupScope Global `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"

Add-ADGroupMember -Identity "Mammals" -Members "Marine Mammals"

# Change the default password policy. There are some integration tests that rely on this.
# This is the equivalent of doing this in the Windows "Active Directory Administrative Center" UI:
# check "enforce account lockout policy", give it 20 failed attempts and a 15-minute reset, then
# uncheck "enforce minimum password age" so we can change the password immediately upon creating a user.
Set-ADDefaultDomainPasswordPolicy -Identity "activedirectory.test.pinniped.dev" `
  -LockoutThreshold 20 -LockoutDuration "00:15:00" -LockoutObservationWindow "00:15:00" `
  -MinPasswordAge 0

# Print the policy to validate that it was updated.
Get-ADDefaultDomainPasswordPolicy

# We need to allow the bind-user to create/delete/edit users and groups within the test-users OU, because several
# integration tests want to create/delete/edit ephemeral test users and groups.
# These access control steps were inspired by https://the-itguy.de/delegate-access-in-active-directory-with-powershell/.
# This is intended to be the equivalent of using the UI to assign permissions like this: right click on "test-users",
# select Delegate Control, select "bind-user" as the user, select "create, delete and manage user accounts" and
# "reset user passwords" as the tasks to delegate.
function New-ADDGuidMap
{
    $rootdse = Get-ADRootDSE
    $guidmap = @{ }
    $GuidMapParams = @{
        SearchBase = ($rootdse.SchemaNamingContext)
        LDAPFilter = "(schemaidguid=*)"
        Properties = ("lDAPDisplayName", "schemaIDGUID")
    }
    Get-ADObject @GuidMapParams | ForEach-Object { $guidmap[$_.lDAPDisplayName] = [System.GUID]$_.schemaIDGUID }
    return $guidmap
}
$GuidMap = New-ADDGuidMap
$BindUserSID = New-Object System.Security.Principal.SecurityIdentifier (Get-ADUser "bind-user").SID
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$ace1 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["user"]
$ace2 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["user"], "All"
$ace3 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["group"]
$ace4 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["group"], "All"
$acl.AddAccessRule($ace1)
$acl.AddAccessRule($ace2)
$acl.AddAccessRule($ace3)
$acl.AddAccessRule($ace4)
Set-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" -AclObject $acl

# Print the access control rules that were just applied.
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$acl.Access | Where-Object { $_.IdentityReference -eq "pinniped-ad\bind-user" }
```

If you would like to see these OUs, users, and groups in the UI, you can open the "Active Directory Users and Computers"
app in your RDP session.

## Configure a CA and a serving certificate for LDAPS

Now we need to create and configure a TLS serving certificate for LDAPS.

The certificate needs to include two hostnames. One of the hostnames is the name that the AD server
thinks is its own hostname (`active-directory-dc1.activedirectory.test.pinniped.dev`).
This is how the AD server will decide to use this cert for the LDAPS port.
The other hostname is the one that clients will use when making connections from the outside
(`activedirectory.test.pinniped.dev`) so they can validate the server certificate.

The steps here were inspired by https://gist.github.com/magnetikonline/0ccdabfec58eb1929c997d22e7341e45.

On your Mac:

```shell
# On your Mac: Create a self-signed CA public/private keypair.
openssl req -x509 -newkey rsa:4096 \
  -keyout ad-ca.key -out ad-ca.crt \
  -sha256 -days 36500 -nodes \
  -subj "/C=US/ST=California/L=San Francisco/O=Pinniped/OU=Pinniped CI/CN=Pinniped AD CA"

# Copy the public key to your clipboard.
cat ad-ca.crt | pbcopy
```

In Powershell terminal:

```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the public
# key again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\ca.crt"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\ca.crt"

# Import root certificate into trusted store of domain controller in your Powershell terminal:
Import-Certificate -FilePath "C:\users\administrator\desktop\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root
```

If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
and look in the folder called "Trusted Root Certification Authorities\Certificates".
If the UI was already open, click the refresh button.

Copy the following file contents to your clipboard:

```shell
[Version]
Signature="$Windows NT$"

[NewRequest]
Subject = "CN=activedirectory.test.pinniped.dev"
KeySpec = 1
KeyLength = 2048
Exportable = TRUE
MachineKeySet = TRUE
SMIME = FALSE
PrivateKeyArchive = FALSE
UserProtected = FALSE
UseExistingKeySet = FALSE
ProviderName = "Microsoft RSA SChannel Cryptographic Provider"
ProviderType = 12
RequestType = PKCS10
KeyUsage = 0xa0

[EnhancedKeyUsageExtension]
OID = 1.3.6.1.5.5.7.3.1 ; Server Authentication

[Extensions]
2.5.29.17 = "{text}"
_continue_ = "DNS=activedirectory.test.pinniped.dev"
_continue_ = "DNS=active-directory-dc1.activedirectory.test.pinniped.dev"
```

In Powershell terminal:

```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to copy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\request.inf"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\request.inf"

# Create a CSR. This command will also generate a private key for the AD server and save it.
certreq -new "C:\users\administrator\desktop\request.inf" "C:\users\administrator\desktop\client.csr"

# Show the CSR.
type "C:\users\administrator\desktop\client.csr"

# Copy the content of this file to your clipboard.
Get-Content "C:\users\administrator\desktop\client.csr" | Set-Clipboard
```

On your Mac:

```shell
# On your Mac, use the CA to issue a serving cert based on the CSR.
pbpaste > client.csr

cat <<EOF > v3ext.txt
keyUsage=digitalSignature,keyEncipherment
extendedKeyUsage=serverAuth
subjectKeyIdentifier=hash
subjectAltName = @alt_names
[alt_names]
DNS.1 = activedirectory.test.pinniped.dev
DNS.2 = active-directory-dc1.activedirectory.test.pinniped.dev
EOF

# Create a cert from the CSR signed by the CA.
openssl x509 \
  -req -days 36500 \
  -in client.csr -CA ad-ca.crt -CAkey ad-ca.key -extfile v3ext.txt \
  -set_serial 01 -out client.crt

# Inspect the generated certificate.
# Ensure the following X509v3 extensions are all present:
# Key Usage: Digital Signature, Key Encipherment
# Extended Key Usage: TLS Web Server Authentication
# Subject Key Identifier
# Subject Alternative Name with 2 DNS hostnames
# Authority Key Identifier
openssl x509 -in client.crt -text

# Copy the generated cert.
cat client.crt | pbcopy
```

In Powershell terminal:

```shell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\client.crt"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\client.crt"

# Add the serving certificate to Windows. This will also automatically associate it to the private key that you
# generated with the previous usage of certreq.
certreq -accept "C:\users\administrator\desktop\client.crt"

# If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
# and look in the folder called "Personal\Certificates". If the UI was already open, click the refresh button.
# Double click on the cert. Ensure that it says, "you have a private key that corresponds to this certificate".
# Next, we need to reboot the VM for the cert to get picked up and used for serving incoming LDAPS connections.
# After showing you a warning dialog box, this should terminate your RDP session and stop the VM.
shutdown /s
```

Wait for the VM to stop, then start the VM again from your Mac:

```shell
gcloud compute instances start active-directory-dc1 --project ${project} --zone ${zone}
```

Wait for the VM to finish booting. Then we can confirm that LDAPS is working. On your Mac:

```shell
# Check that serving cert is being returned on the LDAPS port. This command should show the cert chain.
# It should also verify the server cert using our CA. The output should include "Verify return code: 0 (ok)".
openssl s_client -connect activedirectory.test.pinniped.dev:636 -showcerts -CAfile ad-ca.crt < /dev/null

# Unfortunately, the ldapsearch command that comes pre-installed on MacOS does not seem to respect
# the LDAPTLS_CACERT env variable. So it will not be able to validate the server certificates.
# As a workaround, we can use docker to run ldapsearch commands in a linux container.

# Test the regular LDAP port by issuing a query on your Mac. The -ZZ option asks it to use StartTLS.
# This should list all users. Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
  ldapsearch -d8 -v -x -ZZ -H 'ldap://activedirectory.test.pinniped.dev' \
  -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -w 'REDACTED_BIND_USER_PASSWORD' \
  -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -s sub \
  '(objectClass=user)' '*'

# Test the LDAPS port by issuing a query on your Mac. This should list all users.
# Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
  ldapsearch -d8 -v -x -H 'ldaps://activedirectory.test.pinniped.dev' \
  -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -w 'REDACTED_BIND_USER_PASSWORD' \
  -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -s sub \
  '(objectClass=user)' '*'
```

## Update the `concourse-secrets` secret in GCP Secrets Manager

On your Mac:

```shell
# Copy the CA's public cert.
cat ad-ca.crt | base64 | pbcopy

# cd to your local clone of the `ci` branch of the pinniped repo
cd pinniped-ci-branch

# Edit the secret.
./hack/edit-gcloud-secret.sh concourse-secret
# This opens vim to edit the secret.
# Paste the cert as the value for `aws-ad-ca-data`.
# Also edit the value of `aws-ad-user-unique-id-attribute-value`. The value should be the ObjectGUID of the pinny
# user that you created in the steps above.
# Save your changes, exit vim, and when prompted say that you want to save this as the new version of concourse-secrets.
```
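To double-check that the new secret version saved, a hedged sketch (assuming your account can read the secret and that these keys appear as plain text in the payload):

```shell
# List versions; the newest one should have just been created.
gcloud secrets versions list concourse-secrets --project ${project}

# Confirm that the updated keys are present in the latest version.
gcloud secrets versions access latest --secret concourse-secrets --project ${project} \
  | grep -E "aws-ad-ca-data|aws-ad-user-unique-id-attribute-value"
```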
## Confirm that Active Directory integration tests can pass

Use these commands to run all the Active Directory integration tests on your Mac.
The `-run` filter is based on the tests as they existed at the time of writing this doc.
You can find AD tests by searching for `SkipTestWhenActiveDirectoryIsUnavailable`.
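For example, a quick way to list the integration test files that use that skip helper, run from the root of your pinniped clone:

```shell
grep -rl "SkipTestWhenActiveDirectoryIsUnavailable" test/integration
```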
On your Mac:

```shell
# Login so we can read the secrets from GCP Secret Manager.
gcloud auth login

# cd to your local git clone
cd pinniped

# Compile and install onto a local kind cluster.
./hack/prepare-for-integration-tests.sh -c --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"

# Run all the tests that depend on AD.
source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration \
  -run "/TestSupervisorLogin_Browser/active_directory|/TestE2EFullIntegration_Browser/with_Supervisor_ActiveDirectory|/TestActiveDirectoryIDPPhaseAndConditions_Parallel|/TestSupervisorWarnings_Browser/Active_Directory"
```

## Cleanup

On your Mac:

```shell
# Remove all bindings for the service account from the secret.
# The binding was only needed during the first boot of the VM.
gcloud secrets remove-iam-policy-binding active-directory-dc1-password \
  --project ${project} \
  --member "serviceAccount:${dcServiceAccount}" --role roles/secretmanager.secretAccessor \
  --all

# Remove the firewall rule which allows incoming RDP connections.
# If you need to RDP to this AD VM in the future, then you will need to create
# a new firewall rule to allow it.
gcloud compute firewall-rules delete allow-rdp-ingress-to-addc \
  --project ${project} \
  --quiet

# Remove all temp files. It's okay to remove the private key for our CA because we
# created certs that are good for 100 years, as long as you have already added the
# public cert to the concourse-secrets secret. If we need to create a new AD VM, we
# can also create a new CA.
rm ad-ca.crt ad-ca.key client.crt client.csr v3ext.txt
```
CODE_OF_CONDUCT.md (new file): 1 line
@@ -0,0 +1 @@

Please see https://github.com/vmware/pinniped/blob/main/CODE_OF_CONDUCT.md
CONTRIBUTING.md (new file): 1 line
@@ -0,0 +1 @@

Please see https://github.com/vmware/pinniped/blob/main/CONTRIBUTING.md
LICENSE (new file): 202 lines
@@ -0,0 +1,202 @@

```
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
```
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1
MAINTAINERS.md
Normal file
1
MAINTAINERS.md
Normal file
@@ -0,0 +1 @@
|
||||
Please see https://github.com/vmware/pinniped/blob/main/MAINTAINERS.md
|
||||
220
README.md
220
README.md
@@ -1 +1,219 @@
|
||||
# placeholder-name
|
||||
# Pinniped's `ci` branch
|
||||
|
||||
This `ci` branch contains the CI/CD tooling for [Pinniped](https://github.com/vmware/pinniped).
|
||||
|
||||
The documentation and code in this branch are mainly intended for the maintainers of Pinniped.
|
||||
|
||||
This branch is not intended to be merged to the `main` branch.
|
||||
|
||||
The code in the branch previously lived in a private repository. It was made public by moving
|
||||
the code into the `ci` branch of the Pinniped repository in late 2024. The previous git history
|
||||
for these files was not copied from the private repository at the time of this migration.
|
||||
|
||||
## Reporting an issue in this branch
|
||||
|
||||
Found a bug or would like to make an enhancement request?
|
||||
Please report issues in [this repo](https://github.com/vmware/pinniped).
|
||||
|
||||
## Reporting security vulnerabilities
|
||||
|
||||
Please follow the procedure described in [SECURITY.md](https://github.com/vmware/pinniped/blob/main/SECURITY.md).
|
||||
|
||||
## Creating a release
|
||||
|
||||
When the team is preparing to ship a release, a maintainer will create a new
|
||||
GitHub [Issue](https://github.com/vmware/pinniped/issues/new/choose) in this repo to
|
||||
collaboratively track progress on the release checklist. As tasks are completed,
|
||||
the team will check them off. When all the tasks are completed, the issue is closed.
|
||||
|
||||
The release checklist is committed to this repo as an [issue template](https://github.com/vmware/pinniped/tree/main/.github/ISSUE_TEMPLATE/release_checklist.md).
|
||||
|
||||
## Pipelines
|
||||
|
||||
Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD.
|
||||
Our Concourse instance runs at [ci.pinniped.broadcom.net](https://ci.pinniped.broadcom.net), which can only be reached from inside the corporate network.
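
For maintainers on that network, pointing the Concourse `fly` CLI at it might look roughly like this (the target name `pinniped` is just an arbitrary local alias, not something defined by this repo):

```bash
# Log in to our Concourse (only reachable from the corporate network).
fly --target pinniped login --concourse-url https://ci.pinniped.broadcom.net

# List the pipelines that are currently set.
fly --target pinniped pipelines
```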
|
||||
|
||||
The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible.
|
||||
|
||||
- `main`
|
||||
|
||||
This is the main pipeline that runs on merges to `main`. It builds, tests, and (when manually triggered) releases from main.
|
||||
|
||||
- `pull-requests`
|
||||
|
||||
This pipeline triggers for each open pull request. It runs a smaller subset of the integration tests and validations than the `main` pipeline does.
|
||||
|
||||
- `dockerfile-builders`
|
||||
|
||||
This pipeline builds a bunch of custom utility container images that are used in our CI and testing.
|
||||
|
||||
- `build-gh-cli` (a container image that includes the GitHub CLI)
|
||||
- `build-github-pr-resource` (a [fork](https://github.com/pinniped-ci-bot/github-pr-resource) of the `github-pr-resource` with support for gating PRs for untrusted users)
|
||||
- `build-code-coverage-uploader` (uploading code coverage during unit tests)
|
||||
- `build-eks-deployer-dockerfile` (deploying our app to EKS clusters)
|
||||
- `build-k8s-app-deployer-dockerfile` (deploying our app to clusters)
|
||||
- `build-pool-trigger-resource-dockerfile` (an updated implementation of the [pool-trigger-resource](https://github.com/cfmobile/pool-trigger-resource) for use in our CI)
|
||||
- `build-integration-test-runner-dockerfile` (running our integration tests)
|
||||
- `build-integration-test-runner-beta-dockerfile` (running our integration tests with the latest Chrome beta version)
|
||||
- `build-deployment-yaml-formatter-dockerfile` (templating our deployment YAML during a release)
|
||||
- `build-crane` (copy and tag container images during release)
|
||||
- `build-k8s-code-generator-*` (running our Kubernetes code generation under different Kubernetes dependency versions)
|
||||
- `build-test-dex` (a Dex used during tests)
|
||||
- `build-test-cfssl` (a cfssl used during tests)
|
||||
- `build-test-kubectl` (a kubectl used during tests)
|
||||
- `build-test-forward-proxy` (a Squid forward proxy used during tests)
|
||||
- `build-test-bitnami-ldap` (an OpenLDAP used during tests)
|
||||
|
||||
- `cleanup-aws`
|
||||
|
||||
This runs a script that runs [aws-nuke](https://github.com/rebuy-de/aws-nuke) against our test AWS account.
|
||||
This was occasionally needed because [eksctl](https://eksctl.io/) sometimes fails and leaks AWS resources. These resources cost money and use up our AWS quota.
|
||||
However, we seem to have worked around these issues and this pipeline has not been used for some time.
|
||||
|
||||
These jobs are only triggered manually. They are destructive and should be used with care.
|
||||
|
||||
- `concourse-workers`
|
||||
|
||||
Deploys worker replicas on a long-lived GKE cluster that runs the Concourse workers, and can scale them up or down.
|
||||
|
||||
- `go-compatibility`
|
||||
|
||||
This pipeline runs nightly jobs that validate the compatibility of our code as a Go module in various contexts. We have jobs that test that our code compiles under older Go versions and that our CLI can be installed using `go install` (see the example after this list).
|
||||
|
||||
- `security-scan`
|
||||
|
||||
This pipeline has nightly jobs that run security scans on our current main branch and most recently released artifacts.
|
||||
|
||||
The tools we use are:
|
||||
- [sonatype-nexus-community/nancy](https://github.com/sonatype-nexus-community/nancy), which scans Go module versions.
|
||||
- [aquasecurity/trivy](https://github.com/aquasecurity/trivy), which scans container images and Go binaries.
|
||||
- [govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck), which scans Go code to find calls to known-vulnerable dependencies.
|
||||
|
||||
This pipeline also has a job called `all-golang-deps-updated` which automatically submits PRs to update all
|
||||
direct dependencies in Pinniped's go.mod file, and update the Golang and distroless container images used in
|
||||
Pinniped's Dockerfiles.
|
||||
|
||||
- `kind-node-builder`
|
||||
|
||||
A nightly build job which uses the latest version of kind to build the HEAD of the Kubernetes master branch as a container
|
||||
image that can be used to deploy kind clusters. Other pipelines use this container image to install Pinniped and run
|
||||
integration tests. This gives us insight into any compatibility problems with the next upcoming release of Kubernetes.
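
As referenced in the `go-compatibility` item above, a minimal sketch of the `go install` check, assuming Pinniped's published module path `go.pinniped.dev` and its CLI package `cmd/pinniped`:

```bash
# Install the Pinniped CLI the way an end user would, then confirm it runs.
go install go.pinniped.dev/cmd/pinniped@latest
pinniped version
```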
|
||||
|
||||
## Deploying pipeline changes
|
||||
|
||||
After any shared tasks (`./pipelines/shared-tasks`) or helpers (`./pipelines/shared-helpers`) are edited,
|
||||
the commits must be pushed to the `ci` branch of this repository to take effect.
|
||||
|
||||
After editing any CI secrets or pipeline definitions, a maintainer must run the corresponding
|
||||
`./pipelines/$PIPELINE_NAME/update-pipeline.sh` script to apply the changes to Concourse.
|
||||
To deploy _all_ pipelines, a maintainer can run `./pipelines/update-all-pipelines.sh`.
|
||||
Don't forget to commit and push your changes after applying them!
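
As a concrete sketch of that workflow, assuming a pipeline's definition lives next to its `update-pipeline.sh` script (the `main` pipeline is used here only as an example):

```bash
# Edit the pipeline definition, then apply it to Concourse.
vim pipelines/main/pipeline.yml
./pipelines/main/update-pipeline.sh

# Or re-apply every pipeline at once.
./pipelines/update-all-pipelines.sh

# Commit and push so the ci branch matches what is running in Concourse.
git add pipelines/ && git commit -m "Update the main pipeline" && git push
```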
|
||||
|
||||
## GitHub webhooks for pipelines
|
||||
|
||||
Some pipelines use GitHub [webhooks to trigger resource checks](https://concourse-ci.org/resources.html#schema.resource.webhook_token),
|
||||
rather than the default of polling every minute, to make these pipelines more responsive and use fewer compute resources
|
||||
for running checks. Refer to places where `webhook_token` is configured in various `pipeline.yml` files.
|
||||
|
||||
To make these webhooks work, they must be defined on the [GitHub repo's settings](https://github.com/vmware/pinniped/settings/hooks).
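
For reference, GitHub delivers these webhooks to Concourse's resource check endpoint, which per the Concourse documentation linked above has roughly this shape; the team, pipeline, resource, and token values below are illustrative only:

```bash
# A webhook delivery is effectively a POST like this one.
curl -X POST "https://ci.pinniped.broadcom.net/api/v1/teams/main/pipelines/pull-requests/resources/pull-request/check/webhook?webhook_token=EXAMPLE_TOKEN"
```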
|
||||
|
||||
## Installing and operating Concourse
|
||||
|
||||
See [infra/README.md](./infra/README.md) for details about how Concourse was installed and how it can be operated.
|
||||
|
||||
## Acceptance environments
|
||||
|
||||
In addition to the many ephemeral Kubernetes clusters we use for testing, we also deploy a long-running acceptance environment.
|
||||
|
||||
It runs on Google Kubernetes Engine (GKE) as the `gke-acceptance-cluster` cluster in our GCP project, in the `us-west1-c` availability zone.
|
||||
|
||||
To access this cluster, download the kubeconfig to `gke-acceptance.yaml` by running:
|
||||
|
||||
```cmd
|
||||
KUBECONFIG=gke-acceptance.yaml gcloud container clusters get-credentials gke-acceptance-cluster --project "$PINNIPED_GCP_PROJECT" --zone us-west1-c
|
||||
```
|
||||
|
||||
The above command assumes that you have already set `PINNIPED_GCP_PROJECT` to be the name of the GCP project.
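
For example, after setting that variable and running the command above, the downloaded kubeconfig can be used directly with `kubectl` (the project name below is a placeholder):

```bash
export PINNIPED_GCP_PROJECT="our-gcp-project"  # placeholder; use the real project name
# ...run the gcloud command above, then verify access:
KUBECONFIG=gke-acceptance.yaml kubectl get nodes
```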
|
||||
|
||||
## CI secrets
|
||||
|
||||
We use [Google Secret Manager](https://cloud.google.com/secret-manager) on GCP to store build/test/release secrets.
|
||||
These secrets are only available to the maintainers.
|
||||
|
||||
Using the `gcloud secrets list` command or the [web console](https://console.cloud.google.com/security/secret-manager),
|
||||
you can list the available secrets. The content of each secret is a YAML file with secret key/value pairs.
|
||||
You can also use the `./hack/edit-gcloud-secret.sh <secretName>` script to edit or inspect each secret.
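
For example (the secret name below is hypothetical):

```bash
# See which secrets exist.
gcloud secrets list --project "$PINNIPED_GCP_PROJECT"

# Read the latest version of one secret's YAML content.
gcloud secrets versions access latest --secret concourse-main-pipeline --project "$PINNIPED_GCP_PROJECT"

# Or edit it interactively with the helper script.
./hack/edit-gcloud-secret.sh concourse-main-pipeline
```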
|
||||
|
||||
## Configure Azure for CI to test on AKS
|
||||
|
||||
There are several CI jobs which test that Pinniped works when installed on Azure's AKS.
|
||||
For these jobs to run, they need to be able to create and delete ephemeral AKS clusters.
|
||||
This requires the following:
|
||||
|
||||
1. An active Azure Subscription. (A "subscription" in Azure is the equivalent of an "account" in AWS or a "project" in GCP.)
|
||||
2. An Azure App Registration (basically, a service account) active in the same Directory (aka tenant) as the Subscription.
|
||||
Create the app in "My Organization Only". It does not need a redirect URI or any other optional settings.
|
||||
Create a client secret for this app. If you want the client secret to have a long lifetime, you can use the `az` CLI to create it.
|
||||
In the Subscription's IAM settings, assign this app the role "Azure Kubernetes Service Contributor Role" to allow
|
||||
the app to manage AKS clusters. Also assign this app the role "Reader" to allow it to read all resources
|
||||
(used by the `remove-orphaned-aks-clusters` CI task).
|
||||
Do not grant this app permissions in any other Subscription or use it for any other purpose.
|
||||
3. Configure the pipelines with the app's Application (client) ID, Client Secret, and Directory (tenant) ID
|
||||
as the appropriate secret values.
|
||||
|
||||
The CI jobs will create and delete AKS clusters in a Resource Group called `pinniped-ci` within the provided Subscription.
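
A rough sketch of the setup steps above using the `az` CLI (the app name, secret lifetime, and subscription ID are placeholders, and the Azure portal works equally well):

```bash
# Create the App Registration and capture its Application (client) ID.
app_id=$(az ad app create --display-name pinniped-ci-aks --query appId -o tsv)

# Create a client secret with a long lifetime.
az ad app credential reset --id "$app_id" --years 2

# Create a service principal for the app, then grant it the two roles in the Subscription.
az ad sp create --id "$app_id"
az role assignment create --assignee "$app_id" \
  --role "Azure Kubernetes Service Contributor Role" \
  --scope "/subscriptions/00000000-0000-0000-0000-000000000000"
az role assignment create --assignee "$app_id" \
  --role "Reader" \
  --scope "/subscriptions/00000000-0000-0000-0000-000000000000"
```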
|
||||
|
||||
## Configure AWS for CI to test on EKS
|
||||
|
||||
There are several CI jobs which test that Pinniped works when installed on Amazon's EKS.
|
||||
For these jobs to run, they need to be able to create and delete ephemeral EKS clusters.
|
||||
There are also some jobs to clean up any orphaned resources (e.g. IP addresses) in the AWS account.
|
||||
These jobs require the following:
|
||||
|
||||
1. An active AWS account, which will only be used for this purpose.
|
||||
2. Two IAM users in that account, each with a role that can be assumed.
|
||||
These IAM users should only be used for Pinniped CI and for no other purpose.
|
||||
They should only have permissions to perform AWS actions in the relevant AWS account, and no other account.
|
||||
3. The first user and role should have permission to create and delete EKS clusters using `eksctl`.
|
||||
The permissions required can be found in the [eksctl docs](https://eksctl.io/usage/minimum-iam-policies).
|
||||
The user also needs permission to run `aws logs put-retention-policy`, `aws ec2 describe-nat-gateways`,
|
||||
and `aws ec2 delete-nat-gateway`.
|
||||
4. The second user and role should have broad permissions to get and delete everything in the account.
|
||||
It will be used to run `aws-nuke` to list and/or clean resources from the AWS account.
|
||||
To use `aws-nuke`, the user also needs to have an AWS account alias
|
||||
(see the [cleanup-aws task](pipelines/shared-tasks/cleanup-aws/task.sh) for details).
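
For illustration, a CI task using the first user's role to manage an ephemeral cluster might look roughly like this (the role ARN, cluster name, and region are placeholders):

```bash
# Assume the EKS-management role; export the returned temporary credentials before continuing.
aws sts assume-role \
  --role-arn "arn:aws:iam::111111111111:role/pinniped-ci-eks" \
  --role-session-name pinniped-ci

# Create an ephemeral test cluster, run tests against it, then tear it down.
eksctl create cluster --name pinniped-ci-test --region us-west-2 --nodes 1
eksctl delete cluster --name pinniped-ci-test --region us-west-2
```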
|
||||
|
||||
## Setting Up Active Directory Test Environment
|
||||
|
||||
To test the `ActiveDirectoryIdentityProvider` functionality, we have a long-running Active Directory Domain Controller
|
||||
server instance in our GCP account. See [AD-SETUP.md](AD-SETUP.md) for details.
|
||||
|
||||
## Running integration tests on your laptop using AD
|
||||
|
||||
The relevant environment variables can be pulled from the secret manager via the `hack/get-active-directory-env-vars.sh` script.
|
||||
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:
|
||||
|
||||
```bash
|
||||
# Must authenticate to gcloud to access the secret manager.
|
||||
gcloud auth login
|
||||
# In the pinniped repo's main branch or in your PR branch:
|
||||
hack/prepare-for-integration-tests.sh --get-active-directory-vars "$HOME/path/to/pinniped-ci-branch/hack/get-active-directory-env-vars.sh"
|
||||
```
|
||||
|
||||
## Running integration tests on your laptop using GitHub
|
||||
|
||||
The relevant environment variables can be pulled from the secret manager via the `hack/get-github-env-vars.sh` script.
|
||||
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:
|
||||
|
||||
```bash
|
||||
# Must authenticate to gcloud to access the secret manager.
|
||||
gcloud auth login
|
||||
# In the pinniped repo's main branch or in your PR branch:
|
||||
hack/prepare-for-integration-tests.sh --get-github-vars "$HOME/path/to/pinniped-ci-branch/hack/get-github-env-vars.sh"
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Pinniped is open source and licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE).
|
||||
|
||||
Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
|
||||
1
SECURITY.md
Normal file
1
SECURITY.md
Normal file
@@ -0,0 +1 @@
|
||||
Please see https://github.com/vmware/pinniped/blob/main/SECURITY.md
|
||||
14
dockerfiles/code-coverage-uploader/Dockerfile
Normal file
14
dockerfiles/code-coverage-uploader/Dockerfile
Normal file
@@ -0,0 +1,14 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For uploading code coverage reports during unit tests.
|
||||
FROM debian:13.2-slim AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl -sfLo /tmp/codecov https://uploader.codecov.io/latest/linux/codecov
|
||||
RUN chmod +x /tmp/codecov
|
||||
|
||||
FROM golang:1.25.5
|
||||
RUN apt-get update -y && apt-get dist-upgrade -y
|
||||
COPY --from=builder /tmp/codecov /usr/local/bin/codecov
|
||||
10
dockerfiles/crane/Dockerfile
Normal file
10
dockerfiles/crane/Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM gcr.io/go-containerregistry/crane AS crane
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
FROM golang:1.25.5
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin
|
||||
COPY --from=crane /ko-app/crane /usr/local/bin
|
||||
ENTRYPOINT ["bash"]
|
||||
16
dockerfiles/deployment-yaml-formatter/Dockerfile
Normal file
16
dockerfiles/deployment-yaml-formatter/Dockerfile
Normal file
@@ -0,0 +1,16 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
FROM debian:13.2-slim
|
||||
|
||||
# Note: libdigest-sha-perl is to get shasum, which is used when installing Carvel tools below.
|
||||
RUN apt-get update && apt-get install -y ca-certificates jq curl libdigest-sha-perl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
29
dockerfiles/eks-deployer/Dockerfile
Normal file
29
dockerfiles/eks-deployer/Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For deploying an EKS cluster and setting it up to run our tests.
|
||||
|
||||
FROM weaveworks/eksctl:v0.221.0 AS eksctl
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
FROM amazon/aws-cli:2.32.30
|
||||
RUN yum update -y && yum install -y jq perl-Digest-SHA openssl && yum clean all
|
||||
COPY --from=eksctl eksctl /usr/local/bin/eksctl
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install latest kubectl.
|
||||
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod u+x /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator.
|
||||
# This gets installed automatically via eksctl, but currently it downloads v0.5.2,
|
||||
# which will give us a v1alpha1 execcredential rather than a v1beta1 which we want.
|
||||
# When this has changed, we can delete this:
|
||||
# https://github.com/weaveworks/eksctl/blob/main/build/docker/Dockerfile#L49
|
||||
RUN curl -sfL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /usr/local/bin/aws-iam-authenticator \
|
||||
&& chmod u+x /usr/local/bin/aws-iam-authenticator
|
||||
15
dockerfiles/gh-cli/Dockerfile
Normal file
15
dockerfiles/gh-cli/Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the GitHub CLI.
|
||||
FROM debian:13.2-slim AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl \
|
||||
-sfLo /tmp/gh.tar.gz \
|
||||
https://github.com/cli/cli/releases/download/v2.40.0/gh_2.40.0_linux_amd64.tar.gz \
|
||||
&& tar -C /tmp --strip-components=1 -xzvf /tmp/gh.tar.gz
|
||||
|
||||
FROM golang:1.25.5
|
||||
COPY --from=builder /tmp/bin/gh /usr/local/bin/gh
|
||||
80
dockerfiles/integration-test-runner-beta/Dockerfile
Normal file
80
dockerfiles/integration-test-runner-beta/Dockerfile
Normal file
@@ -0,0 +1,80 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the integration tests as a client to a k8s cluster
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
# We need gcloud for running integration tests against GKE
|
||||
# because the kubeconfig uses gcloud as an `auth-provider`.
|
||||
# Use FROM gcloud-sdk instead of FROM golang because it's
|
||||
# a lot easier to install Go than to install gcloud in the
|
||||
# subsequent commands below.
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Create a non-root user account that can be used to run the tests.
|
||||
RUN useradd --create-home testrunner
|
||||
|
||||
# Install latest beta chrome.
|
||||
RUN \
|
||||
chown root:root /tmp && \
|
||||
chmod 1777 /tmp && \
|
||||
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
|
||||
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
|
||||
apt-get -y update && \
|
||||
apt-get -y install google-chrome-beta
|
||||
|
||||
# Output Chrome version used
|
||||
RUN google-chrome --version
|
||||
|
||||
# Install Go. Download URLs for any version of Go can be found at https://go.dev/dl/
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
RUN curl -fsSL https://go.dev/dl/go1.25.5.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
tar -C /usr/local -xzf /tmp/go.tar.gz && \
|
||||
rm /tmp/go.tar.gz && \
|
||||
go version
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
|
||||
# Install go tools gotestsum and test2json to record the test output in a nice format.
|
||||
RUN go install gotest.tools/gotestsum@latest
|
||||
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
|
||||
mv test2json /usr/local/bin/test2json
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
|
||||
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod 0755 /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -fsSL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod 0755 /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
|
||||
# tmc version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# install latest nmap
|
||||
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
|
||||
alien nmap-7.92-1.x86_64.rpm &&\
|
||||
dpkg -i nmap_7.92-2_amd64.deb
|
||||
80
dockerfiles/integration-test-runner/Dockerfile
Normal file
80
dockerfiles/integration-test-runner/Dockerfile
Normal file
@@ -0,0 +1,80 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the integration tests as a client to a k8s cluster
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
# We need gcloud for running integration tests against GKE
|
||||
# because the kubeconfig uses gcloud as an `auth-provider`.
|
||||
# Use FROM gcloud-sdk instead of FROM golang because it's
|
||||
# a lot easier to install Go than to install gcloud in the
|
||||
# subsequent commands below.
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Create a non-root user account that can be used to run the tests.
|
||||
RUN useradd --create-home testrunner
|
||||
|
||||
# Install latest stable chrome.
|
||||
RUN \
|
||||
chown root:root /tmp && \
|
||||
chmod 1777 /tmp && \
|
||||
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
|
||||
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
|
||||
apt-get -y update && \
|
||||
apt-get -y install google-chrome-stable
|
||||
|
||||
# Output Chrome version used
|
||||
RUN google-chrome --version
|
||||
|
||||
# Install Go. Download URLs for any version of Go can be found at https://go.dev/dl/
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
RUN curl -fsSL https://go.dev/dl/go1.25.5.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
tar -C /usr/local -xzf /tmp/go.tar.gz && \
|
||||
rm /tmp/go.tar.gz && \
|
||||
go version
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
|
||||
# Install go tools gotestsum and test2json to record the test output in a nice format.
|
||||
RUN go install gotest.tools/gotestsum@latest
|
||||
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
|
||||
mv test2json /usr/local/bin/test2json
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
|
||||
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod 0755 /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -fsSL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod 0755 /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
|
||||
# tmc version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# install latest nmap
|
||||
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
|
||||
alien nmap-7.92-1.x86_64.rpm &&\
|
||||
dpkg -i nmap_7.92-2_amd64.deb
|
||||
34
dockerfiles/k8s-app-deployer/Dockerfile
Normal file
34
dockerfiles/k8s-app-deployer/Dockerfile
Normal file
@@ -0,0 +1,34 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For deploying apps onto Kubernetes clusters (including GKE)
|
||||
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps dnsutils google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install latest kubectl.
|
||||
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod u+x /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -sfL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod u+x /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -sfL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -sfL -o /bin/tmc && chmod +x /bin/tmc && \
|
||||
# tmc version
|
||||
20
dockerfiles/k8s-code-generator/Dockerfile
Normal file
20
dockerfiles/k8s-code-generator/Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
ARG GO_VERSION
|
||||
|
||||
FROM golang:${GO_VERSION}
|
||||
|
||||
ARG GO_VERSION
|
||||
ARG K8S_PKG_VERSION
|
||||
ARG CONTROLLER_GEN_VERSION
|
||||
ARG CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
ENV GO_VERSION=$GO_VERSION
|
||||
ENV K8S_PKG_VERSION=$K8S_PKG_VERSION
|
||||
ENV CONTROLLER_GEN_VERSION=$CONTROLLER_GEN_VERSION
|
||||
ENV CRD_REF_DOCS_COMMIT_SHA=$CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
COPY setup.sh /codegen/
|
||||
|
||||
RUN /codegen/setup.sh
|
||||
116
dockerfiles/k8s-code-generator/setup.sh
Executable file
116
dockerfiles/k8s-code-generator/setup.sh
Executable file
@@ -0,0 +1,116 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [ -z "$GO_VERSION" ]; then
|
||||
echo "missing GO_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$K8S_PKG_VERSION" ]; then
|
||||
echo "missing K8S_PKG_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$CONTROLLER_GEN_VERSION" ]; then
|
||||
echo "missing CONTROLLER_GEN_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Debugging output for CI...
|
||||
echo "GO_VERSION: $GO_VERSION"
|
||||
echo "K8S_PKG_VERSION: $K8S_PKG_VERSION"
|
||||
echo "CONTROLLER_GEN_VERSION: $CONTROLLER_GEN_VERSION"
|
||||
echo "CRD_REF_DOCS_COMMIT_SHA: $CRD_REF_DOCS_COMMIT_SHA"
|
||||
|
||||
apt-get update -y && apt-get dist-upgrade -y
|
||||
|
||||
cd /codegen/
|
||||
|
||||
cat <<EOF >tools.go
|
||||
package tools
|
||||
|
||||
import (
|
||||
_ "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
_ "k8s.io/api/core/v1"
|
||||
_ "k8s.io/code-generator"
|
||||
)
|
||||
EOF
|
||||
|
||||
cat <<EOF >go.mod
|
||||
module codegen
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
k8s.io/apimachinery v$K8S_PKG_VERSION
|
||||
k8s.io/code-generator v$K8S_PKG_VERSION
|
||||
k8s.io/api v$K8S_PKG_VERSION
|
||||
)
|
||||
EOF
|
||||
|
||||
# Resolve dependencies and download the modules.
|
||||
echo "Running go mod tidy ..."
|
||||
go mod tidy
|
||||
echo "Running go mod download ..."
|
||||
go mod download
|
||||
|
||||
# Copy the downloaded source code of k8s.io/code-generator so we can "go install" all its commands.
|
||||
rm -rf "$(go env GOPATH)/src"
|
||||
mkdir -p "$(go env GOPATH)/src/k8s.io"
|
||||
cp -pr "$(go env GOMODCACHE)/k8s.io/code-generator@v$K8S_PKG_VERSION" "$(go env GOPATH)/src/k8s.io/code-generator"
|
||||
|
||||
# Install the commands to $GOPATH/bin. Also sed the related shell scripts, but leave those in the src dir.
|
||||
# Note that update-codegen.sh invokes these shell scripts at this src path.
|
||||
# The sed is a dirty hack to avoid having the code-generator shell scripts run go install again.
|
||||
# In version 0.23.0 the line inside the shell script that previously said "go install ..." started
|
||||
# to instead say "GO111MODULE=on go install ..." so this sed is a little wrong, but still seems to work.
|
||||
echo "Running go install for all k8s.io/code-generator commands ..."
|
||||
# Using sed to edit the go.mod file (and then running go mod tidy) is a dirty hack to work around
|
||||
# an issue introduced in Go v1.25. See https://github.com/golang/go/issues/74462.
|
||||
# The version of code-generator used by Kube 1.30 depends on x/tools v0.18.0.
|
||||
# The version of code-generator used by Kube 1.31 depends on x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d.
|
||||
# Other versions of Kube use code-generator versions which do not have this problem.
|
||||
(cd "$(go env GOPATH)/src/k8s.io/code-generator" &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.18\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.21\.1-.*#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
go mod tidy &&
|
||||
go install -v ./cmd/... &&
|
||||
sed -i -E -e 's/(go install.*)/# \1/g' ./*.sh)
|
||||
|
||||
if [[ ! -f "$(go env GOPATH)/bin/openapi-gen" ]]; then
|
||||
# Starting in Kube 1.30, openapi-gen moved from k8s.io/code-generator to k8s.io/kube-openapi.
|
||||
# Assuming that we are still in the /codegen directory, get the specific version of kube-openapi
|
||||
# that is selected as an indirect dependency by the go.mod.
|
||||
kube_openapi_version=$(go list -m k8s.io/kube-openapi | cut -f2 -d' ')
|
||||
# Install that version of its openapi-gen command.
|
||||
echo "Running go install for openapi-gen $kube_openapi_version ..."
|
||||
# Using sed to edit the go.mod file (and then running go mod tidy) is a dirty hack to work around
|
||||
# an issue introduced in Go v1.25. See https://github.com/golang/go/issues/74462.
|
||||
# If this were not needed, then we could just use "go install" directly without
|
||||
# copying the source code or editing the go.mod file (which is what this script used to do),
|
||||
# like this: go install -v "k8s.io/kube-openapi/cmd/openapi-gen@$kube_openapi_version"
|
||||
# The version of kube-openapi used by Kube 1.30 (and maybe 1.31) depends on x/tools v0.18.0.
|
||||
# The version of kube-openapi used by Kube 1.32 depends on x/tools v0.24.0.
|
||||
# Other versions of Kube use kube-openapi versions which do not have this problem.
|
||||
cp -pr "$(go env GOMODCACHE)/k8s.io/kube-openapi@$kube_openapi_version" "$(go env GOPATH)/src/k8s.io/kube-openapi"
|
||||
(cd "$(go env GOPATH)/src/k8s.io/kube-openapi" &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.18\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.24\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
go mod tidy &&
|
||||
go install -v ./cmd/openapi-gen)
|
||||
fi
|
||||
|
||||
echo "Running go install for controller-gen ..."
|
||||
go install -v sigs.k8s.io/controller-tools/cmd/controller-gen@v$CONTROLLER_GEN_VERSION
|
||||
|
||||
# We use a commit sha instead of a release semver because this project does not create
|
||||
# releases very often. They seem to only release 1-2 times per year, but commit to
|
||||
# main more often.
|
||||
echo "Running go install for crd-ref-docs ..."
|
||||
go install -v github.com/elastic/crd-ref-docs@$CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
# List all the commands that we just installed.
|
||||
echo "Installed the following commands to $(go env GOPATH)/bin:"
|
||||
ls "$(go env GOPATH)/bin"
|
||||
17
dockerfiles/pool-trigger-resource/Dockerfile
Normal file
17
dockerfiles/pool-trigger-resource/Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# We would like to use https://github.com/cfmobile/pool-trigger-resource for our pool recycle jobs.
|
||||
# Unfortunately, the pool-trigger-resource repo seems like it is not maintained by anyone. The most recent
|
||||
# commit was six years ago. On the other hand, its implementation is a shell script which basically
|
||||
# just calls some git commands, so it shouldn't need much maintenance if it works.
|
||||
# This is an updated version of https://github.com/cfmobile/pool-trigger-resource/blob/master/Dockerfile
|
||||
# to use newer versions of linux, jq, and git. The "assets" directory's source code is copied from
|
||||
# https://github.com/cfmobile/pool-trigger-resource/tree/master/assets as of commit efefe018c88e937.
|
||||
|
||||
FROM debian:13.2-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates jq git && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD assets/ /opt/resource/
|
||||
RUN chmod +rx /opt/resource/*
|
||||
219
dockerfiles/pool-trigger-resource/assets/check
Executable file
219
dockerfiles/pool-trigger-resource/assets/check
Executable file
@@ -0,0 +1,219 @@
|
||||
#!/bin/sh
|
||||
# vim: set ft=sh
|
||||
|
||||
set -e
|
||||
|
||||
exec 3>&1 # make stdout available as fd 3 for the result
|
||||
exec 1>&2 # redirect all output to stderr for logging
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
. "$(dirname "$0")"/common.sh
|
||||
|
||||
# for jq
|
||||
PATH=/usr/local/bin:$PATH
|
||||
|
||||
payload=$TMPDIR/git-resource-request
|
||||
|
||||
cat > "$payload" <&0
|
||||
|
||||
|
||||
uri=$(jq -r '.source.uri // ""' < "$payload")
|
||||
branch=$(jq -r '.source.branch // ""' < "$payload")
|
||||
pool_name=$(jq -r '.source.pool // ""' < "$payload")
|
||||
ref=$(jq -r '.version.ref // ""' < "$payload")
|
||||
|
||||
if [ -z "$uri" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing uri)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -z "$branch" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing branch)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -z "$pool_name" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing pool)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -n "$config_errors" ]; then
|
||||
echo "$config_errors"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
###########
|
||||
#
|
||||
# end processing inputs
|
||||
#
|
||||
###########
|
||||
|
||||
###########
|
||||
#
|
||||
# start git setup
|
||||
#
|
||||
###########
|
||||
|
||||
load_pubkey "$payload"
|
||||
|
||||
destination=$TMPDIR/git-resource-repo-cache
|
||||
|
||||
if [ -d "$destination" ]; then
|
||||
cd "$destination"
|
||||
git fetch
|
||||
git reset --hard FETCH_HEAD
|
||||
else
|
||||
branchflag=""
|
||||
if [ -n "$branch" ]; then
|
||||
branchflag="--branch $branch"
|
||||
fi
|
||||
|
||||
git clone "$uri" $branchflag "$destination"
|
||||
cd "$destination"
|
||||
fi
|
||||
|
||||
|
||||
git config user.name "CI Pool Trigger Resource"
|
||||
git config user.email "ci-pool-trigger@localhost"
|
||||
|
||||
###########
|
||||
#
|
||||
# end git setup
|
||||
#
|
||||
###########
|
||||
|
||||
|
||||
###########
|
||||
#
|
||||
# start calculating pending triggers
|
||||
#
|
||||
###########
|
||||
|
||||
if [ -n "$ref" ] && git cat-file -e "$ref"; then
|
||||
ref_exists_and_is_valid=yes
|
||||
fi
|
||||
|
||||
if [ -e "$pool_name/.pending-triggers" ] && [ -e "$pool_name/.pending-removals" ]; then
|
||||
tally_files_exist=yes
|
||||
|
||||
#check validity of tally files
|
||||
fi
|
||||
|
||||
if [ -n "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
|
||||
files_changed=$(git show --pretty="format:" --name-status -r "$ref"..HEAD -- "$pool_name"/unclaimed/)
|
||||
|
||||
set +e
|
||||
added_items=$(echo "$files_changed" | grep "^A")
|
||||
removed_items=$(echo "$files_changed" | grep "^D")
|
||||
set -e
|
||||
|
||||
if [ -n "$added_items" ]; then
|
||||
num_added_items=$(echo "$added_items" | wc -l)
|
||||
else
|
||||
num_added_items=0
|
||||
fi
|
||||
|
||||
if [ -n "$removed_items" ]; then
|
||||
num_removed_items=$(echo "$removed_items" | wc -l)
|
||||
else
|
||||
num_removed_items=0
|
||||
fi
|
||||
|
||||
old_pending_triggers=$(cat "$pool_name"/.pending-triggers)
|
||||
old_pending_removals=$(cat "$pool_name"/.pending-removals)
|
||||
|
||||
pending_triggers=$(( old_pending_triggers + num_added_items ))
|
||||
|
||||
if [ "$num_removed_items" -gt "$old_pending_removals" ]; then
|
||||
extra_removals=$(( num_removed_items - old_pending_removals ))
|
||||
pending_removals=0
|
||||
pending_triggers=$(( pending_triggers - extra_removals ))
|
||||
else
|
||||
pending_removals=$(( old_pending_removals - num_removed_items ))
|
||||
fi
|
||||
else
|
||||
pending_triggers=$(find "$pool_name"/unclaimed -not -path "*/\.*" -path "$pool_name/unclaimed/*"| wc -l)
|
||||
pending_removals=0
|
||||
fi
|
||||
###########
|
||||
#
|
||||
# end calculating pending triggers
|
||||
#
|
||||
###########
|
||||
|
||||
|
||||
###########
|
||||
#
|
||||
# start handling results
|
||||
#
|
||||
###########
|
||||
|
||||
if [ "$pending_triggers" -gt 0 ]; then
|
||||
last_commit=$(git log -1 --pretty='format:%H')
|
||||
result=$(echo "$last_commit" | jq -R '.' | jq -s "map({ref: .})")
|
||||
else
|
||||
result="[]"
|
||||
fi
|
||||
|
||||
###########
|
||||
#
|
||||
# end handling results
|
||||
#
|
||||
###########
|
||||
|
||||
|
||||
|
||||
###########
|
||||
#
|
||||
# start updating triggers
|
||||
#
|
||||
###########
|
||||
|
||||
if [ "$pending_triggers" -gt 0 ]; then
|
||||
new_pending_triggers=$(( pending_triggers - 1 ))
|
||||
new_pending_removals=$(( pending_removals + 1 ))
|
||||
echo "$new_pending_triggers" > "$pool_name"/.pending-triggers
|
||||
echo "$new_pending_removals" > "$pool_name"/.pending-removals
|
||||
git add "$pool_name"/.pending*
|
||||
|
||||
commit_message="triggering build with pending triggers: $new_pending_triggers; pending removals: $new_pending_removals"
|
||||
|
||||
if [ -n "$ref_exists_and_is_valid" ] && [ -z "$tally_files_exist" ]; then
|
||||
commit_message="$commit_message
|
||||
|
||||
.pending-triggers and/or .pending-removals are missing - re-initializing resource"
|
||||
elif [ -z "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
|
||||
commit_message="$commit_message
|
||||
|
||||
resource initialized with pre-existing .pending-triggers and .pending-removals - ignoring"
|
||||
elif [ -z "$ref_exists_and_is_valid" ]; then
|
||||
commit_message="$commit_message
|
||||
|
||||
initializing tally files"
|
||||
fi
|
||||
|
||||
if [ -n "$added_items" ]; then
|
||||
commit_message="$commit_message
|
||||
|
||||
additions:
|
||||
$added_items"
|
||||
fi
|
||||
|
||||
if [ -n "$removed_items" ]; then
|
||||
commit_message="$commit_message
|
||||
|
||||
removals:
|
||||
$removed_items"
|
||||
fi
|
||||
|
||||
git commit --allow-empty -m "$commit_message"
|
||||
git push
|
||||
fi
|
||||
|
||||
###########
|
||||
#
|
||||
# end updating triggers
|
||||
#
|
||||
###########
|
||||
|
||||
echo "$result" >&3
|
||||
28
dockerfiles/pool-trigger-resource/assets/common.sh
Executable file
28
dockerfiles/pool-trigger-resource/assets/common.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
export TMPDIR=${TMPDIR:-/tmp}
|
||||
|
||||
load_pubkey() {
|
||||
local private_key_path=$TMPDIR/git-resource-private-key
|
||||
|
||||
(jq -r '.source.private_key // empty' < "$1") > "$private_key_path"
|
||||
|
||||
if [ -s "$private_key_path" ]; then
|
||||
chmod 0600 "$private_key_path"
|
||||
|
||||
eval "$(ssh-agent)" >/dev/null 2>&1
|
||||
trap 'kill $SSH_AGENT_PID' 0
|
||||
|
||||
ssh-add "$private_key_path" >/dev/null 2>&1
|
||||
|
||||
mkdir -p ~/.ssh
|
||||
cat > ~/.ssh/config <<EOF
|
||||
StrictHostKeyChecking no
|
||||
LogLevel quiet
|
||||
EOF
|
||||
chmod 0600 ~/.ssh/config
|
||||
fi
|
||||
}
|
||||
2
dockerfiles/pool-trigger-resource/assets/in
Executable file
2
dockerfiles/pool-trigger-resource/assets/in
Executable file
@@ -0,0 +1,2 @@
|
||||
#!/bin/sh
|
||||
cat
|
||||
2
dockerfiles/pool-trigger-resource/assets/out
Executable file
2
dockerfiles/pool-trigger-resource/assets/out
Executable file
@@ -0,0 +1,2 @@
|
||||
#!/bin/sh
|
||||
cat
|
||||
11
dockerfiles/test-bitnami-ldap/Dockerfile
Normal file
11
dockerfiles/test-bitnami-ldap/Dockerfile
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright 2024-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# It seems that Bitnami no longer supports openldap.
|
||||
# See https://github.com/bitnami/containers/issues/83267
|
||||
# All existing container images have been migrated from the public catalog (docker.io/bitnami) to
|
||||
# the “Bitnami Legacy” repository (docker.io/bitnamilegacy), where they will no longer receive updates.
|
||||
#
|
||||
# FROM bitnami/openldap:2.6.10
|
||||
|
||||
FROM bitnamilegacy/openldap:2.6.10
|
||||
28
dockerfiles/test-cfssl/Dockerfile
Normal file
28
dockerfiles/test-cfssl/Dockerfile
Normal file
@@ -0,0 +1,28 @@
|
||||
# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# The cfssl/cfssl container image on dockerhub is built poorly.
|
||||
# For every arch, the image contains /bin/* binaries for amd64.
|
||||
# Therefore, we cannot use bash on arm64 inside this container image.
|
||||
# This was observed in cfssl/cfssl:v1.6.4.
|
||||
# However, they do compile their own binaries for both arm64 and amd64,
|
||||
# so we can just copy their binaries into a vanilla linux base image.
|
||||
FROM cfssl/cfssl:v1.6.5 AS cfssl
|
||||
|
||||
# We just need any basic unix with bash, but we can pick the same
|
||||
# base image that they use, just in case they did any dynamic linking.
|
||||
FROM golang:1.25.5
|
||||
|
||||
# Their Dockerfile https://github.com/cloudflare/cfssl/blob/master/Dockerfile
|
||||
# calls their Makefile https://github.com/cloudflare/cfssl/blob/master/Makefile
|
||||
# which builds several binaries. Copy them all.
|
||||
COPY --from=cfssl /usr/bin/cf* /usr/local/bin
|
||||
COPY --from=cfssl /usr/bin/mkbundle /usr/local/bin
|
||||
COPY --from=cfssl /usr/bin/multirootca /usr/local/bin
|
||||
# Their Dockerfile also populates this directory, so copy that too.
|
||||
COPY --from=cfssl /etc/cfssl /etc/cfssl
|
||||
|
||||
# These lines are copied from the cfssl Dockerfile.
|
||||
EXPOSE 8888
|
||||
ENTRYPOINT ["cfssl"]
|
||||
CMD ["--help"]
|
||||
4
dockerfiles/test-dex/Dockerfile
Normal file
4
dockerfiles/test-dex/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM ghcr.io/dexidp/dex:v2.44.0
|
||||
13
dockerfiles/test-forward-proxy/Dockerfile
Normal file
13
dockerfiles/test-forward-proxy/Dockerfile
Normal file
@@ -0,0 +1,13 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Use a runtime image based on Debian slim
|
||||
FROM debian:13.2-slim
|
||||
|
||||
# Install Squid and drop in a very basic, open proxy configuration.
|
||||
RUN apt-get update && apt-get install -y squid
|
||||
COPY squid.conf /etc/squid/squid.conf
|
||||
EXPOSE 3128
|
||||
|
||||
# Launch Squid as a foreground process.
|
||||
CMD squid -N -C -n proxy -d1 2>&1
|
||||
56
dockerfiles/test-forward-proxy/squid.conf
Normal file
56
dockerfiles/test-forward-proxy/squid.conf
Normal file
@@ -0,0 +1,56 @@
|
||||
## listen on TCP 3128
|
||||
http_port 3128
|
||||
|
||||
## Prevent caching anything (pass through only)
|
||||
cache deny all
|
||||
|
||||
## Allow all connections.
|
||||
http_access allow all
|
||||
|
||||
## Where does Squid log to?
|
||||
cache_store_log none
|
||||
cache_log /dev/null
|
||||
access_log daemon:/var/log/squid/access.log squid
|
||||
access_log syslog:user.info squid
|
||||
|
||||
## When logging, web auditors want to see the full uri, even with the query terms
|
||||
strip_query_terms off
|
||||
|
||||
## Keep 7 days of logs
|
||||
logfile_rotate 7
|
||||
|
||||
## How much RAM, in MB, to use for cache? Default since squid 3.1 is 256 MB
|
||||
cache_mem 8 MB
|
||||
|
||||
## Maximum size of individual objects to store in cache
|
||||
maximum_object_size 1 MB
|
||||
|
||||
## Amount of data to buffer from server to client
|
||||
read_ahead_gap 64 KB
|
||||
|
||||
## Number of file descriptors to support (default is 2**20 which takes up ~408 MB of memory)
|
||||
max_filedescriptors 65536
|
||||
|
||||
## Drop X-Forwarded-For headers
|
||||
forwarded_for delete
|
||||
|
||||
## Suppress sending squid version information
|
||||
httpd_suppress_version_string on
|
||||
|
||||
## How long to wait when shutting down squid
|
||||
shutdown_lifetime 10 seconds
|
||||
|
||||
## What hostname to display? (defaults to system hostname)
|
||||
visible_hostname proxy
|
||||
|
||||
## Drop some response headers that Squid normally adds (just being paranoid here)
|
||||
reply_header_access Server deny all
|
||||
reply_header_access Via deny all
|
||||
reply_header_access X-Cache deny all
|
||||
reply_header_access X-Cache-Lookup deny all
|
||||
reply_header_access X-Squid-Error deny all
|
||||
|
||||
## Drop denied connections with just a TCP reset (no error page that might leak info)
|
||||
deny_info TCP_RESET all
|
||||
|
||||
dns_v4_first off
|
||||
4
dockerfiles/test-kubectl/Dockerfile
Normal file
4
dockerfiles/test-kubectl/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
# Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM bitnami/kubectl:latest
|
||||
38
hack/approve-and-merge.sh
Executable file
38
hack/approve-and-merge.sh
Executable file
@@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
repo=vmware/pinniped
|
||||
current_branch_name=$(git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
if [[ "$current_branch_name" != "ci" ]]; then
|
||||
echo "error: this script should only be used on the ci branch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Print the list of PRs to the screen.
|
||||
PAGER='' gh pr list --base ci --repo $repo --limit 1000
|
||||
|
||||
# Exit if there are no PRs found.
|
||||
count_prs=$(gh pr list --base ci --repo $repo --jq ". | length" --json "number")
|
||||
if [[ "${count_prs}" == "0" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
read -p "Do you wish to approve and merge these PRs for the ci branch? y/n: " yn
|
||||
case $yn in
|
||||
[Yy]* );;
|
||||
* ) exit 0;;
|
||||
esac
|
||||
|
||||
gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \
|
||||
| xargs -I{} gh pr review {} --approve
|
||||
|
||||
gh pr list --base ci --repo $repo --json="number" --jq ".[] | .number" \
|
||||
| xargs -I{} gh pr merge {} --merge --delete-branch
|
||||
|
||||
echo "now pulling the merged commits"
|
||||
git pull --rebase --autostash
|
||||
29
hack/check-copyright-year.sh
Executable file
29
hack/check-copyright-year.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
files=$(git diff --cached --name-only)
|
||||
year=$(date +"%Y")
|
||||
|
||||
missing_copyright_files=()
|
||||
|
||||
for f in $files; do
|
||||
head -10 "$f" | grep -i 'Copyright.*the Pinniped contributors' 2>&1 1>/dev/null || continue
|
||||
|
||||
if ! head -10 "$f" | grep -i -e "Copyright.*$year.*the Pinniped contributors" >/dev/null 2>&1; then
|
||||
missing_copyright_files+=("$f")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${#missing_copyright_files[@]}" -gt "0" ]]; then
|
||||
echo "Copyright notice should include the year the file was created and the year the file was last modified."
|
||||
echo "$year is missing in the copyright notice of the following files:"
|
||||
for f in "${missing_copyright_files[@]}"; do
|
||||
echo " $f"
|
||||
done
|
||||
echo "Try using hack/update-copyright-year.sh to update the copyright automatically in staged files."
|
||||
exit 1
|
||||
fi
|
||||
75
hack/create-gke-acceptance-env.sh
Executable file
75
hack/create-gke-acceptance-env.sh
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if ! [ -x "$(command -v gcloud)" ]; then
|
||||
echo 'Error: Google Cloud SDK (gcloud) is not installed (see https://cloud.google.com/sdk/docs/quickstarts).' >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${SHARED_VPC_PROJECT:-}" ]]; then
|
||||
echo "SHARED_VPC_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${SHARED_VPC_NAME:-}" ]]; then
|
||||
echo "SHARED_VPC_NAME env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${SUBNET_NAME:-}" ]]; then
|
||||
echo "SUBNET_NAME env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CLUSTER_ZONE="us-west1-c"
|
||||
SUBNET_REGION="us-west1"
|
||||
|
||||
# Create (or recreate) a GKE acceptance cluster.
|
||||
# Pro tip: The GCP Console UI can help you build this command.
|
||||
# The following fields were customized, and all of the others are left as the GCP Console's defaults:
|
||||
# - Cluster name
|
||||
# - Machine type - starting in Aug 2025, the google pods request more than 1 CPU, making them not fit on a single e2-medium node
|
||||
# - Cluster version - newest at the time
|
||||
# - Num nodes - sized smaller to be cheaper
|
||||
# - Maintenance window start and recurrence - to avoid downtime during business hours
|
||||
# - Issue client certificate - to make it possible to use an admin kubeconfig without the GKE auth plugin
|
||||
# - tags, authorized networks, private nodes, private endpoint, network, subnet, and secondary ranges
|
||||
# - service account
|
||||
gcloud container --project "$PINNIPED_GCP_PROJECT" clusters create "gke-acceptance-cluster" \
|
||||
--zone "$CLUSTER_ZONE" \
|
||||
--no-enable-basic-auth \
|
||||
--cluster-version "1.32.4-gke.1415000" \
|
||||
--release-channel "regular" \
|
||||
--machine-type "e2-standard-2" \
|
||||
--image-type "COS_CONTAINERD" --disk-type "pd-balanced" --disk-size "100" --metadata disable-legacy-endpoints=true \
|
||||
--num-nodes "1" \
|
||||
--logging=SYSTEM,WORKLOAD --monitoring=SYSTEM,STORAGE,POD,DEPLOYMENT,STATEFULSET,DAEMONSET,HPA,CADVISOR,KUBELET \
|
||||
--no-enable-intra-node-visibility \
|
||||
--default-max-pods-per-node "110" \
|
||||
--security-posture=standard --workload-vulnerability-scanning=disabled \
|
||||
--addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver \
|
||||
--enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0 \
|
||||
--binauthz-evaluation-mode=DISABLED --enable-managed-prometheus \
|
||||
--enable-shielded-nodes --shielded-integrity-monitoring --no-shielded-secure-boot \
|
||||
--node-locations "$CLUSTER_ZONE" \
|
||||
--maintenance-window-start "2020-07-01T03:00:00Z" --maintenance-window-end "2020-07-01T11:00:00Z" \
|
||||
--maintenance-window-recurrence "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA,SU" \
|
||||
--issue-client-certificate \
|
||||
--tags "gke-broadcom" \
|
||||
--enable-master-authorized-networks \
|
||||
--master-authorized-networks "10.0.0.0/8" \
|
||||
--enable-private-nodes \
|
||||
--enable-private-endpoint \
|
||||
--enable-ip-alias \
|
||||
--network "projects/${SHARED_VPC_PROJECT}/global/networks/${SHARED_VPC_NAME}" \
|
||||
--subnetwork "projects/${SHARED_VPC_PROJECT}/regions/${SUBNET_REGION}/subnetworks/${SUBNET_NAME}" \
|
||||
--cluster-secondary-range-name "services" \
|
||||
--services-secondary-range-name "pods" \
|
||||
--service-account "terraform@${PINNIPED_GCP_PROJECT}.iam.gserviceaccount.com"
|
||||
68
hack/edit-gcloud-secret.sh
Executable file
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
echo "usage: $0 SECRET_NAME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -u
|
||||
if ! command -v yq &> /dev/null; then
|
||||
echo "Please install the yq CLI"
|
||||
exit 1
|
||||
fi
|
||||
if ! command -v delta &> /dev/null; then
|
||||
echo "Please install the delta CLI (brew install git-delta)"
|
||||
exit 1
|
||||
fi
|
||||
if ! command -v gcloud &> /dev/null; then
|
||||
echo "Please install the gcloud CLI"
|
||||
exit 1
|
||||
fi
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create a temporary directory for secrets, cleaned up at the end of this script.
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
# Grab the current version.
|
||||
echo "Downloading the latest version of '$1'..."
|
||||
gcloud secrets versions access latest --secret="$1" --project "$PINNIPED_GCP_PROJECT" > "$TEMP_DIR/$1.yaml"
|
||||
|
||||
# Use yq to format the YAML into a consistent style.
|
||||
# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used.
|
||||
# For now, we'll skip the pretty-printing.
|
||||
# yq eval -i -P '.' "$TEMP_DIR/$1.yaml"
|
||||
yq eval -i '.' "$TEMP_DIR/$1.yaml"
|
||||
cp "$TEMP_DIR/$1.yaml" "$TEMP_DIR/$1-original.yaml"
|
||||
|
||||
# Invoke $EDITOR to modify the file.
|
||||
${EDITOR:-vim} "$TEMP_DIR/$1.yaml"
|
||||
|
||||
# Format the output from the editor just as we did before the edit.
|
||||
|
||||
# TODO: there is a bug in yq that strips leading comments on the first lines of a file when -P is used.
|
||||
# For now, we'll skip the pretty-printing.
|
||||
# yq eval -i -P '.' "$TEMP_DIR/$1.yaml"
|
||||
yq eval -i '.' "$TEMP_DIR/$1.yaml"
|
||||
|
||||
# Dump the diff using git-delta.
|
||||
( cd "$TEMP_DIR" && delta "$1-original.yaml" "$1.yaml" || true )
|
||||
|
||||
read -p "Save as new version of '$1' [yN]: " -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
gcloud secrets versions add "$1" --data-file "$TEMP_DIR/$1.yaml" --project "$PINNIPED_GCP_PROJECT"
|
||||
fi
|
||||
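As a usage sketch (the project name is a placeholder; `concourse-secrets` is the secret referenced by the fly helpers below):

    # Hypothetical example: edit the shared Concourse secrets in your editor of choice.
    export PINNIPED_GCP_PROJECT=my-gcp-project   # placeholder project name
    EDITOR=vim ./hack/edit-gcloud-secret.sh concourse-secrets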
58
hack/fly-helpers.sh
Normal file
@@ -0,0 +1,58 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

#
# Some global fly config.
#
export FLY_CLI=/usr/local/bin/fly
export CONCOURSE_URL=https://ci.pinniped.broadcom.net
export CONCOURSE_TEAM=main
export CONCOURSE_TARGET=pinniped
export ROOT_DIR
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."

#
# Some helper functions for the update-pipeline scripts to use.
#
function set_pipeline() {
  # Ensure that fly is installed/upgraded/configured.
  "$ROOT_DIR"/hack/setup-fly.sh

  # Ensure that the user is authenticated with gcloud.
  if ! gcloud auth print-access-token &>/dev/null; then
    echo "Please run \`gcloud auth login\` and try again."
    exit 1
  fi

  if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
    echo "PINNIPED_GCP_PROJECT env var must be set"
    exit 1
  fi

  # Local vars.
  local pipeline_name=$1
  local pipeline_file=$2
  local gcloud_project="$PINNIPED_GCP_PROJECT"
  local gcloud_secret_name=concourse-secrets

  # Create/update the pipeline.
  $FLY_CLI --target "$CONCOURSE_TARGET" set-pipeline \
    --pipeline "$pipeline_name" \
    --config "$pipeline_file" \
    --load-vars-from <(gcloud secrets versions access latest \
      --secret="$gcloud_secret_name" \
      --project "$gcloud_project")
}

function ensure_time_resource_has_at_least_one_version() {
  local pipeline_name=$1
  local resource_name=$2

  # Force the specified time resource to have at least one version. Idempotent.
  # For a new pipeline, a time resource will have no versions until the specified time has occurred.
  # For example, a once-per-night time resource will have no versions until that time
  # has passed on the first night.
  $FLY_CLI --target "$CONCOURSE_TARGET" check-resource \
    --resource "$pipeline_name/$resource_name" \
    --from "time:2000-01-01T00:00:00Z" >/dev/null
}
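To sketch how these helpers are meant to be consumed, an update-pipeline script might look roughly like the following (the pipeline name, config path, and time resource name are hypothetical):

    #!/usr/bin/env bash
    # Hypothetical update-pipeline script built on fly-helpers.sh.
    set -euo pipefail
    script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    source "$script_dir/fly-helpers.sh"
    # Create or update the pipeline from its config file (names are placeholders).
    set_pipeline example-pipeline "$ROOT_DIR/pipelines/example-pipeline/pipeline.yml"
    # Give a nightly time resource an initial version so downstream jobs can run right away (placeholder name).
    ensure_time_resource_has_at_least_one_version example-pipeline nightly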
43
hack/get-aws-ad-env-vars.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# To be run before local integration tests.
|
||||
# From the pinniped repo:
|
||||
# hack/prepare-for-integration-tests.sh --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function _get_concourse_secret {
|
||||
gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1"
|
||||
}
|
||||
|
||||
export PINNIPED_TEST_AD_HOST="$(_get_concourse_secret '.aws-ad-host')"
|
||||
export PINNIPED_TEST_AD_DOMAIN="$(_get_concourse_secret '.aws-ad-domain')"
|
||||
export PINNIPED_TEST_AD_BIND_ACCOUNT_USERNAME="$(_get_concourse_secret '.aws-ad-bind-account-username')"
|
||||
export PINNIPED_TEST_AD_BIND_ACCOUNT_PASSWORD="$(_get_concourse_secret '.aws-ad-bind-account-password')"
|
||||
export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_NAME="objectGUID"
|
||||
export PINNIPED_TEST_AD_USER_UNIQUE_ID_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-unique-id-attribute-value')"
|
||||
export PINNIPED_TEST_AD_USER_USER_PRINCIPAL_NAME="$(_get_concourse_secret '.aws-ad-user-userprincipalname')"
|
||||
export PINNIPED_TEST_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-user-password')"
|
||||
export PINNIPED_TEST_AD_LDAPS_CA_BUNDLE="$(_get_concourse_secret '.aws-ad-ca-data')"
|
||||
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_DN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-dn')"
|
||||
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_CN="$(_get_concourse_secret '.aws-ad-expected-direct-groups-cn')"
|
||||
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountnames')"
|
||||
export PINNIPED_TEST_AD_USER_EXPECTED_GROUPS_SAMACCOUNTNAME_DOMAINNAMES="$(_get_concourse_secret '.aws-ad-expected-direct-and-nested-groups-samaccountname-domainnames')"
|
||||
export PINNIPED_TEST_DEACTIVATED_AD_USER_SAMACCOUNTNAME="$(_get_concourse_secret '.aws-ad-deactivated-user-samaccountname')"
|
||||
export PINNIPED_TEST_DEACTIVATED_AD_USER_PASSWORD="$(_get_concourse_secret '.aws-ad-deactivated-user-password')"
|
||||
export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_NAME="mail"
|
||||
export PINNIPED_TEST_AD_USER_EMAIL_ATTRIBUTE_VALUE="$(_get_concourse_secret '.aws-ad-user-email-attribute-value')"
|
||||
export PINNIPED_TEST_AD_DEFAULTNAMINGCONTEXT_DN="$(_get_concourse_secret '.aws-ad-defaultnamingcontext')"
|
||||
export PINNIPED_TEST_AD_USERS_DN="$(_get_concourse_secret '.aws-ad-users-dn')"
|
||||
|
||||
unset -f _get_concourse_secret
|
||||
39
hack/get-github-env-vars.sh
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# To be run before local integration tests.
|
||||
# From the pinniped repo:
|
||||
# hack/prepare-for-integration-tests.sh --get-github-vars "../pinniped-ci-branch/hack/get-github-env-vars.sh"
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function _get_concourse_secret {
|
||||
gcloud secrets versions access latest --secret="concourse-secrets" --project "$PINNIPED_GCP_PROJECT" | yq e "$1"
|
||||
}
|
||||
|
||||
export PINNIPED_TEST_GITHUB_APP_CLIENT_ID="$(_get_concourse_secret '.github-app-client-id')"
|
||||
export PINNIPED_TEST_GITHUB_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-app-client-secret')"
|
||||
|
||||
export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_ID="$(_get_concourse_secret '.github-oauth-app-client-id')"
|
||||
export PINNIPED_TEST_GITHUB_OAUTH_APP_CLIENT_SECRET="$(_get_concourse_secret '.github-oauth-app-client-secret')"
|
||||
export PINNIPED_TEST_GITHUB_OAUTH_APP_ALLOWED_CALLBACK_URL="$(_get_concourse_secret '.github-oauth-app-allowed-callback-url')"
|
||||
|
||||
export PINNIPED_TEST_GITHUB_USER_USERNAME="$(_get_concourse_secret '.github-username')"
|
||||
export PINNIPED_TEST_GITHUB_USER_PASSWORD="$(_get_concourse_secret '.github-password')"
|
||||
export PINNIPED_TEST_GITHUB_USER_OTP_SECRET="$(_get_concourse_secret '.github-user-otp-secret')"
|
||||
|
||||
export PINNIPED_TEST_GITHUB_USERID="$(_get_concourse_secret '.github-userid')"
|
||||
export PINNIPED_TEST_GITHUB_ORG="$(_get_concourse_secret '.github-org')"
|
||||
export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_NAMES="$(_get_concourse_secret '.github-expected-team-names')"
|
||||
export PINNIPED_TEST_GITHUB_EXPECTED_TEAM_SLUGS="$(_get_concourse_secret '.github-expected-team-slugs')"
|
||||
|
||||
unset -f _get_concourse_secret
|
||||
20
hack/list-all-running-jobs.sh
Executable file
@@ -0,0 +1,20 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Define some env vars
source "$script_dir/fly-helpers.sh"

# Setup and login if needed
"$ROOT_DIR"/hack/setup-fly.sh

# List all jobs that are currently running in CI.
# An empty result means that there are no jobs running.
for p in $($FLY_CLI --target "$CONCOURSE_TARGET" pipelines --json | jq -r ".[].name"); do
  $FLY_CLI --target "$CONCOURSE_TARGET" jobs -p "$p" --json | jq -r ".[] | select(.next_build.status == \"started\") | (\"$p/\" + .name)"
done
37
hack/pinniped-pre-commit.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
pinniped_ci_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
|
||||
pinniped_path="${1-$PWD}"
|
||||
pinniped_ci_path="${2-$pinniped_ci_root}"
|
||||
|
||||
cd "$pinniped_path" || exit 1
|
||||
|
||||
if [[ ! -f "./hack/module.sh" ]]; then
|
||||
echo "$pinniped_path does not appear to be the path to the source code repo directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -f "$pinniped_ci_path/hack/run-integration-tests.sh" ]]; then
|
||||
echo "$pinniped_ci_path does not appear to be the path to the ci repo directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "Running linters..."
|
||||
./hack/module.sh lint
|
||||
|
||||
echo
|
||||
echo "Running units..."
|
||||
./hack/module.sh unittest
|
||||
|
||||
echo
|
||||
echo "Running integrations..."
|
||||
"$pinniped_ci_path"/hack/run-integration-tests.sh --from-clean-cluster
|
||||
|
||||
echo
|
||||
echo "ALL TESTS PASSED"
|
||||
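For example, assuming the pinniped source repo and this ci repo are checked out as sibling directories (layout assumed, matching the other hack scripts):

    cd ~/workspace/pinniped                                # assumed checkout location
    ../pinniped-ci-branch/hack/pinniped-pre-commit.sh      # lint, unit test, then integration test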
139
hack/prepare-for-uninstall-test.sh
Executable file
@@ -0,0 +1,139 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# This script can be used to prepare a kind cluster and deploy the app
|
||||
# in preparation for running the uninstall test.
|
||||
# It will also output instructions on how to run the uninstall test.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
help=no
|
||||
skip_build=no
|
||||
pinniped_ci_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
PARAMS=""
|
||||
while (("$#")); do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
help=yes
|
||||
shift
|
||||
;;
|
||||
-s | --skip-build)
|
||||
skip_build=yes
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
echo "Error: Unsupported flag $1" >&2
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
PARAMS="$PARAMS $1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
eval set -- "$PARAMS"
|
||||
|
||||
if [[ "$help" == "yes" ]]; then
|
||||
me="$(basename "${BASH_SOURCE[0]}")"
|
||||
echo "Usage:"
|
||||
echo " $me [flags] [path/to/pinniped] [path/to/pinniped-ci-branch]"
|
||||
echo
|
||||
echo " path/to/pinniped default: \$PWD ($PWD)"
|
||||
echo " path/to/pinniped-ci-branch default: the parent directory of this script ($pinniped_ci_root)"
|
||||
echo
|
||||
echo "Flags:"
|
||||
echo " -h, --help: print this usage"
|
||||
echo " -s, --skip-build: reuse the most recently built image of the app instead of building"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pinniped_path="${1-$PWD}"
|
||||
pinniped_ci_path="${2-$pinniped_ci_root}"
|
||||
|
||||
if ! command -v kind >/dev/null; then
|
||||
echo "Please install kind. e.g. 'brew install kind' for MacOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v ytt >/dev/null; then
|
||||
log_error "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v kapp >/dev/null; then
|
||||
log_error "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v kubectl >/dev/null; then
|
||||
log_error "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "$pinniped_path" || exit 1
|
||||
|
||||
if [[ ! -f Dockerfile || ! -d deploy ]]; then
|
||||
echo "$pinniped_path does not appear to be the path to the source code repo directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -d "$pinniped_ci_path/pipelines/shared-helpers" ]]; then
|
||||
echo "$pinniped_ci_path does not appear to be the path to the ci repo directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Deleting running kind clusters to prepare a clean slate for the install+uninstall test..."
|
||||
kind delete cluster --name pinniped
|
||||
|
||||
echo "Creating a kind cluster..."
|
||||
kind create cluster --name pinniped
|
||||
|
||||
registry="docker.io"
|
||||
repo="test/build"
|
||||
registry_repo="$registry/$repo"
|
||||
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
|
||||
|
||||
if [[ "$skip_build" == "yes" ]]; then
|
||||
most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1)
|
||||
if [[ -n "$most_recent_tag" ]]; then
|
||||
tag="$most_recent_tag"
|
||||
do_build=no
|
||||
else
|
||||
# Oops, there was no previous build. Need to build anyway.
|
||||
do_build=yes
|
||||
fi
|
||||
else
|
||||
do_build=yes
|
||||
fi
|
||||
|
||||
registry_repo_tag="${registry_repo}:${tag}"
|
||||
|
||||
if [[ "$do_build" == "yes" ]]; then
|
||||
# Rebuild the code
|
||||
echo "Docker building the app..."
|
||||
docker build . --tag "$registry_repo_tag"
|
||||
fi
|
||||
|
||||
# Load it into the cluster
|
||||
echo "Loading the app's container image into the kind cluster..."
|
||||
kind load docker-image "$registry_repo_tag" --name pinniped
|
||||
|
||||
cat <<EOF >/tmp/uninstall-test-env
|
||||
# The following env vars should be set before running $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh
|
||||
export IMAGE_REPO="$registry_repo"
|
||||
export IMAGE_TAG="$tag"
|
||||
EOF
|
||||
|
||||
echo "Done!"
|
||||
echo
|
||||
echo "Ready to run an uninstall test."
|
||||
echo " cd $pinniped_path"
|
||||
echo "Then either"
|
||||
echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-test.sh"
|
||||
echo "or"
|
||||
echo " source /tmp/uninstall-test-env && $pinniped_ci_path/pipelines/shared-tasks/run-uninstall-test/run-uninstall-from-existing-namespace-test.sh"
|
||||
echo
|
||||
echo "When you're finished, use 'kind delete cluster --name pinniped to tear down the cluster."
|
||||
248
hack/prepare-remote-cluster-for-integration-tests.sh
Executable file
@@ -0,0 +1,248 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Assuming that you have somehow got your hands on a remote GKE or kind cluster,
|
||||
# and that you have an admin kubeconfig file for it,
|
||||
# and that you have already built/pushed the Pinniped container image that you would like to test,
|
||||
# then you can use this script to deploy in preparation for integration or manual testing.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
function log_note() {
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m'
|
||||
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
|
||||
echo -e "${GREEN}$*${NC}"
|
||||
else
|
||||
echo "$*"
|
||||
fi
|
||||
}
|
||||
|
||||
function log_error() {
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m'
|
||||
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
|
||||
echo -e "🙁${RED} Error: $* ${NC}"
|
||||
else
|
||||
echo ":( Error: $*"
|
||||
fi
|
||||
}
|
||||
|
||||
function check_dependency() {
|
||||
if ! command -v "$1" >/dev/null; then
|
||||
log_error "Missing dependency..."
|
||||
log_error "$2"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#
|
||||
# Handle argument parsing and help message
|
||||
#
|
||||
help=no
|
||||
kubeconfig=""
|
||||
image_tag=""
|
||||
image_repo=""
|
||||
pinniped_repo=""
|
||||
cluster_type=""
|
||||
image_digest=""
|
||||
|
||||
while (("$#")); do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
help=yes
|
||||
shift
|
||||
;;
|
||||
-k | --kubeconfig)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "-k|--kubeconfig requires a kubeconfig path to be specified"
|
||||
exit 1
|
||||
fi
|
||||
kubeconfig=$1
|
||||
shift
|
||||
;;
|
||||
-t | --image-tag)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "-t|--image-tag requires a tag to be specified"
|
||||
exit 1
|
||||
fi
|
||||
image_tag=$1
|
||||
shift
|
||||
;;
|
||||
-d | --image-digest)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "--d|--image-digest requires a digest to be specified"
|
||||
exit 1
|
||||
fi
|
||||
image_digest=$1
|
||||
shift
|
||||
;;
|
||||
-r | --image-repo)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "-r|--image-repo requires an image repo to be specified"
|
||||
exit 1
|
||||
fi
|
||||
image_repo=$1
|
||||
shift
|
||||
;;
|
||||
-p | --pinniped-repo)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "-p|--pinniped-repo requires a path to the pinniped repo to be specified"
|
||||
exit 1
|
||||
fi
|
||||
pinniped_repo=$1
|
||||
shift
|
||||
;;
|
||||
-c | --cluster-type)
|
||||
shift
|
||||
# If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error
|
||||
if [[ "$#" == "0" || "$1" == -* ]]; then
|
||||
log_error "-c|--cluster-type requires the type of the cluster to be specified"
|
||||
exit 1
|
||||
fi
|
||||
cluster_type=$1
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
log_error "Unsupported flag $1" >&2
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
log_error "Unsupported positional arg $1" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Note that if you are using a remote kind cluster then it might be more convenient to use this public repo:
|
||||
# ghcr.io/pinniped-ci-bot/manual-test-pinniped-images
|
||||
# You can give yourself permission to push to that repo at:
|
||||
# https://github.com/users/pinniped-ci-bot/packages/container/manual-test-pinniped-images/settings
|
||||
default_image_repo="gcr.io/$PINNIPED_GCP_PROJECT/manual-test-pinniped-images"
|
||||
default_image_tag="latest"
|
||||
|
||||
if [[ "$help" == "yes" ]]; then
|
||||
me="$(basename "${BASH_SOURCE[0]}")"
|
||||
log_note "Usage:"
|
||||
log_note " $me [flags]"
|
||||
log_note
|
||||
log_note "Flags:"
|
||||
log_note " -h, --help: print this usage"
|
||||
log_note " -k, --kubeconfig: path to the kubeconfig for your cluster (required)"
|
||||
log_note " -c, --cluster-type: the type of cluster targeted by the kubeconfig, either 'gke' or 'kind' (required)"
|
||||
log_note " -r, --image-repo: image registry/repository for Pinniped server container image to deploy (default: $default_image_repo)"
|
||||
log_note " -t, --image-tag: image tag for Pinniped server container image to deploy (default: $default_image_tag)"
|
||||
log_note " -d, --image-digest: image digest for Pinniped server container image to deploy. Takes precedence over --image-tag."
|
||||
log_note " -p, --pinniped-repo: path to pinniped git repo (default: a sibling directory called pinniped)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$kubeconfig" == "" ]]; then
|
||||
log_error "no kubeconfig set. -k|--kubeconfig is a required option."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$kubeconfig" != "/"* ]]; then
|
||||
# If it looks like a relative path then make it an absolute path because we are going to pushd below.
|
||||
kubeconfig="$(pwd)/$kubeconfig"
|
||||
fi
|
||||
|
||||
if [[ ! -f "$kubeconfig" ]]; then
|
||||
log_error "specified kubeconfig file does not exist: $kubeconfig"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$cluster_type" != "gke" && "$cluster_type" != "kind" && "$cluster_type" != "aks" && "$cluster_type" != "eks" ]]; then
|
||||
log_error "specified cluster type must be 'kind', 'eks', 'aks', or 'gke'. -c|--cluster-type is a required option."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$pinniped_repo" == "" ]]; then
|
||||
pinniped_repo="$ROOT/../pinniped"
|
||||
log_note "no pinniped repo path set, defaulting to $pinniped_repo"
|
||||
fi
|
||||
|
||||
if [[ ! (-d "$pinniped_repo" && -d "$pinniped_repo/deploy" && -d "$pinniped_repo/test/cluster_capabilities") ]]; then
|
||||
log_error "$pinniped_repo does not appear to contain the pinniped source code repo"
|
||||
fi
|
||||
|
||||
if [[ "$image_repo" == "" ]]; then
|
||||
image_repo="$default_image_repo"
|
||||
log_note "no image repo set, defaulting to $image_repo"
|
||||
fi
|
||||
|
||||
if [[ "$image_tag" == "" ]]; then
|
||||
image_tag="$default_image_tag"
|
||||
log_note "no image tag set, defaulting to $image_tag"
|
||||
fi
|
||||
|
||||
cluster_capabilities_path="$pinniped_repo/test/cluster_capabilities/$cluster_type.yaml"
|
||||
if [[ ! -f "$cluster_capabilities_path" ]]; then
|
||||
log_error "cluster type capabilities file does not exist: $cluster_capabilities_path"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
check_dependency ytt "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS"
|
||||
check_dependency kapp "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
|
||||
check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
|
||||
check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux."
|
||||
check_dependency openssl "Please install openssl. Should be pre-installed on MacOS."
|
||||
check_dependency nmap "Please install nmap. e.g. 'brew install nmap' for MacOS"
|
||||
|
||||
#
|
||||
# Finished checking arguments and dependencies. Now actually do the work...
|
||||
#
|
||||
export KUBECONFIG="$kubeconfig"
|
||||
export IMAGE_TAG="$image_tag"
|
||||
export IMAGE_REPO="$image_repo"
|
||||
if [[ "$image_digest" != "" ]]; then
|
||||
export IMAGE_DIGEST="$image_digest"
|
||||
fi
|
||||
|
||||
pushd "$pinniped_repo" >/dev/null
|
||||
|
||||
PINNIPED_TEST_CLUSTER_CAPABILITY_FILE="${cluster_capabilities_path}" \
|
||||
DEPLOY_LOCAL_USER_AUTHENTICATOR=yes \
|
||||
DEPLOY_TEST_TOOLS=yes \
|
||||
CONCIERGE_APP_NAME="concierge" \
|
||||
CONCIERGE_NAMESPACE="concierge" \
|
||||
SUPERVISOR_APP_NAME="supervisor" \
|
||||
SUPERVISOR_NAMESPACE="supervisor" \
|
||||
USE_LOAD_BALANCERS_FOR_DEX_AND_SUPERVISOR="yes" \
|
||||
"$ROOT/pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh"
|
||||
|
||||
popd >/dev/null
|
||||
|
||||
log_note
|
||||
log_note "🚀 Ready to run integration tests! For example..."
|
||||
|
||||
case "$cluster_type" in
|
||||
gke | aks | eks)
|
||||
log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh"
|
||||
;;
|
||||
kind)
|
||||
log_note "KUBECONFIG='$KUBECONFIG' TEST_ENV_PATH='/tmp/integration-test-env' SOURCE_PATH='$pinniped_repo' START_GCLOUD_PROXY=yes GCP_PROJECT=$PINNIPED_GCP_PROJECT GCP_ZONE=us-west1-a $ROOT/pipelines/shared-tasks/run-integration-tests/task.sh"
|
||||
;;
|
||||
*)
|
||||
log_error "Huh? Should never get here."
|
||||
;;
|
||||
esac
|
||||
19
hack/rebuild-codegen-images.sh
Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

# Copyright 2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Define some env vars
source "$script_dir/fly-helpers.sh"

# Setup and login if needed
"$ROOT_DIR"/hack/setup-fly.sh

# Start all the build-k8s-code-generator-* jobs in the dockerfile-builders pipeline.
for j in $($FLY_CLI --target "$CONCOURSE_TARGET" jobs --pipeline dockerfile-builders --json | jq -r '.[].name' | grep build-k8s-code-generator-); do
  $FLY_CLI --target "$CONCOURSE_TARGET" trigger-job --job "dockerfile-builders/$j"
done
85
hack/remote-workstation/create.sh
Executable file
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${SHARED_VPC_PROJECT:-}" ]]; then
|
||||
echo "SHARED_VPC_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${SUBNET_NAME:-}" ]]; then
|
||||
echo "SUBNET_NAME env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${DISK_IMAGES_PROJECT:-}" ]]; then
|
||||
echo "DISK_IMAGES_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
|
||||
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
|
||||
project="$PINNIPED_GCP_PROJECT"
|
||||
zone="us-west1-a"
|
||||
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Create a VM called $instance_name with some reasonable compute power and disk.
|
||||
echo "Creating VM with name $instance_name..."
|
||||
gcloud compute instances create "$instance_name" \
|
||||
--project="$project" --zone="$zone" \
|
||||
--machine-type="e2-standard-8" \
|
||||
--network-interface=stack-type=IPV4_ONLY,subnet=projects/"$SHARED_VPC_PROJECT"/regions/us-west1/subnetworks/"${SUBNET_NAME}",no-address \
|
||||
--create-disk=auto-delete=yes,boot=yes,device-name="$instance_name",image=projects/"${DISK_IMAGES_PROJECT}"/global/images/labs-saas-gcp-debian12-packer-latest,mode=rw,size=40,type=pd-ssd
|
||||
|
||||
# Make a private key for ssh.
|
||||
ssh_key_file="$HOME/.ssh/gcp-remote-workstation-key"
|
||||
if [[ ! -f "$ssh_key_file" ]]; then
|
||||
ssh-keygen -t rsa -b 4096 -q -N "" -f "$ssh_key_file"
|
||||
fi
|
||||
|
||||
# Add the key only to the specific VM instance (as VM metadata).
|
||||
echo "${instance_user}:$(cat "${ssh_key_file}.pub")" >/tmp/ssh-key-values
|
||||
gcloud compute instances add-metadata "$instance_name" \
|
||||
--metadata-from-file ssh-keys=/tmp/ssh-key-values \
|
||||
--zone "$zone" --project "$project"
|
||||
|
||||
# Get the IP so we can use regular ssh (not gcloud ssh).
|
||||
gcloud_instance_ip=$(gcloud compute instances describe \
|
||||
--zone "$zone" --project "$project" "${instance_name}" \
|
||||
--format='get(networkInterfaces[0].networkIP)')
|
||||
|
||||
ssh_dest="${instance_user}@${gcloud_instance_ip}"
|
||||
|
||||
# Wait for the ssh server of the new instance to be ready.
|
||||
attempts=0
|
||||
while ! ssh -i "$ssh_key_file" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$ssh_dest" echo connection test; do
|
||||
echo "Waiting for ssh server to start ..."
|
||||
attempts=$((attempts + 1))
|
||||
if [[ $attempts -gt 25 ]]; then
|
||||
echo "ERROR: ssh server never accepted connections after waiting for a while"
|
||||
exit 1
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Copy the deps script to the new VM.
|
||||
echo "Copying deps.sh to $instance_name..."
|
||||
scp -i "$ssh_key_file" \
|
||||
-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
|
||||
"$here"/lib/deps.sh "$ssh_dest":/tmp
|
||||
|
||||
# Run the deps script on the new VM.
|
||||
"$here"/ssh.sh /tmp/deps.sh
|
||||
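The remote-workstation scripts are designed to be used together; a typical lifecycle might look like this sketch (REMOTE_INSTANCE_NAME defaults to your local $USER; the project name is a placeholder, and create.sh additionally requires the SHARED_VPC_PROJECT, SUBNET_NAME, and DISK_IMAGES_PROJECT env vars shown above):

    export PINNIPED_GCP_PROJECT=my-gcp-project    # placeholder project name
    ./hack/remote-workstation/create.sh           # create the VM and install its dependencies
    ./hack/remote-workstation/ssh.sh              # open an interactive shell on the VM
    ./hack/remote-workstation/stop.sh             # stop the VM to save money
    ./hack/remote-workstation/start.sh            # start it again later
    ./hack/remote-workstation/delete.sh           # destroy it when no longer needed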
26
hack/remote-workstation/delete.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
|
||||
project="$PINNIPED_GCP_PROJECT"
|
||||
zone="us-west1-a"
|
||||
|
||||
# Delete the instance forever. Will prompt for confirmation.
|
||||
echo "Destroying VM $instance_name..."
|
||||
gcloud compute instances delete "$instance_name" \
|
||||
--delete-disks="all" \
|
||||
--project="$project" --zone="$zone"
|
||||
91
hack/remote-workstation/lib/deps.sh
Executable file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -exuo pipefail
|
||||
|
||||
# Start in the user's home directory.
|
||||
cd
|
||||
|
||||
# Install brew pre-reqs documented at https://docs.brew.sh/Homebrew-on-Linux#requirements
|
||||
sudo apt-get update && sudo apt-get install build-essential procps curl file git -y
|
||||
# Brew installer command from https://brew.sh. Note that CI=1 turns off an interactive prompt.
|
||||
CI=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
# The installer prints more instructions. It advises you to add brew to profile and install gcc.
|
||||
echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >>$HOME/.profile
|
||||
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
|
||||
brew install gcc
|
||||
|
||||
# Install go.
|
||||
brew install go
|
||||
|
||||
# Install and configure zsh and plugins.
|
||||
brew install zsh zsh-history-substring-search
|
||||
brew install fzf
|
||||
/home/linuxbrew/.linuxbrew/opt/fzf/install --all --no-bash --no-fish
|
||||
# Install https://ohmyz.sh
|
||||
export PATH=$PATH:/home/linuxbrew/.linuxbrew/bin
|
||||
CHSH=no RUNZSH=no KEEP_ZSHRC=yes sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
|
||||
# Install some plugins.
|
||||
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$HOME"/.oh-my-zsh/custom/themes/powerlevel10k
|
||||
git clone https://github.com/zsh-users/zsh-autosuggestions "$HOME"/.oh-my-zsh/custom/plugins/zsh-autosuggestions
|
||||
git clone https://github.com/TamCore/autoupdate-oh-my-zsh-plugins "$HOME"/.oh-my-zsh/plugins/autoupdate
|
||||
git clone https://github.com/zdharma-continuum/fast-syntax-highlighting.git "$HOME"/.oh-my-zsh/custom/plugins/fast-syntax-highlighting
|
||||
# Get decent .zshrc and .p10k.zsh files.
|
||||
curl -fsSL https://gist.githubusercontent.com/cfryanr/c84ca9e3fe519b5a7f07426ecc7e3a7c/raw >"$HOME"/.zshrc
|
||||
curl -fsSL https://gist.githubusercontent.com/cfryanr/3e55b770b9be485bd8671377ce04a3f1/raw >"$HOME"/.p10k.zsh
|
||||
# Change the user's default shell.
|
||||
sudo chsh -s /home/linuxbrew/.linuxbrew/bin/zsh "$USER"
|
||||
|
||||
# Get some other useful config files.
|
||||
curl -fsSL https://gist.githubusercontent.com/cfryanr/153e167a1f2c20934fbc4dc32bbec8f2/raw >"$HOME"/.gitconfig
|
||||
curl -fsSL https://gist.githubusercontent.com/cfryanr/80ada8af9a78f08b368327401ea80b6c/raw >"$HOME"/.git-authors
|
||||
|
||||
# Install other useful packages.
|
||||
brew tap homebrew/command-not-found
|
||||
brew tap carvel-dev/carvel
|
||||
brew install ytt kbld kapp imgpkg kwt vendir
|
||||
brew install git git-duet/tap/git-duet pre-commit gh
|
||||
brew install k9s kind kubectl kubectx stern
|
||||
brew install acarl005/homebrew-formulas/ls-go ripgrep procs bat tokei git-delta dust fd httpie chroma
|
||||
brew install watch htop wget
|
||||
brew install jesseduffield/lazydocker/lazydocker ctop dive
|
||||
brew install jq yq
|
||||
brew install grip
|
||||
brew install aws-iam-authenticator
|
||||
brew install step cfssl
|
||||
brew install nmap
|
||||
sudo apt-get install apache2-utils rsync -y
|
||||
|
||||
# Install Chrome
|
||||
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
|
||||
sudo apt install ./google-chrome-stable_current_amd64.deb -y
|
||||
rm ./google-chrome-stable_current_amd64.deb
|
||||
google-chrome --version
|
||||
mkdir "$HOME"/bin
|
||||
|
||||
# Install docker according to procedure from https://docs.docker.com/engine/install/debian/
|
||||
sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
|
||||
sudo apt-get update
|
||||
sudo apt-get install docker-ce docker-ce-cli containerd.io -y
|
||||
sudo usermod -aG docker "$USER"
|
||||
sudo systemctl enable docker.service
|
||||
sudo systemctl enable containerd.service
|
||||
|
||||
# Set up the Pinniped repo
|
||||
mkdir workspace
|
||||
pushd workspace
|
||||
ssh-keyscan -H github.com >> $HOME/.ssh/known_hosts
|
||||
git clone https://github.com/vmware/pinniped.git
|
||||
pushd pinniped
|
||||
pre-commit install
|
||||
./hack/install-linter.sh
|
||||
popd
|
||||
popd
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo "Successfully installed deps!"
|
||||
67
hack/remote-workstation/rsync-to-local.sh
Executable file
@@ -0,0 +1,67 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2022-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# This is similar to rsync.sh, but with the src and dest flipped at the end.
|
||||
# It will copy all changes from the remote workstation back to your local machine (overwriting your local changes).
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"}
|
||||
src_dir_parent=$(dirname "$SRC_DIR")
|
||||
dest_dir="./workspace/pinniped"
|
||||
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
|
||||
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
|
||||
project="$PINNIPED_GCP_PROJECT"
|
||||
zone="us-west1-a"
|
||||
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ssh_key_file="$HOME/.ssh/gcp-remote-workstation-key"
|
||||
|
||||
# Get the IP so we can use regular ssh (not gcloud ssh).
|
||||
gcloud_instance_ip=$(gcloud compute instances describe \
|
||||
--zone "$zone" --project "$project" "${instance_name}" \
|
||||
--format='get(networkInterfaces[0].networkIP)')
|
||||
|
||||
ssh_dest="${instance_user}@${gcloud_instance_ip}"
|
||||
|
||||
if [[ ! -d "$SRC_DIR" ]]; then
|
||||
echo "ERROR: $SRC_DIR does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "$SRC_DIR"
|
||||
local_commit=$(git rev-parse HEAD)
|
||||
remote_commit=$("$here"/ssh.sh "cd $dest_dir; git rev-parse HEAD" 2>/dev/null | tr -dc '[:print:]')
|
||||
|
||||
if [[ -z "$local_commit" || -z "$remote_commit" ]]; then
|
||||
echo "ERROR: Could not determine currently checked out git commit sha"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$local_commit" != "$remote_commit" ]]; then
|
||||
echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake."
|
||||
echo "Local was $SRC_DIR at ${local_commit}"
|
||||
echo "Remote was ${instance_name}:${dest_dir} at ${remote_commit}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Skip large files because they are probably compiled binaries.
|
||||
# Also skip other common filenames that we wouldn't need to sync.
|
||||
echo "Starting rsync from remote to local for $SRC_DIR..."
|
||||
rsync \
|
||||
--progress --delete --archive --compress --human-readable \
|
||||
--max-size 200K \
|
||||
--exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \
|
||||
--rsh "ssh -i '$ssh_key_file' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
|
||||
"$ssh_dest:$dest_dir" "$src_dir_parent"
|
||||
66
hack/remote-workstation/rsync.sh
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SRC_DIR=${SRC_DIR:-"$HOME/workspace/pinniped"}
|
||||
dest_dir="./workspace"
|
||||
instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
|
||||
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
|
||||
project="$PINNIPED_GCP_PROJECT"
|
||||
zone="us-west1-a"
|
||||
here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ssh_key_file="$HOME/.ssh/gcp-remote-workstation-key"
|
||||
|
||||
# Get the IP so we can use regular ssh (not gcloud ssh).
|
||||
gcloud_instance_ip=$(gcloud compute instances describe \
|
||||
--zone "$zone" --project "$project" "${instance_name}" \
|
||||
--format='get(networkInterfaces[0].networkIP)')
|
||||
|
||||
ssh_dest="${instance_user}@${gcloud_instance_ip}"
|
||||
|
||||
if [[ ! -d "$SRC_DIR" ]]; then
|
||||
echo "ERROR: $SRC_DIR does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "$SRC_DIR"
|
||||
local_commit=$(git rev-parse HEAD)
|
||||
remote_commit=$("$here"/ssh.sh "cd $dest_dir/pinniped; git rev-parse HEAD" 2>/dev/null | tr -dc '[:print:]')
|
||||
|
||||
if [[ -z "$local_commit" || -z "$remote_commit" ]]; then
|
||||
echo "ERROR: Could not determine currently checked out git commit sha"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$local_commit" != "$remote_commit" ]]; then
|
||||
echo "ERROR: Local and remote repos are not on the same commit. This is usually a mistake."
|
||||
echo "Local was $SRC_DIR at ${local_commit}"
|
||||
echo "Remote was ${instance_name}:${dest_dir}/pinniped at ${remote_commit}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Skip large files because they are probably compiled binaries.
|
||||
# Also skip other common filenames that we wouldn't need to sync.
|
||||
echo "Starting rsync for $SRC_DIR..."
|
||||
rsync \
|
||||
--progress --delete --archive --compress --human-readable \
|
||||
--max-size 200K \
|
||||
--exclude .git/ --exclude .idea/ --exclude .DS_Store --exclude '*.test' --exclude '*.out' \
|
||||
--rsh "ssh -i '$ssh_key_file' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
|
||||
"$SRC_DIR" "$ssh_dest:$dest_dir"
|
||||
34
hack/remote-workstation/ssh.sh
Executable file
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
  echo "PINNIPED_GCP_PROJECT env var must be set"
  exit 1
fi

if ! gcloud auth print-access-token &>/dev/null; then
  echo "Please run \`gcloud auth login\` and try again."
  exit 1
fi

instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
instance_user="${REMOTE_INSTANCE_USERNAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-west1-a"
ssh_key_file="$HOME/.ssh/gcp-remote-workstation-key"

# Get the IP so we can use regular ssh (not gcloud ssh).
gcloud_instance_ip=$(gcloud compute instances describe \
  --zone "$zone" --project "$project" "${instance_name}" \
  --format='get(networkInterfaces[0].networkIP)')

ssh_dest="${instance_user}@${gcloud_instance_ip}"

# Run ssh with identities forwarded so you can use them with git on the remote host.
# Optionally run an arbitrary command on the remote host.
# By default, start an interactive session.
ssh -i "$ssh_key_file" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -A "$ssh_dest" -- "$@"
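For example (the remote command is arbitrary; the repo path matches where deps.sh clones pinniped):

    ./hack/remote-workstation/ssh.sh                                        # interactive session
    ./hack/remote-workstation/ssh.sh 'cd workspace/pinniped && git status'  # run a single command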
25
hack/remote-workstation/start.sh
Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
  echo "PINNIPED_GCP_PROJECT env var must be set"
  exit 1
fi

if ! gcloud auth print-access-token &>/dev/null; then
  echo "Please run \`gcloud auth login\` and try again."
  exit 1
fi

instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-west1-a"

# Start an instance which was previously stopped to save money.
echo "Starting VM $instance_name..."
gcloud compute instances start "$instance_name" \
  --project="$project" --zone="$zone"
25
hack/remote-workstation/stop.sh
Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
  echo "PINNIPED_GCP_PROJECT env var must be set"
  exit 1
fi

if ! gcloud auth print-access-token &>/dev/null; then
  echo "Please run \`gcloud auth login\` and try again."
  exit 1
fi

instance_name="${REMOTE_INSTANCE_NAME:-${USER}}"
project="$PINNIPED_GCP_PROJECT"
zone="us-west1-a"

# Stop the instance, to save money, in a way that it can be restarted.
echo "Stopping VM $instance_name..."
gcloud compute instances stop "$instance_name" \
  --project="$project" --zone="$zone"
87
hack/run-integration-tests.sh
Executable file
@@ -0,0 +1,87 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# This script will prepare to run the integration tests and then run them.
|
||||
# It is a wrapper for prepare-for-integration-tests.sh to make it convenient
|
||||
# to run the integration tests, potentially running them repeatedly.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
help=no
|
||||
skip_build=no
|
||||
delete_kind_cluster=no
|
||||
|
||||
PARAMS=""
|
||||
while (("$#")); do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
help=yes
|
||||
shift
|
||||
;;
|
||||
-s | --skip-build)
|
||||
skip_build=yes
|
||||
shift
|
||||
;;
|
||||
-c | --from-clean-cluster)
|
||||
delete_kind_cluster=yes
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
echo "Error: Unsupported flag $1" >&2
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
PARAMS="$PARAMS $1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
eval set -- "$PARAMS"
|
||||
|
||||
if [[ "$help" == "yes" ]]; then
|
||||
me="$(basename "${BASH_SOURCE[0]}")"
|
||||
echo "Usage:"
|
||||
echo " $me [flags] [path/to/pinniped]"
|
||||
echo
|
||||
echo " path/to/pinniped default: \$PWD ($PWD)"
|
||||
echo
|
||||
echo "Flags:"
|
||||
echo " -h, --help: print this usage"
|
||||
echo " -s, --skip-build: reuse the most recently built image of the app instead of building"
|
||||
echo " -c, --from-clean-cluster: delete and rebuild the kind cluster before running tests"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pinniped_path="${1-$PWD}"
|
||||
cd "$pinniped_path" || exit 1
|
||||
|
||||
if [[ ! -f Dockerfile || ! -d deploy ]]; then
|
||||
echo "$pinniped_path does not appear to be the path to the source code repo directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v kind >/dev/null; then
|
||||
echo "Please install kind. e.g. 'brew install kind' for MacOS"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$delete_kind_cluster" == "yes" ]]; then
|
||||
echo "Deleting running kind clusters to prepare a clean slate..."
|
||||
"$pinniped_path"/hack/kind-down.sh
|
||||
fi
|
||||
|
||||
if [[ "$skip_build" == "yes" ]]; then
|
||||
"$pinniped_path"/hack/prepare-for-integration-tests.sh --skip-build
|
||||
else
|
||||
"$pinniped_path"/hack/prepare-for-integration-tests.sh
|
||||
fi
|
||||
|
||||
source /tmp/integration-test-env
|
||||
|
||||
ulimit -n 512
|
||||
|
||||
echo
|
||||
echo "Running integration tests..."
|
||||
go test -race -v -count 1 -timeout 0 ./test/integration
|
||||
echo "ALL INTEGRATION TESTS PASSED"
|
||||
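For example, from the pinniped source repo (the sibling-directory layout is assumed):

    cd ~/workspace/pinniped                                                    # assumed checkout location
    ../pinniped-ci-branch/hack/run-integration-tests.sh --from-clean-cluster   # rebuild the kind cluster, then run the suite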
36
hack/setup-fly.sh
Executable file
@@ -0,0 +1,36 @@
#!/usr/bin/env bash

# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Define some env vars
source "$script_dir/fly-helpers.sh"

# Install the fly cli if needed
if [[ ! -f "$FLY_CLI" ]]; then
  curl -fL "$CONCOURSE_URL/api/v1/cli?arch=amd64&platform=darwin" -o "$FLY_CLI"
  chmod 755 "$FLY_CLI"
fi

if $FLY_CLI targets | grep ^"$CONCOURSE_TARGET" | grep -q 'https://ci\.pinniped\.dev'; then
  # The user has the old ci.pinniped.dev target. Remove it so we can replace it.
  $FLY_CLI delete-target --target "$CONCOURSE_TARGET"
fi

if ! $FLY_CLI targets | tr -s ' ' | cut -f1 -d ' ' | grep -q "$CONCOURSE_TARGET"; then
  # Create the target if needed
  $FLY_CLI --target "$CONCOURSE_TARGET" login \
    --team-name "$CONCOURSE_TEAM" --concourse-url "$CONCOURSE_URL"
else
  # Login if needed
  if ! $FLY_CLI --target "$CONCOURSE_TARGET" status; then
    $FLY_CLI --target "$CONCOURSE_TARGET" login
  fi
fi

# Upgrade fly if needed
$FLY_CLI --target "$CONCOURSE_TARGET" sync
39
hack/update-copyright-year.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/bash

# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

if [[ "$OSTYPE" != "darwin"* ]]; then
  echo "This script was only written for MacOS (due to differences with Linux sed flags)"
  exit 1
fi

files=$(git diff --cached --name-only)
year=$(date +"%Y")

missing_copyright_files=()

for f in $files; do
  head -10 "$f" | grep -i 'Copyright.*the Pinniped contributors' 2>&1 1>/dev/null || continue

  if ! head -10 "$f" | grep -i -e "Copyright.*$year.*the Pinniped contributors" 2>&1 1>/dev/null; then
    missing_copyright_files+=("$f")
  fi
done

if [[ "${#missing_copyright_files[@]}" -gt "0" ]]; then
  echo "Fixing copyright notice in the following files:"
  for f in "${missing_copyright_files[@]}"; do
    echo " $f"
    # The rule when updating copyrights is to always keep the starting year,
    # and to replace the ending year with the current year.
    # This uses MacOS sed flags to replace "XXXX-YYYY" with "XXXX-year" in the copyright notice.
    sed -E -e 's/Copyright ([0-9]{4})-([0-9]{4}) the Pinniped contributors/Copyright \1-'"$year"' the Pinniped contributors/' -i '' "$f"
    # This uses MacOS sed flags to replace "XXXX" with "XXXX-year" in the copyright notice.
    sed -E -e 's/Copyright ([0-9]{4}) the Pinniped contributors/Copyright \1-'"$year"' the Pinniped contributors/' -i '' "$f"
  done
  echo "Done!"
  exit 1
fi
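As a worked example of the two sed rules, assuming the current year is 2025, a staged file's notice would be rewritten as follows (the starting year is always preserved):

    # Copyright 2021-2024 the Pinniped contributors...   becomes   # Copyright 2021-2025 the Pinniped contributors...
    # Copyright 2021 the Pinniped contributors...        becomes   # Copyright 2021-2025 the Pinniped contributors...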
62
infra/README.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# Installing and operating Concourse
|
||||
|
||||
Concourse is made up of a web deployment a worker deployment.
|
||||
|
||||
## Terraform
|
||||
|
||||
We use Terraform to create and update the IaaS infrastructure on which we run all the Concourse components.
|
||||
This infrastructure must be created before deploying the corresponding Concourse components.
|
||||
|
||||
### Infrastructure Providers
|
||||
|
||||
We use Google Cloud for the infrastructure.
|
||||
|
||||
### Running Terraform
|
||||
|
||||
See [infra/terraform/gcloud/README.md](./terraform/gcloud/README.md) for details of using Terraform
|
||||
to create or update the Google Cloud infrastructure for Concourse. This infrastructure will be used
|
||||
to run the web and internal workers.
|
||||
|
||||
## Bootstrapping Secrets (after Terraform)
|
||||
|
||||
Before deploying Concourse for the first time, the
|
||||
[infra/concourse-install/bootstrap-secrets.sh](./concourse-install/bootstrap-secrets.sh)
|
||||
script must be used to auto-generate some values and store them in a new secret in the Secrets Manager.
|
||||
This script only needs to be run once.
|
||||
|
||||
1. Create a github oauth client as described in https://concourse-ci.org/github-auth.html.
|
||||
The callback URI should be set to `https://ci.pinniped.broadcom.net/sky/issuer/callback`.
|
||||
Take note of the client ID and client secret for use in the next step.
|
||||
2. Run `GITHUB_CLIENT_ID=<your_client_id> GITHUB_CLIENT_SECRET=<your_client_secret> ./bootstrap-secrets.sh`.
|
||||
This will create a secret in the GCP Secrets Manager which includes the GitHub client info
|
||||
along with some auto-generated secrets.
|
||||
3. If you need to change the GitHub client's ID or secret later, edit the secret in GCP Secrets Manager,
|
||||
and then redeploy the web deployment.
|
||||
|
||||
## Web Deployment
|
||||
|
||||
The "brains" of Concourse is its web deployment. It can be created and updated by running the
|
||||
[infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh)
|
||||
script on your laptop.
|
||||
|
||||
## Worker Deployments
|
||||
|
||||
We run our workers on the same GKE cluster where we run the web component.
|
||||
|
||||
See [infra/concourse-install/*-internal-workers.sh](./concourse-install) for scripts to deploy/update the workers,
|
||||
scale the workers, and view the workers.
|
||||
|
||||
These workers can also be scaled by the jobs in the `concourse-workers` pipeline.
|
||||
|
||||
## Upgrading Concourse
|
||||
|
||||
To upgrade each deployment to a new version of Concourse:
|
||||
|
||||
1. If any infrastructure updates are needed, follow the terraform instructions again.
|
||||
2. Change the version of the Helm Chart in the source code of the script used to create each deployment,
|
||||
and then run each script to upgrade the deployment. Note that this will scale the internal workers deployment
|
||||
back to its default number of replicas.
|
||||
1. [infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-web.sh)
|
||||
2. [infra/concourse-install/deploy-concourse-web.sh](./concourse-install/deploy-concourse-internal-workers.sh)
|
||||
3. Commit and push those script changes.
|
||||
4. Trigger the CI jobs to scale the internal workers back to the desired number as needed.
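A minimal sketch for checking what is currently running before an upgrade, assuming the Helm release names and namespaces used by the deploy scripts in this directory (`concourse-web` in namespace `concourse-web`, `concourse-workers` in namespace `concourse-worker`):

```bash
# Show the deployed chart and app versions for each release (assumes kubectl/helm are
# already pointed at the GKE cluster, e.g. via gcloud container clusters get-credentials).
helm list --namespace concourse-web
helm list --namespace concourse-worker

# Confirm that the workers registered with the web component are healthy.
fly --target "$CONCOURSE_TARGET" workers
```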
71
infra/concourse-install/bootstrap-secrets.sh
Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Require env vars.
|
||||
if [[ -z "${GITHUB_CLIENT_ID:-}" ]]; then
|
||||
echo "GITHUB_CLIENT_ID env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${GITHUB_CLIENT_SECRET:-}" ]]; then
|
||||
echo "GITHUB_CLIENT_SECRET env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check pre-reqs.
|
||||
if ! command -v gcloud &>/dev/null; then
|
||||
echo "Please install the gcloud CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v yq &>/dev/null; then
|
||||
echo "Please install the yq CLI"
|
||||
exit
|
||||
fi
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create a temporary directory for secrets, cleaned up at the end of this script.
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
# Create the three keys required to install the Concourse web component.
|
||||
# See https://github.com/concourse/concourse-chart/tree/master#secrets
|
||||
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t rsa -f /keys/session-signing-key
|
||||
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/worker-key
|
||||
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/host-key
|
||||
# Create an extra keypair for our external workers so they can use a different private key
|
||||
# to avoid sharing the private key of the internal workers to other Kubernetes clusters.
|
||||
docker run -v "$TEMP_DIR":/keys --rm -it concourse/concourse generate-key -t ssh -f /keys/external-worker-key
|
||||
|
||||
# Create an encryption key for DB encryption at rest.
|
||||
printf "%s" "$(openssl rand -base64 24)" >"$TEMP_DIR/encryption-key"
|
||||
|
||||
# Write a tmp yaml file which bundles together all of the secrets from above.
|
||||
# The structure of the keys in this file matches the concourse helm chart's values.yaml inputs,
|
||||
# except for .secrets.externalWorkerKey which is our own custom key.
|
||||
SECRETS_FILE="$TEMP_DIR/secrets.yaml"
|
||||
echo "# This secret is auto-generated by infra/concourse-install/bootstrap-secrets.sh" >"$SECRETS_FILE"
|
||||
yq -i e ".secrets.hostKey = \"$(cat "$TEMP_DIR/host-key")\"" "$SECRETS_FILE" # TSA host key
|
||||
yq -i e ".secrets.hostKeyPub = \"$(cat "$TEMP_DIR/host-key.pub")\"" "$SECRETS_FILE" # TSA host key pub
|
||||
yq -i e ".secrets.sessionSigningKey = \"$(cat "$TEMP_DIR/session-signing-key")\"" "$SECRETS_FILE"
|
||||
yq -i e ".secrets.workerKey = \"$(cat "$TEMP_DIR/worker-key")\"" "$SECRETS_FILE"
|
||||
yq -i e ".secrets.externalWorkerKey = \"$(cat "$TEMP_DIR/external-worker-key")\"" "$SECRETS_FILE"
|
||||
# Put both public keys into the workerKeyPub secret, one on each line.
|
||||
yq -i e ".secrets.workerKeyPub = \"$(cat "$TEMP_DIR/worker-key.pub" "$TEMP_DIR/external-worker-key.pub")\"" "$SECRETS_FILE"
|
||||
yq -i e ".secrets.encryptionKey = \"$(cat "$TEMP_DIR/encryption-key")\"" "$SECRETS_FILE"
|
||||
yq -i e ".secrets.githubClientId = \"$GITHUB_CLIENT_ID\"" "$SECRETS_FILE"
|
||||
yq -i e ".secrets.githubClientSecret = \"$GITHUB_CLIENT_SECRET\"" "$SECRETS_FILE"
|
||||
|
||||
# Save the tmp yaml file into the GCP Secrets Manager for later use.
|
||||
gcloud secrets create concourse-install-bootstrap \
|
||||
--data-file "$SECRETS_FILE" \
|
||||
--project "$PINNIPED_GCP_PROJECT"
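After this script has run once, a quick way to confirm that the bootstrap secret exists and contains the expected keys is to read it back from Secrets Manager, the same way the deploy scripts later do. This is a minimal sketch; it assumes `PINNIPED_GCP_PROJECT` is exported and that your account is authorized to read the secret:

```bash
# List the keys stored in the bootstrap secret without printing their values.
gcloud secrets versions access latest \
  --secret="concourse-install-bootstrap" \
  --project "$PINNIPED_GCP_PROJECT" |
  yq eval '.secrets | keys'
```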
71
infra/concourse-install/delete-concourse-internal-workers.sh
Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# This script deletes the concourse worker from our GKE environment using Helm.
|
||||
|
||||
HELM_RELEASE_NAME="concourse-workers"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
if ! command -v gcloud &>/dev/null; then
|
||||
echo "Please install the gcloud CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v yq &>/dev/null; then
|
||||
echo "Please install the yq CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v kubectl &>/dev/null; then
|
||||
echo "Please install the kubectl CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v helm &>/dev/null; then
|
||||
echo "Please install the helm CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v terraform &>/dev/null; then
|
||||
echo "Please install the terraform CLI"
|
||||
exit
|
||||
fi
|
||||
# This is needed for running gcloud commands.
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
# This is needed for running terraform commands.
|
||||
if ! gcloud auth application-default print-access-token --quiet &>/dev/null; then
|
||||
echo "Please run \`gcloud auth application-default login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create a temporary directory for secrets, cleaned up at the end of this script.
|
||||
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
|
||||
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
|
||||
|
||||
# Get the output values from terraform.
|
||||
pushd "$script_dir/../terraform/gcloud" >/dev/null
|
||||
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
|
||||
popd >/dev/null
|
||||
|
||||
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
|
||||
# Download the admin kubeconfig for the cluster.
|
||||
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
|
||||
chmod 0600 "$KUBECONFIG"
|
||||
|
||||
# Dump out the cluster info for diagnostic purposes.
|
||||
kubectl cluster-info
|
||||
|
||||
# Delete the helm chart.
|
||||
helm uninstall -n concourse-worker "$HELM_RELEASE_NAME" \
|
||||
--debug \
|
||||
--wait
115
infra/concourse-install/deploy-concourse-internal-workers.sh
Executable file
@@ -0,0 +1,115 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# This script deploys the concourse worker component into our GKE environment using Helm
|
||||
# and secrets from GCP and Terraform.
|
||||
|
||||
HELM_RELEASE_NAME="concourse-workers"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
if ! command -v gcloud &>/dev/null; then
|
||||
echo "Please install the gcloud CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v yq &>/dev/null; then
|
||||
echo "Please install the yq CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v kubectl &>/dev/null; then
|
||||
echo "Please install the kubectl CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v helm &>/dev/null; then
|
||||
echo "Please install the helm CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v ytt &>/dev/null; then
|
||||
echo "Please install the ytt CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v terraform &>/dev/null; then
|
||||
echo "Please install the terraform CLI"
|
||||
exit
|
||||
fi
|
||||
# This is needed for running gcloud commands.
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
# This is needed for running terraform commands.
|
||||
if ! gcloud auth application-default print-access-token --quiet &>/dev/null; then
|
||||
echo "Please run \`gcloud auth application-default login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Add/update the concourse helm repository.
|
||||
helm repo add concourse https://concourse-charts.storage.googleapis.com/
|
||||
helm repo update concourse
|
||||
|
||||
# Create a temporary directory for secrets, cleaned up at the end of this script.
|
||||
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
|
||||
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
|
||||
|
||||
# Get the output values from terraform.
|
||||
pushd "$script_dir/../terraform/gcloud" >/dev/null
|
||||
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
|
||||
popd >/dev/null
|
||||
|
||||
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
|
||||
# Download the admin kubeconfig for the cluster.
|
||||
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
|
||||
chmod 0600 "$KUBECONFIG"
|
||||
|
||||
# Download some secrets. These were created once by bootstrap-secrets.sh.
|
||||
BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml"
|
||||
gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE"
|
||||
|
||||
TSA_HOST_KEY_PUB=$(yq eval '.secrets.hostKeyPub' "$BOOTSTRAP_SECRETS_FILE")
|
||||
WORKER_PRIVATE_KEY=$(yq eval '.secrets.workerKey' "$BOOTSTRAP_SECRETS_FILE")
|
||||
|
||||
# Dump out the cluster info for diagnostic purposes.
|
||||
kubectl cluster-info
|
||||
|
||||
# Some of the configuration options used below were inspired by how HushHouse runs on GKE.
|
||||
# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/workers/values.yaml
|
||||
|
||||
# Install/upgrade the helm chart.
|
||||
# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml
|
||||
# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart
|
||||
# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file.
|
||||
helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \
|
||||
--version 17.3.1 \
|
||||
--debug \
|
||||
--install \
|
||||
--wait \
|
||||
--create-namespace \
|
||||
--namespace concourse-worker \
|
||||
--values "$script_dir/internal-workers/values-workers.yaml" \
|
||||
--set concourse.worker.tsa.publicKey="$TSA_HOST_KEY_PUB" \
|
||||
--set concourse.worker.tsa.workerPrivateKey="$WORKER_PRIVATE_KEY" \
|
||||
--set secrets.workerKey="$WORKER_PRIVATE_KEY" \
|
||||
--set secrets.hostKeyPub="$TSA_HOST_KEY_PUB" \
|
||||
--post-renderer "$script_dir/internal-workers/ytt-helm-postrender-workers.sh"
|
||||
|
||||
# By default, it will not be possible for the autoscaler to scale down to one node.
|
||||
# The autoscaler logs will show that the kube-dns pod cannot be moved. See
|
||||
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
|
||||
# for how to view and interpret the autoscaler logs.
|
||||
# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error
|
||||
# that we were getting for the kube-dns pod in the logs.
|
||||
kubectl create poddisruptionbudget kube-dns-pdb \
|
||||
--namespace=kube-system \
|
||||
--selector k8s-app=kube-dns \
|
||||
--max-unavailable 1 \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
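To confirm that the kube-dns PodDisruptionBudget workaround described above is in place, and to see why the autoscaler declines to remove a node, a quick check like the following may help. This is a sketch only; the PDB name comes from the script above and the event filtering is approximate:

```bash
# Verify the PDB created above exists and allows kube-dns to be evicted during scale-down.
kubectl get poddisruptionbudget kube-dns-pdb --namespace kube-system

# Look for recent scale-down related events in kube-system.
kubectl get events --namespace kube-system --sort-by=.metadata.creationTimestamp | grep -i scale
```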
137
infra/concourse-install/deploy-concourse-web.sh
Executable file
@@ -0,0 +1,137 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# This script deploys the concourse web component into our GKE environment using Helm
|
||||
# and secrets from GCP and Terraform.
|
||||
|
||||
HELM_RELEASE_NAME="concourse-web"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
if ! command -v gcloud &>/dev/null; then
|
||||
echo "Please install the gcloud CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v yq &>/dev/null; then
|
||||
echo "Please install the yq CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v kubectl &>/dev/null; then
|
||||
echo "Please install the kubectl CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v helm &>/dev/null; then
|
||||
echo "Please install the helm CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v ytt &>/dev/null; then
|
||||
echo "Please install the ytt CLI"
|
||||
exit
|
||||
fi
|
||||
if ! command -v terraform &>/dev/null; then
|
||||
echo "Please install the terraform CLI"
|
||||
exit
|
||||
fi
|
||||
# This is needed for running gcloud commands.
|
||||
if ! gcloud auth print-access-token &>/dev/null; then
|
||||
echo "Please run \`gcloud auth login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
# This is needed for running terraform commands.
|
||||
if ! gcloud auth application-default print-access-token --quiet &>/dev/null; then
|
||||
echo "Please run \`gcloud auth application-default login\` and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Add/update the concourse helm repository.
|
||||
helm repo add concourse https://concourse-charts.storage.googleapis.com/
|
||||
helm repo update concourse
|
||||
|
||||
# Create a temporary directory for secrets, cleaned up at the end of this script.
|
||||
trap 'rm -rf "$DEPLOY_TEMP_DIR"' EXIT
|
||||
DEPLOY_TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
TERRAFORM_OUTPUT_FILE="$DEPLOY_TEMP_DIR/terraform-outputs.yaml"
|
||||
|
||||
# Get the output values from terraform.
|
||||
pushd "$script_dir/../terraform/gcloud" >/dev/null
|
||||
terraform output --json >"$TERRAFORM_OUTPUT_FILE"
|
||||
popd >/dev/null
|
||||
|
||||
CLUSTER_NAME=$(yq eval '.cluster-name.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
PROJECT=$(yq eval '.project.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
ZONE=$(yq eval '.zone.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
WEB_IP_ADDRESS=$(yq eval '.web-ip.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
WEB_HOSTNAME="ci.pinniped.broadcom.net"
|
||||
DB_IP_ADDRESS=$(yq eval '.database-ip.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
DB_USERNAME=$(yq eval '.database-username.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
DB_PASSWORD=$(yq eval '.database-password.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
DB_CA_CERT=$(yq eval '.database-ca-cert.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
DB_CLIENT_CERT=$(yq eval '.database-cert.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
DB_CLIENT_KEY=$(yq eval '.database-private-key.value' "$TERRAFORM_OUTPUT_FILE")
|
||||
|
||||
# Download the admin kubeconfig for the cluster.
|
||||
export KUBECONFIG="$DEPLOY_TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER_NAME" --project "$PROJECT" --zone "$ZONE"
|
||||
chmod 0600 "$KUBECONFIG"
|
||||
|
||||
# Download some secrets. These were created once by bootstrap-secrets.sh.
|
||||
BOOTSTRAP_SECRETS_FILE="$DEPLOY_TEMP_DIR/concourse-install-bootstrap.yaml"
|
||||
gcloud secrets versions access latest --secret="concourse-install-bootstrap" --project "$PROJECT" >"$BOOTSTRAP_SECRETS_FILE"
|
||||
|
||||
# Download the TLS cert for ci.pinniped.broadcom.net which was manually added as a secret.
|
||||
TLS_SECRETS_FILE="$DEPLOY_TEMP_DIR/tls-cert.yaml"
|
||||
gcloud secrets versions access latest --secret="ci-pinniped-broadcom-net-tls-cert" --project "$PROJECT" >"$TLS_SECRETS_FILE"
|
||||
TLS_CERT="$(yq eval '."cert.pem"' "$TLS_SECRETS_FILE")"
|
||||
TLS_KEY="$(yq eval '."key.pem"' "$TLS_SECRETS_FILE")"
|
||||
|
||||
# Dump out the cluster info for diagnostic purposes.
|
||||
kubectl cluster-info
|
||||
|
||||
# Configure ip-masq-agent to allow the pods to reach the private IP of the Cloud SQL server.
|
||||
kubectl apply -f "$script_dir/web/ip-masq-agent-configmap.yaml"
|
||||
|
||||
# Some of the configuration options used below were inspired by how HushHouse runs on GKE.
|
||||
# See https://github.com/concourse/hush-house/blob/master/deployments/with-creds/hush-house/values.yaml
|
||||
|
||||
# Install/upgrade the helm chart.
|
||||
# These settings are documented in https://github.com/concourse/concourse-chart/blob/master/values.yaml
|
||||
# Note that `--version` chooses the version of the concourse/concourse chart. Each version of the chart
|
||||
# chooses which version of Concourse to install by defaulting the value for `imageTag` in its values.yaml file.
|
||||
helm upgrade "$HELM_RELEASE_NAME" concourse/concourse \
|
||||
--version 17.3.1 \
|
||||
--debug \
|
||||
--install \
|
||||
--wait \
|
||||
--create-namespace \
|
||||
--namespace concourse-web \
|
||||
--values "$script_dir/web/values-web.yaml" \
|
||||
--values "$BOOTSTRAP_SECRETS_FILE" \
|
||||
--set web.service.api.loadBalancerIP="$WEB_IP_ADDRESS" \
|
||||
--set web.service.workerGateway.loadBalancerIP="$WEB_IP_ADDRESS" \
|
||||
--set concourse.web.externalUrl="https://$WEB_HOSTNAME" \
|
||||
--set concourse.web.postgres.host="$DB_IP_ADDRESS" \
|
||||
--set secrets.postgresUser="$DB_USERNAME" \
|
||||
--set secrets.postgresPassword="$DB_PASSWORD" \
|
||||
--set secrets.postgresCaCert="$DB_CA_CERT" \
|
||||
--set secrets.postgresClientCert="$DB_CLIENT_CERT" \
|
||||
--set secrets.postgresClientKey="$DB_CLIENT_KEY" \
|
||||
--set secrets.webTlsCert="$TLS_CERT" \
|
||||
--set secrets.webTlsKey="$TLS_KEY" \
|
||||
--post-renderer "$script_dir/web/ytt-helm-postrender-web.sh"
|
||||
|
||||
# By default, it will not be possible for the autoscaler to scale down to one node.
|
||||
# The autoscaler logs will show that the kube-dns pod cannot be moved. See
|
||||
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
|
||||
# for how to view and interpret the autoscaler logs.
|
||||
# This seems to be the workaround for the "no.scale.down.node.pod.kube.system.unmovable" error
|
||||
# that we were getting for the kube-dns pod in the logs.
|
||||
kubectl create poddisruptionbudget kube-dns-pdb \
|
||||
--namespace=kube-system \
|
||||
--selector k8s-app=kube-dns \
|
||||
--max-unavailable 1 \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
@@ -0,0 +1,24 @@
#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:overlay", "overlay")

#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS.
#! All containers must have requests equal to limits, including the initContainers.

#@overlay/match by=overlay.subset({"kind": "StatefulSet", "metadata":{"name":"concourse-worker"}}), expects=1
---
spec:
  template:
    spec:
      initContainers:
        - #@overlay/match by="name"
          name: concourse-worker-init-rm
          #@overlay/match missing_ok=True
          resources:
            limits:
              cpu: 1000m
              memory: 1Gi
            requests:
              cpu: 1000m
              memory: 1Gi
79
infra/concourse-install/internal-workers/values-workers.yaml
Normal file
@@ -0,0 +1,79 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-worker".
|
||||
fullnameOverride: concourse
|
||||
|
||||
web:
|
||||
enabled: false
|
||||
|
||||
postgresql:
|
||||
enabled: false
|
||||
|
||||
worker:
|
||||
# In an effort to save money, default to 1 worker.
|
||||
replicas: 1
|
||||
nodeSelector: { cloud.google.com/gke-nodepool: workers-1 } # the name of the nodepool from terraform
|
||||
hardAntiAffinity: true
|
||||
minAvailable: 0
|
||||
terminationGracePeriodSeconds: 3600
|
||||
livenessProbe:
|
||||
periodSeconds: 60
|
||||
failureThreshold: 10
|
||||
timeoutSeconds: 45
|
||||
resources:
|
||||
# Inspired by https://github.com/concourse/hush-house/blob/16f52e57c273282ebace68051b0fe9133dc3a04e/deployments/with-creds/workers/values.yaml#L30-L32
|
||||
#
|
||||
# Note that Kubernetes uses Ki (Kibibytes) and Gi (Gibibytes). You can do conversions by doing google
|
||||
# searches using the more commonly used names for those units, e.g. searching "29061248 KiB to GiB".
|
||||
#
|
||||
# Limit to using all available CPUs and most of the available memory in our e2-standard-8 VM nodes.
|
||||
# According to the "Allocatable" section of the "kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-1" output,
|
||||
# each node has 29061248 Ki, which is equal to 27.7149658203 Gi of memory allocatable,
|
||||
# and each node has 7910m cpu allocatable.
|
||||
#
|
||||
# By making our requests equal to our limits, we should be assigned "Guaranteed" QoS.
|
||||
# But we need to leave enough space for all other pods' requests too, because GKE runs several pods on each node automatically.
|
||||
# The first node in the node pool has the most pods scheduled on it, so we will choose our values based on the first node
|
||||
# by looking at its "Allocated resources" section of the describe output.
|
||||
# CPU:
|
||||
# - On the first node, the other pods' CPU requests total 1324m (16%).
|
||||
# - The available CPU for our pod is 7910m allocatable - 1324m allocated = 6586m remaining.
|
||||
# Memory:
|
||||
# - On the first node, the other pods' memory requests total 1394740096 (bytes) (4%) = 1.298952937126 Gi.
|
||||
# - The available memory for our pod is 27.7149658203 Gi - 1.298952937126 Gi = 26.4160128832 Gi.
|
||||
# However, Google can change these values over time, so we need to leave a little extra room
|
||||
# in case Google's pods take a little more later.
|
||||
#
|
||||
# In order for the pod to be assigned "Guaranteed" QoS, all the containers need to
|
||||
# have requests equal to limits, so the initContainer also has similar settings applied
|
||||
# by the init-container-overlay.yaml overlay.
|
||||
limits:
|
||||
cpu: 6480m
|
||||
memory: 26Gi
|
||||
requests:
|
||||
cpu: 6480m
|
||||
memory: 26Gi
|
||||
|
||||
persistence:
|
||||
worker:
|
||||
size: 375Gi
|
||||
storageClass: premium-rwo
|
||||
|
||||
concourse:
|
||||
worker:
|
||||
# rebalanceInterval: 2h
|
||||
baggageclaim:
|
||||
driver: overlay
|
||||
healthcheckTimeout: 40s
|
||||
runtime: containerd
|
||||
containerd:
|
||||
# networkPool: "10.254.0.0/16"
|
||||
# maxContainers is usually set to 250, but increasing it to see if we can squeeze more from each worker.
|
||||
maxContainers: 300
|
||||
restrictedNetworks:
|
||||
- 169.254.169.254/32
|
||||
tsa:
|
||||
hosts:
|
||||
# This service name must match the name decided by the web deployment
|
||||
- concourse-web-worker-gateway.concourse-web.svc.cluster.local:2222
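The CPU/memory limits in the comments above are derived from the node's allocatable resources minus what the GKE system pods already request. A minimal sketch for re-checking those numbers when resizing (the node pool label comes from the values above; the grep patterns are approximate):

```bash
# Show allocatable CPU/memory and the per-node "Allocated resources" summary
# for the worker node pool, to recompute the headroom available to the worker pod.
kubectl describe nodes -l cloud.google.com/gke-nodepool=workers-1 |
  grep -E -A 6 'Allocatable:|Allocated resources:'

# Example conversion: 29061248 Ki / (1024 * 1024) = ~27.71 Gi allocatable memory.
echo "scale=4; 29061248 / 1024 / 1024" | bc
```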
10
infra/concourse-install/internal-workers/ytt-helm-postrender-workers.sh
Executable file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

ytt -f "$script_dir/init-container-overlay-workers.yaml" -f-
65
infra/concourse-install/scale-down-concourse-internal-workers.sh
Executable file
@@ -0,0 +1,65 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
|
||||
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
|
||||
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CLUSTER="pinniped-concourse"
|
||||
PROJECT="$PINNIPED_GCP_PROJECT"
|
||||
ZONE="us-west1-c"
|
||||
STATEFULSET="concourse-worker"
|
||||
NAMESPACE="concourse-worker"
|
||||
NODEPOOL="workers-1"
|
||||
|
||||
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
|
||||
gcloud auth activate-service-account \
|
||||
"$GCP_USERNAME" \
|
||||
--key-file <(echo "$GCP_JSON_KEY") \
|
||||
--project "$PROJECT"
|
||||
fi
|
||||
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
# Download the admin kubeconfig for the GKE cluster created by terraform.
|
||||
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE"
|
||||
|
||||
current=$(kubectl get statefulset "$STATEFULSET" \
|
||||
--namespace "$NAMESPACE" \
|
||||
--output=jsonpath="{.spec.replicas}" \
|
||||
--kubeconfig="${KUBECONFIG}")
|
||||
|
||||
desired=$((current - 1))
|
||||
|
||||
echo "current scale=$current"
|
||||
echo "desired scale=$desired"
|
||||
|
||||
minNodes=$(gcloud container clusters describe "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE" \
|
||||
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount")
|
||||
|
||||
if [[ $desired -lt $minNodes ]]; then
|
||||
echo "ERROR: will not scale below the cluster autoscaler limit of $minNodes for the node pool"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kubectl scale \
|
||||
--current-replicas=$current \
|
||||
--replicas=$desired \
|
||||
--kubeconfig="${KUBECONFIG}" \
|
||||
--namespace "$NAMESPACE" \
|
||||
"statefulset/$STATEFULSET"
92
infra/concourse-install/scale-print-concourse-internal-workers.sh
Executable file
@@ -0,0 +1,92 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
|
||||
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
|
||||
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Define some env vars
|
||||
source "$script_dir/../../hack/fly-helpers.sh"
|
||||
# Setup and login if needed
|
||||
"$script_dir/../../hack/setup-fly.sh"
|
||||
|
||||
CLUSTER="pinniped-concourse"
|
||||
PROJECT="$PINNIPED_GCP_PROJECT"
|
||||
ZONE="us-west1-c"
|
||||
STATEFULSET="concourse-worker"
|
||||
NAMESPACE="concourse-worker"
|
||||
NODEPOOL="workers-1"
|
||||
|
||||
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
|
||||
gcloud auth activate-service-account \
|
||||
"$GCP_USERNAME" \
|
||||
--key-file <(echo "$GCP_JSON_KEY") \
|
||||
--project "$PINNIPED_GCP_PROJECT"
|
||||
fi
|
||||
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
# Download the admin kubeconfig for the GKE cluster created by terraform.
|
||||
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE"
|
||||
|
||||
current=$(kubectl get statefulset "$STATEFULSET" \
|
||||
--namespace "$NAMESPACE" \
|
||||
--output=jsonpath="{.spec.replicas}" \
|
||||
--kubeconfig="${KUBECONFIG}")
|
||||
|
||||
minNodes=$(gcloud container clusters describe "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE" \
|
||||
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.minNodeCount")
|
||||
|
||||
maxNodes=$(gcloud container clusters describe "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE" \
|
||||
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount")
|
||||
|
||||
echo
|
||||
echo "current scale=$current, min=$minNodes, max=$maxNodes"
|
||||
|
||||
echo
|
||||
echo "Current pods..."
|
||||
kubectl get pods \
|
||||
--output wide \
|
||||
--namespace "$NAMESPACE" \
|
||||
--kubeconfig="${KUBECONFIG}"
|
||||
|
||||
echo
|
||||
echo "Volumes usage for current pods..."
|
||||
kubectl get pods \
|
||||
--namespace "${NAMESPACE}" \
|
||||
--kubeconfig="${KUBECONFIG}" \
|
||||
--template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' \
|
||||
| xargs -n1 -I {} bash -c "echo \"{}: \" && kubectl exec {} -n ${NAMESPACE} -c concourse-worker --kubeconfig ${KUBECONFIG} -- df -ah /concourse-work-dir | sed \"s|^| |\"" \
|
||||
|
||||
echo
|
||||
echo "Current nodes in nodepool $NODEPOOL..."
|
||||
kubectl get nodes \
|
||||
-l cloud.google.com/gke-nodepool=$NODEPOOL \
|
||||
--kubeconfig="${KUBECONFIG}"
|
||||
|
||||
echo
|
||||
echo "Current fly workers..."
|
||||
$FLY_CLI --target "$CONCOURSE_TARGET" workers
|
||||
|
||||
echo ""
|
||||
echo "Note: If the number of pods, nodes, and fly workers are not all the same,"
|
||||
echo "and some time has passed since you have changed the scale, then something may be wrong."
65
infra/concourse-install/scale-up-concourse-internal-workers.sh
Executable file
@@ -0,0 +1,65 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# If scaling up or down the worker replicas does not cause the nodes to scale to match, then see
|
||||
# https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler-visibility#debugging_scenarios
|
||||
# Check the CPU and memory limit values documented in values-workers.yaml to see if they still fit onto the first node.
|
||||
|
||||
if [[ -z "${PINNIPED_GCP_PROJECT:-}" ]]; then
|
||||
echo "PINNIPED_GCP_PROJECT env var must be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CLUSTER="pinniped-concourse"
|
||||
PROJECT="$PINNIPED_GCP_PROJECT"
|
||||
ZONE="us-west1-c"
|
||||
STATEFULSET="concourse-worker"
|
||||
NAMESPACE="concourse-worker"
|
||||
NODEPOOL="workers-1"
|
||||
|
||||
if [[ -z "$(gcloud config list account --format "value(core.account)")" ]]; then
|
||||
gcloud auth activate-service-account \
|
||||
"$GCP_USERNAME" \
|
||||
--key-file <(echo "$GCP_JSON_KEY") \
|
||||
--project "$PROJECT"
|
||||
fi
|
||||
|
||||
trap 'rm -rf "$TEMP_DIR"' EXIT
|
||||
TEMP_DIR=$(mktemp -d) || exit 1
|
||||
|
||||
# Download the admin kubeconfig for the GKE cluster created by terraform.
|
||||
export KUBECONFIG="$TEMP_DIR/kubeconfig.yaml"
|
||||
gcloud container clusters get-credentials "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE"
|
||||
|
||||
current=$(kubectl get statefulset "$STATEFULSET" \
|
||||
--namespace "$NAMESPACE" \
|
||||
--output=jsonpath="{.spec.replicas}" \
|
||||
--kubeconfig="${KUBECONFIG}")
|
||||
|
||||
desired=$((current + 1))
|
||||
|
||||
echo "current scale=$current"
|
||||
echo "desired scale=$desired"
|
||||
|
||||
maxNodes=$(gcloud container clusters describe "$CLUSTER" \
|
||||
--project "$PROJECT" \
|
||||
--zone "$ZONE" \
|
||||
--format json | jq -r ".nodePools[] | select(.name == \"$NODEPOOL\").autoscaling.maxNodeCount")
|
||||
|
||||
if [[ $desired -gt $maxNodes ]]; then
|
||||
echo "ERROR: will not scale above the cluster autoscaler limit of $maxNodes for the node pool"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kubectl scale \
|
||||
--current-replicas=$current \
|
||||
--replicas=$desired \
|
||||
--kubeconfig="${KUBECONFIG}" \
|
||||
--namespace "$NAMESPACE" \
|
||||
"statefulset/$STATEFULSET"
24
infra/concourse-install/web/init-container-overlay-web.yaml
Normal file
@@ -0,0 +1,24 @@
#! Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:overlay", "overlay")

#! Add resource requests and limits to the initContainer so the whole pod can be assigned "Guaranteed" QoS.
#! All containers must have requests equal to limits, including the initContainers.

#@overlay/match by=overlay.subset({"kind": "Deployment", "metadata":{"name":"concourse-web"}}), expects=1
---
spec:
  template:
    spec:
      initContainers:
        - #@overlay/match by="name"
          name: concourse-migration
          #@overlay/match missing_ok=True
          resources:
            limits:
              cpu: 1000m
              memory: 1Gi
            requests:
              cpu: 1000m
              memory: 1Gi
14
infra/concourse-install/web/ip-masq-agent-configmap.yaml
Normal file
@@ -0,0 +1,14 @@
# see internal doc https://bsg-confluence.broadcom.net/pages/viewpage.action?pageId=689720737
apiVersion: v1
kind: ConfigMap
metadata:
  name: ip-masq-agent
  namespace: kube-system
data:
  # 240.0.0.0/4 is needed to allow the pod to reach the Cloud SQL server's private IP.
  # I was told to also add the whole primary IP range of the cluster's subnet, which is 10.31.141.64/27.
  config: |
    nonMasqueradeCIDRs:
      - 240.0.0.0/4
      - 10.31.141.64/27
    resyncInterval: 60s
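deploy-concourse-web.sh applies this ConfigMap before installing the chart. A minimal sketch for checking that the ip-masq-agent picked up the non-masquerade ranges; the `k8s-app=ip-masq-agent` label is the usual GKE default and is an assumption here:

```bash
# Confirm the ConfigMap contents after applying it.
kubectl get configmap ip-masq-agent --namespace kube-system -o yaml

# Check that the ip-masq-agent pods are running so the new config gets reloaded (resyncInterval: 60s).
kubectl get pods --namespace kube-system -l k8s-app=ip-masq-agent
```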
123
infra/concourse-install/web/values-web.yaml
Normal file
@@ -0,0 +1,123 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Helps decide the name of the Deployment along with other resources and labels. Will be suffixed with "-web".
|
||||
fullnameOverride: concourse
|
||||
|
||||
worker:
|
||||
enabled: false
|
||||
|
||||
postgresql:
|
||||
enabled: false
|
||||
|
||||
web:
|
||||
# In an effort to save money, default to 1 web server.
|
||||
replicas: 1
|
||||
nodeSelector: { cloud.google.com/gke-nodepool: generic-1 } # the name of the nodepool from terraform
|
||||
additionalAffinities:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
topologyKey: kubernetes.io/hostname
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app: concourse-web # see comment on fullnameOverride above
|
||||
release: concourse-web # this must be the same name as the helm release in deploy-concourse-web.sh
|
||||
service:
|
||||
api:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
networking.gke.io/load-balancer-type: "Internal"
|
||||
workerGateway:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
networking.gke.io/load-balancer-type: "Internal"
|
||||
# The first node in the generic-1 nodepool (using e2-highcpu-8 VM) has lots of GKE and Kubernetes pods running on it.
|
||||
# According to the "allocatable" section of the "kubectl get node -o yaml" output, the first node has
|
||||
# 7910m cpu and 6179084 Ki memory (which is about 5.893 Gi).
|
||||
# The total requests from the GKE/Kube pods is 1017m cpu and 1046766976 (bytes) memory (which is about 0.975 Gi).
|
||||
# The difference between the allocatable memory and the requested memory is 4.918 Gi, so we will request slightly
|
||||
# less than that to leave a little headroom on the cluster in case some of these pods get upgraded and decide
|
||||
# to request more in the future. Similarly, the cpu difference is 6893m.
|
||||
resources:
|
||||
requests:
|
||||
cpu: 6400m
|
||||
memory: 4.7Gi
|
||||
limits:
|
||||
cpu: 6400m
|
||||
memory: 4.7Gi
|
||||
strategy:
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
|
||||
concourse:
|
||||
web:
|
||||
localAuth:
|
||||
enabled: false
|
||||
auth:
|
||||
mainTeam:
|
||||
localUser: ""
|
||||
github:
|
||||
# From https://concourse-ci.org/github-auth.html...
|
||||
# "Note that the client must be created under an organization if you want to authorize users based on
|
||||
# organization/team membership. In addition, the GitHub application must have at least read access on
|
||||
# the organization's members. If the client is created under a personal account, only individual users
|
||||
# can be authorized."
|
||||
# We requested that the owner of the vmware-tanzu org create an OAuth client for us.
|
||||
# Because it was created in the org, it should have permissions to read team memberships during a login.
|
||||
# The client ID and client secret are stored in the bootstrap secret in the Secrets Manager
|
||||
# (see infra/README.md for more info about the bootstrap secret).
|
||||
# TODO: this needs to change to be the team in the vmware org. Also need to change the clientID and clientSecret in the concourse-install-bootstrap GCP secret for one in the vmware org.
|
||||
# team: vmware-tanzu:pinniped-owners
|
||||
# Temporarily just list which specific users are admins instead.
|
||||
user: cfryanr,joshuatcasey
|
||||
github:
|
||||
enabled: true
|
||||
bindPort: 80
|
||||
clusterName: pinniped-ci
|
||||
# containerPlacementStrategy: random
|
||||
defaultDaysToRetainBuildLogs: 60
|
||||
# enableAcrossStep: true
|
||||
# enablePipelineInstances: true
|
||||
# enableBuildAuditing: true
|
||||
# enableContainerAuditing: true
|
||||
# enableGlobalResources: true
|
||||
# enableJobAuditing: true
|
||||
# enablePipelineAuditing: true
|
||||
# enableResourceAuditing: true
|
||||
# enableSystemAuditing: true
|
||||
# enableTeamAuditing: true
|
||||
# enableVolumeAuditing: true
|
||||
# enableWorkerAuditing: true
|
||||
enableCacheStreamedVolumes: true
|
||||
enableResourceCausality: true
|
||||
enableRedactSecrets: true
|
||||
baggageclaimResponseHeaderTimeout: 10m
|
||||
encryption:
|
||||
enabled: true
|
||||
kubernetes:
|
||||
keepNamespaces: true
|
||||
tls:
|
||||
enabled: true
|
||||
bindPort: 443
|
||||
postgres:
|
||||
database: atc
|
||||
sslmode: verify-ca
|
||||
gc:
|
||||
# See https://concourse-ci.org/performance-tuning.html#concourse_gc_failed_grace_period.
|
||||
# Defaults to 5 days. This means that when lots of jobs in a pipeline fail, all of those
|
||||
# containers will stick around for 5 days, causing you to quickly reach the max containers
|
||||
# per worker and start seeing orange jobs complaining that they cannot start containers.
|
||||
# It's nice for debugging when you can hijack a container of a job that failed a long time
|
||||
# ago, but it comes at the cost of needing more workers to hold on to those containers.
|
||||
failedGracePeriod: 10m
|
||||
# logLevel: debug
|
||||
tsa:
|
||||
# logLevel: debug
|
||||
|
||||
secrets:
|
||||
localUsers: ""
10
infra/concourse-install/web/ytt-helm-postrender-web.sh
Executable file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

ytt -f "$script_dir/init-container-overlay-web.yaml" -f-
61
infra/terraform/gcloud/.terraform.lock.hcl
generated
Normal file
@@ -0,0 +1,61 @@
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/google" {
|
||||
version = "6.40.0"
|
||||
constraints = "~> 6.0"
|
||||
hashes = [
|
||||
"h1:wCQBpao7//BaEDQLdmqfcHlTqABT7BeeKdPJrf8V21w=",
|
||||
"zh:0c304517a2a26f78d058491a2041088dcd4dec9207219ca75a644e734e8394a8",
|
||||
"zh:2df309e86e0d2edc65099e0e47bc9bc91172dce62e59d579dc5132337719d7f8",
|
||||
"zh:4dfb3c5775dcae2f93f3e9affe52a2987aba76b35844883b188d236f5fb485d0",
|
||||
"zh:5943c1fe00bbd63c5be3813c16ba225ca10b1d694e8ead0e8fc4ebd54e9d0b9c",
|
||||
"zh:6ed84e95400f4e27b32fa56832ea47a350cbe581fbae76f5ddabf98f18f44f02",
|
||||
"zh:77bccedaf8fd1807a8020baf422897e5487f5f182b13ee29a6e8c58024ee22be",
|
||||
"zh:9e486f71a714f10cd0d0c0df04d4a8f2cd5c33518131a214b11f3624c683ea10",
|
||||
"zh:c4598d6c6595e8a1cdd637ffc9af4381cb1cb856f9c14ea5dcc675378b01cca6",
|
||||
"zh:dcba35d7cd1793b6ca2ef63ccd1737ce669b31d14161f0a0f2e3aa8d0d5c8793",
|
||||
"zh:ed661f2c233bcd56360731f7f21dca8a94f58ec27f4e3b468d27711938812146",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
"zh:fe09c7cb7a448aab121bad8ca857acbf33e00bbc0c2b25824c71ff3be2e629e4",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/google-beta" {
|
||||
version = "6.40.0"
|
||||
constraints = "~> 6.0"
|
||||
hashes = [
|
||||
"h1:R5yc207FnSQvJxNJwVD6xo0wwXPWI+CsgxXak+skiBs=",
|
||||
"zh:003d3bfd2a39a950e7e5865e5b74a630594710a21990f892c3fb4c9193f532b0",
|
||||
"zh:0f1e455cc73e288c8e047dd4587bc0ec7389855d4a949c853adcbf0a4aa19bb2",
|
||||
"zh:12be1e25e2c51c8fb8dee0f4ed3bb43706b073027a895c6794c2755cbbc05a18",
|
||||
"zh:3688208f155ea04dbfa3ba08d761cd3ae4ba342d8e5fdb65a659f1d72a8d8fc7",
|
||||
"zh:4a71281ca84e3ab028a89935779b7cc6417ec9a54da5233a52fa5a062235fc61",
|
||||
"zh:5c4798d3265d1768c18b8376663e1642c0ad5c554f6670633938b570eee4f6b8",
|
||||
"zh:64e8d57530352b87480f22efd3cf7c4bca40e8c8fb60118615af761f3c480d6b",
|
||||
"zh:7a6ebb211ea05acab41bd9f0039155e618f783bc0462d708a7e6c30827dcf644",
|
||||
"zh:978524cb2a1ceab019232f66e29eed5b4bbc70ba71837c824935a139b86010d4",
|
||||
"zh:9cad3dbf1b98ae30a5c27b10c7a6c85ebce9fb3332a65ac868e3499664883d26",
|
||||
"zh:f0da73f9d9d53d499b69f11421a56fd48ba6aff98b33ba1fe2bf4c4cf0f917f1",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.7.2"
|
||||
hashes = [
|
||||
"h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=",
|
||||
"zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
|
||||
"zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
|
||||
"zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
|
||||
"zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
|
||||
"zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
|
||||
"zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
|
||||
"zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
|
||||
"zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
|
||||
"zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
|
||||
"zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
|
||||
]
|
||||
}
46
infra/terraform/gcloud/README.md
Normal file
@@ -0,0 +1,46 @@
# Terraform for Google Cloud Concourse Infrastructure

We used Terraform to create the infra needed for running our own Concourse.
This includes things like a GKE cluster, a static IP, a DNS entry, and a Postgres database.

NOTE: Do not manually edit these resources using the Google Cloud UI, API, or CLI.
Instead, please update the `.tf` files and follow the below steps again.

To run Terraform to create or update the infrastructure:

1. If running for the first time ever, log in to the GCP Console for the project and
   create the GCS storage bucket where terraform will save its state (see [gcp.tf](gcp.tf) for the bucket name).
   Creating the bucket in one region (see [variables.tf](variables.tf) for the region name)
   with otherwise default options should suffice.
2. Install the `gcloud` CLI and authenticate as yourself using `gcloud auth login`, if you haven't already.
3. Use `gcloud auth application-default login` if you haven't already. This is not optional. If you forget this step,
   terraform will complain that it cannot read the state from the GCP bucket file.
4. Install terraform if you haven't already. Use brew to install terraform,
   or use `brew install tfenv` and then use tfenv to install Terraform.
   At the time of last updating this README, we were using Terraform v1.12.2.
5. cd into this directory: `cd infra/terraform/gcloud`
6. Run `TF_VAR_project=$PINNIPED_GCP_PROJECT terraform init`, if you haven't already for this directory.
   This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT`
   whose value is the name of the GCP project.
7. Run `terraform fmt`.
8. Run `terraform validate`.
9. Run
   `TF_VAR_project=$PINNIPED_GCP_PROJECT TF_VAR_sharedVPCProject=$VPC_PROJECT TF_VAR_networkName=$VPC_NAME TF_VAR_concourseSubnetName=$SUBNET_NAME terraform plan`.
   This assumes that you have already exported an env var called `PINNIPED_GCP_PROJECT`
   whose value is the name of the GCP project, along with `VPC_PROJECT` which is the name
   of another GCP project which is sharing a VPC network to our project, `VPC_NAME` which is
   the name of that shared VPC, and `SUBNET_NAME` which is the name of a subnet from that
   shared VPC that we want to give to our Concourse GKE cluster.
   This command is a dry-run which will print what the `apply` command would perform.
10. If you are happy with the output of `terraform plan`, then run
    `TF_VAR_project=$PINNIPED_GCP_PROJECT TF_VAR_sharedVPCProject=$VPC_PROJECT TF_VAR_networkName=$VPC_NAME TF_VAR_concourseSubnetName=$SUBNET_NAME terraform apply`
    to really create/update/delete the resources.

If you do not need to run `terraform apply` because someone else has already done that,
then you still need to follow the above directions up to and including running `terraform init`
to set up terraform on your computer.

To delete the entire Concourse deployment and all its related cloud infrastructure, use `terraform destroy`.
You may need to use `terraform apply` to set `deletion_protection=false` on some resources first (see Terraform docs).
There is no way to undo `terraform destroy`. This will also delete the Cloud SQL database which contains all CI job
history.
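The deploy scripts in `infra/concourse-install` read values back from this Terraform state via `terraform output --json` and `yq`. A minimal sketch for sanity-checking the outputs after an `apply` (the output key names are the ones those scripts consume; yq v4 can read the JSON directly):

```bash
cd infra/terraform/gcloud

# Dump all Terraform outputs as JSON, then pick out a few values the deploy scripts rely on.
terraform output --json > /tmp/terraform-outputs.json
yq eval '.cluster-name.value' /tmp/terraform-outputs.json
yq eval '.web-ip.value' /tmp/terraform-outputs.json
yq eval '.database-ip.value' /tmp/terraform-outputs.json
```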
26
infra/terraform/gcloud/address/main.tf
Normal file
@@ -0,0 +1,26 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# "data" reads a pre-existing resource without trying to manage its state.
# This subnet is shared with us from another GCP project.
data "google_compute_subnetwork" "existing_subnet_for_concourse" {
  project = var.sharedVPCProject
  name    = var.concourseSubnetName
}

# Reserved internal static IPv4 address for the `web` instances.
# This is needed so that we can have a static IP for `ci.pinniped.broadcom.net`.
resource "google_compute_address" "main" {
  name         = "ci-pinniped-dev"
  description  = "static IP address reserved for Concourse web interface"
  subnetwork   = data.google_compute_subnetwork.existing_subnet_for_concourse.id
  address_type = "INTERNAL"

  # Allow it to be shared by multiple load balancers (each with different ports).
  # We will have one for web and one for web-worker-gateway.
  purpose = "SHARED_LOADBALANCER_VIP"

  # Manually picked an IP from the range that did not cause an error when entered
  # into GCP's "VPC Network / IP address / Reserve internal static IP" UI for this subnet.
  address = "10.31.141.90"
}
6
infra/terraform/gcloud/address/outputs.tf
Normal file
@@ -0,0 +1,6 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

output "ip" {
  value = google_compute_address.main.address
}
12
infra/terraform/gcloud/address/variables.tf
Normal file
@@ -0,0 +1,12 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

variable "sharedVPCProject" {
  description = "Name of the GCP project which contains the shared VPC."
  type        = string
}

variable "concourseSubnetName" {
  description = "Name of the GCP subnet to use for concourse."
  type        = string
}
141
infra/terraform/gcloud/cluster/main.tf
Normal file
@@ -0,0 +1,141 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# "data" reads a pre-existing resource without trying to manage its state.
|
||||
data "google_compute_network" "existing_network" {
|
||||
project = var.sharedVPCProject
|
||||
name = var.networkName
|
||||
}
|
||||
|
||||
# This subnet is shared with us from another GCP project.
|
||||
data "google_compute_subnetwork" "existing_subnet" {
|
||||
project = var.sharedVPCProject
|
||||
name = var.subnetName
|
||||
}
|
||||
|
||||
data "google_service_account" "default" {
|
||||
account_id = "terraform"
|
||||
}
|
||||
|
||||
# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster
|
||||
resource "google_container_cluster" "main" {
|
||||
# Allow "terraform destroy" for this cluster.
|
||||
# deletion_protection = false
|
||||
|
||||
name = var.name
|
||||
location = var.zone
|
||||
|
||||
network = data.google_compute_network.existing_network.id
|
||||
subnetwork = data.google_compute_subnetwork.existing_subnet.id
|
||||
|
||||
# We can't create a cluster with no node pool defined, but we want to only use
|
||||
# separately managed node pools. This allows node pools to be added and removed without recreating the cluster.
|
||||
# So we create the smallest possible default node pool and immediately delete it.
|
||||
remove_default_node_pool = true
|
||||
initial_node_count = 1
|
||||
|
||||
min_master_version = "1.32.2-gke.1297002"
|
||||
|
||||
# Settings for a private cluster.
|
||||
# See internal doc https://bsg-confluence.broadcom.net/pages/viewpage.action?pageId=689720737
|
||||
networking_mode = "VPC_NATIVE"
|
||||
private_cluster_config {
|
||||
enable_private_endpoint = true
|
||||
enable_private_nodes = true
|
||||
}
|
||||
master_authorized_networks_config {
|
||||
cidr_blocks {
|
||||
cidr_block = "10.0.0.0/8"
|
||||
display_name = "corp internal networks"
|
||||
}
|
||||
}
|
||||
ip_allocation_policy {
|
||||
cluster_secondary_range_name = "pods"
|
||||
services_secondary_range_name = "services"
|
||||
}
|
||||
|
||||
addons_config {
|
||||
http_load_balancing {
|
||||
disabled = false
|
||||
}
|
||||
|
||||
horizontal_pod_autoscaling {
|
||||
disabled = false
|
||||
}
|
||||
|
||||
network_policy_config {
|
||||
disabled = false
|
||||
}
|
||||
}
|
||||
|
||||
maintenance_policy {
|
||||
daily_maintenance_window {
|
||||
start_time = "03:00"
|
||||
}
|
||||
}
|
||||
|
||||
network_policy {
|
||||
provider = "CALICO"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
workload_identity_config {
|
||||
workload_pool = "${var.project}.svc.id.goog"
|
||||
}
|
||||
|
||||
cluster_autoscaling {
|
||||
autoscaling_profile = "OPTIMIZE_UTILIZATION"
|
||||
}
|
||||
}
|
||||
|
||||
# See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_node_pool
|
||||
resource "google_container_node_pool" "main" {
|
||||
provider = google-beta
|
||||
for_each = var.node-pools
|
||||
|
||||
location = var.zone
|
||||
cluster = google_container_cluster.main.name
|
||||
name = each.key
|
||||
|
||||
max_pods_per_node = 64
|
||||
|
||||
autoscaling {
|
||||
min_node_count = each.value.min
|
||||
max_node_count = each.value.max
|
||||
}
|
||||
|
||||
management {
|
||||
auto_repair = true
|
||||
auto_upgrade = each.value.auto-upgrade
|
||||
}
|
||||
|
||||
node_config {
|
||||
preemptible = each.value.preemptible
|
||||
machine_type = each.value.machine-type
|
||||
local_ssd_count = each.value.local-ssds
|
||||
disk_size_gb = each.value.disk-size
|
||||
disk_type = each.value.disk-type
|
||||
image_type = each.value.image
|
||||
|
||||
workload_metadata_config {
|
||||
mode = "GKE_METADATA"
|
||||
}
|
||||
|
||||
metadata = {
|
||||
disable-legacy-endpoints = "true"
|
||||
}
|
||||
|
||||
service_account = data.google_service_account.default.email
|
||||
oauth_scopes = [
|
||||
"https://www.googleapis.com/auth/cloud-platform"
|
||||
]
|
||||
|
||||
# Tag to attach appropriate firewall rules.
|
||||
tags = ["gke-broadcom"]
|
||||
}
|
||||
|
||||
timeouts {
|
||||
create = "30m"
|
||||
delete = "30m"
|
||||
}
|
||||
}
6
infra/terraform/gcloud/cluster/outputs.tf
Normal file
@@ -0,0 +1,6 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

output "cluster-name" {
  value = google_container_cluster.main.name
}
35
infra/terraform/gcloud/cluster/variables.tf
Normal file
@@ -0,0 +1,35 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
variable "name" {
|
||||
default = ""
|
||||
description = "The name of the GKE cluster to be created."
|
||||
}
|
||||
|
||||
variable "zone" {
|
||||
default = ""
|
||||
description = "The zone where the cluster should live."
|
||||
}
|
||||
|
||||
variable "project" {
|
||||
description = "The Google GCP project to host the resources."
|
||||
}
|
||||
|
||||
variable "node-pools" {
|
||||
description = "A list of node pool configurations to create and assign to the cluster."
|
||||
}
|
||||
|
||||
variable "sharedVPCProject" {
|
||||
description = "Name of the GCP project which contains the shared VPC."
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "networkName" {
|
||||
description = "Name of the shared VPC network to use for the cluster."
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "subnetName" {
|
||||
description = "Name of the GCP subnet to use for the cluster."
|
||||
type = string
|
||||
}
|
||||
infra/terraform/gcloud/database/main.tf  (102 lines, Normal file)
@@ -0,0 +1,102 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# A piece of randomization that gets consumed by the
# `google_sql_database_instance` resources.
#
# This is needed in order to facilitate creating and recreating instances
# without waiting for the whole period that GCP requires before an instance name can be reused.
resource "random_id" "instance-name" {
  byte_length = 4
}

# "data" reads a pre-existing resource without trying to manage its state.
data "google_compute_network" "private_network" {
  provider = google-beta

  project = var.sharedVPCProject
  name    = var.networkName
}

# This API needs to be enabled in our project before creating our Cloud SQL instance,
# or else we get error "googleapi: Error 400: Invalid request: Incorrect Service Networking config
# for instance: xxx:xxx:SERVICE_NETWORKING_NOT_ENABLED., invalid".
# See https://stackoverflow.com/a/66537918.
resource "google_project_service" "project" {
  service            = "servicenetworking.googleapis.com"
  disable_on_destroy = false
}

resource "google_sql_database_instance" "main" {
  provider = google-beta

  # Allow "terraform destroy" for this db.
  # deletion_protection = false

  name             = "${var.name}-${random_id.instance-name.hex}"
  region           = var.region
  database_version = "POSTGRES_15"

  settings {
    availability_type = "ZONAL"
    disk_autoresize   = true
    disk_type         = "PD_SSD"
    tier              = "db-custom-${var.cpus}-${var.memory_mb}"
    edition           = "ENTERPRISE" # cheaper than ENTERPRISE_PLUS

    database_flags {
      name  = "log_min_duration_statement"
      value = "-1"
    }

    database_flags {
      name  = "max_connections"
      value = var.max_connections
    }

    ip_configuration {
      # Disable assignment of a public IP address
      ipv4_enabled = false

      ssl_mode = "ENCRYPTED_ONLY"

      private_network = data.google_compute_network.private_network.self_link

      enable_private_path_for_google_cloud_services = true
    }

    backup_configuration {
      enabled    = true
      start_time = "23:00"
    }

    location_preference {
      zone = var.zone
    }
  }
}

resource "google_sql_database" "atc" {
  name = "atc"

  instance  = google_sql_database_instance.main.name
  charset   = "UTF8"
  collation = "en_US.UTF8"
}

resource "random_string" "password" {
  length  = 32
  special = true
}

resource "google_sql_user" "user" {
  name = "atc"

  instance    = google_sql_database_instance.main.name
  password_wo = random_string.password.result
}

resource "google_sql_ssl_cert" "cert" {
  common_name = "atc"
  instance    = google_sql_database_instance.main.name
}
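A small verification sketch (not part of this file): Terraform enables the Service Networking API through the google_project_service resource above, but when debugging the SERVICE_NETWORKING_NOT_ENABLED error mentioned in the comment it can be checked or enabled by hand with the gcloud CLI; the project ID here is a placeholder:

    gcloud services list --enabled --project "$TF_VAR_project" | grep servicenetworking \
      || gcloud services enable servicenetworking.googleapis.com --project "$TF_VAR_project"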
infra/terraform/gcloud/database/outputs.tf  (30 lines, Normal file)
@@ -0,0 +1,30 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

output "username" {
  value = google_sql_user.user.name
}

output "password" {
  sensitive = true
  value     = random_string.password.result
}

output "ip" {
  value = google_sql_database_instance.main.ip_address[0].ip_address
}

output "ca-cert" {
  sensitive = true
  value     = google_sql_database_instance.main.server_ca_cert[0].cert
}

output "cert" {
  sensitive = true
  value     = google_sql_ssl_cert.cert.cert
}

output "private-key" {
  sensitive = true
  value     = google_sql_ssl_cert.cert.private_key
}
infra/terraform/gcloud/database/variables.tf  (42 lines, Normal file)
@@ -0,0 +1,42 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

variable "name" {
  default     = ""
  description = "The name of the CloudSQL instance to create (note: a random ID is appended to this name)."
}

variable "memory_mb" {
  default     = ""
  description = "Amount of memory, in MB, to assign to the CloudSQL instance."
}

variable "cpus" {
  default     = ""
  description = "Number of CPUs to assign to the CloudSQL instance."
}

variable "zone" {
  default     = ""
  description = "The zone where this instance should be created (e.g., us-central1-a)."
}

variable "region" {
  default     = ""
  description = "The region where the instance should be created (e.g., us-central1)."
}

variable "max_connections" {
  default     = ""
  description = "The maximum number of connections allowed by Postgres."
}

variable "sharedVPCProject" {
  description = "Name of the GCP project which contains the shared VPC."
  type        = string
}

variable "networkName" {
  description = "Name of the shared VPC network to use for the db."
  type        = string
}
infra/terraform/gcloud/gcp.tf  (33 lines, Normal file)
@@ -0,0 +1,33 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

terraform {
  required_providers {
    google      = "~> 6"
    google-beta = "~> 6"
  }

  backend "gcs" {
    # By not providing credentials, you will use your current identity from the gcloud CLI.
    # credentials = "gcp.json"
    bucket = "pinniped-ci-terraform-state"
    prefix = "pinniped-concourse"
  }
}

provider "google" {
  # By not providing credentials, you will use your current identity from the gcloud CLI.
  # credentials = "gcp.json"
  project = var.project
  region  = var.region
  zone    = var.zone
}

# `google-beta` provides us access to GCP's beta APIs.
provider "google-beta" {
  # By not providing credentials, you will use your current identity from the gcloud CLI.
  # credentials = "gcp.json"
  project = var.project
  region  = var.region
  zone    = var.zone
}
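Since both the GCS backend and the providers above deliberately omit a credentials file, a typical first run relies on the operator's own gcloud identity; a minimal sketch:

    gcloud auth login
    gcloud auth application-default login   # application-default credentials picked up by Terraform
    terraform init                          # configures the "gcs" backend declared above
    terraform plan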
infra/terraform/gcloud/main.tf  (68 lines, Normal file)
@@ -0,0 +1,68 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Create the static IP.
module "address" {
  source = "./address"

  sharedVPCProject    = var.sharedVPCProject
  concourseSubnetName = var.concourseSubnetName
}

# Create the GKE Kubernetes cluster.
module "cluster" {
  source = "./cluster"

  name    = "pinniped-concourse"
  project = var.project
  zone    = var.zone

  sharedVPCProject = var.sharedVPCProject
  networkName      = var.networkName
  subnetName       = var.concourseSubnetName

  node-pools = {

    "generic-1" = {
      auto-upgrade = true
      disk-size    = "50"
      disk-type    = "pd-ssd"
      image        = "COS_CONTAINERD"
      local-ssds   = 0
      machine-type = "e2-highcpu-8" # 8 vCPU and 8 GB memory
      max          = 2
      min          = 1
      preemptible  = false
      version      = "1.32.2-gke.1297002"
    },

    "workers-1" = {
      auto-upgrade = true
      disk-size    = "100"
      disk-type    = "pd-ssd"
      image        = "UBUNTU_CONTAINERD"
      local-ssds   = 0
      machine-type = "c3-standard-8" # 8 vCPU and 32 GB memory
      max          = 5
      min          = 1
      preemptible  = false
      version      = "1.32.2-gke.1297002"
    },
  }
}

# Creates the CloudSQL Postgres database to be used by the Concourse deployment.
module "database" {
  source = "./database"

  name   = "pinniped-concourse"
  region = var.region
  zone   = var.zone

  sharedVPCProject = var.sharedVPCProject
  networkName      = var.networkName

  cpus            = "4"
  memory_mb       = "7680"
  max_connections = "300"
}
infra/terraform/gcloud/outputs.tf  (50 lines, Normal file)
@@ -0,0 +1,50 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

output "project" {
  value = var.project
}

output "region" {
  value = var.region
}

output "zone" {
  value = var.zone
}

output "web-ip" {
  value = module.address.ip
}

output "database-ip" {
  value = module.database.ip
}

output "database-ca-cert" {
  sensitive = true
  value     = module.database.ca-cert
}

output "database-username" {
  value = module.database.username
}

output "database-password" {
  sensitive = true
  value     = module.database.password
}

output "database-cert" {
  sensitive = true
  value     = module.database.cert
}

output "database-private-key" {
  sensitive = true
  value     = module.database.private-key
}

output "cluster-name" {
  value = module.cluster.cluster-name
}
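For reference (not part of the diff), the values marked sensitive above are hidden in a normal `terraform output` listing and are typically extracted one at a time after an apply, for example:

    terraform output -raw database-password
    terraform output -raw database-ca-cert     > server-ca.pem
    terraform output -raw database-cert        > client-cert.pem
    terraform output -raw database-private-key > client-key.pem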
infra/terraform/gcloud/variables.tf  (36 lines, Normal file)
@@ -0,0 +1,36 @@
# Copyright 2023-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

variable "project" {
  description = "The Google GCP project to host the resources."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_project for all terraform commands.
}

variable "region" {
  description = "The cloud provider region where the resources are created."
  default     = "us-west1"
}

variable "zone" {
  description = "The cloud provider zone where the resources are created."
  default     = "us-west1-c"
}

variable "sharedVPCProject" {
  description = "Name of the GCP project which contains the shared VPC."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_sharedVPCProject for all terraform commands.
}

variable "networkName" {
  description = "Name of the shared VPC network."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_networkName for all terraform commands.
}

variable "concourseSubnetName" {
  description = "Name of the GCP subnet to use for concourse."
  type        = string
  # Please provide the value of this variable by setting the env var TF_VAR_concourseSubnetName for all terraform commands.
}
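The comments above expect the unset variables to come from the environment; a minimal sketch of a plan run under that convention (the values are placeholders, not real project or network names):

    export TF_VAR_project="my-gcp-project"
    export TF_VAR_sharedVPCProject="my-shared-vpc-project"
    export TF_VAR_networkName="my-shared-vpc-network"
    export TF_VAR_concourseSubnetName="my-concourse-subnet"
    terraform plan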
pipelines/cleanup-aws/pipeline.yml  (68 lines, Normal file)
@@ -0,0 +1,68 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

display:

  background_image: https://upload.wikimedia.org/wikipedia/commons/9/9d/Seal_cleaning_itself.jpg

resources:

- name: pinniped-ci
  type: git
  icon: github
  source:
    uri: https://github.com/vmware/pinniped.git
    branch: ci
    username: ((ci-bot-access-token-with-read-only-public-repos))

jobs:

# Here is a recommendation for how to use these tasks to clean up our AWS
# environment.
#
# 1. Run dryrun-cleanup-aws and look at the listed resources to make sure you aren't
#    deleting anything that you don't want to.
# 2. Run danger-danger-cleanup-aws to actually delete resources.
# 3. Run list-all-aws-resources to view ALL resources left in our AWS account.
#    Consider if we want to add any of those resources to our cleanup task's config.

- name: danger-danger-cleanup-aws
  public: false # hide logs
  serial: true
  plan:
  - get: pinniped-ci
  - task: cleanup-aws
    file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
    params:
      AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
      AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
      AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
      AWS_ROLE_ARN: ((aws-cleanup-role-arn))
      REALLY_CLEANUP: "yes"

- name: dryrun-cleanup-aws
  public: false # hide logs
  serial: true
  plan:
  - get: pinniped-ci
  - task: preview-cleanup-aws-without-actually-deleting-anything
    file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
    params:
      AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
      AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
      AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
      AWS_ROLE_ARN: ((aws-cleanup-role-arn))

- name: list-all-aws-resources
  public: false # hide logs
  serial: true
  plan:
  - get: pinniped-ci
  - task: list-all-aws-resources
    file: pinniped-ci/pipelines/shared-tasks/cleanup-aws/task.yml
    params:
      AWS_ACCOUNT_NUMBER: ((aws-cleanup-account-number))
      AWS_ACCESS_KEY_ID: ((aws-cleanup-iam-key-id))
      AWS_SECRET_ACCESS_KEY: ((aws-cleanup-iam-key-secret))
      AWS_ROLE_ARN: ((aws-cleanup-role-arn))
      ALL_RESOURCES: "yes"
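As a usage sketch (the Concourse target name `ci` is a placeholder), the dry-run-first workflow recommended in the comments above maps onto the fly CLI like this:

    fly -t ci trigger-job -j cleanup-aws/dryrun-cleanup-aws --watch
    # review the listed resources, then:
    fly -t ci trigger-job -j cleanup-aws/danger-danger-cleanup-aws --watch
    fly -t ci trigger-job -j cleanup-aws/list-all-aws-resources --watch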
pipelines/cleanup-aws/update-pipeline.sh  (12 lines, Executable file)
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
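This script (and the near-identical update-pipeline.sh copies for the other pipelines below) derives the pipeline name from its own directory and delegates to set_pipeline from hack/fly-helpers.sh, so updating the pipeline is a single invocation, assuming fly is already logged in to the right team:

    ./pipelines/cleanup-aws/update-pipeline.sh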
pipelines/concourse-workers/pipeline.yml  (180 lines, Normal file)
@@ -0,0 +1,180 @@
# Copyright 2020-2026 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

display:

  background_image: https://cdn.pixabay.com/photo/2020/09/16/22/09/pool-5577567_1280.jpg

meta:

  # GCP account info and which zone the workers should be created in and deleted from.
  gke_admin_params: &gke_admin_params
    INSTANCE_ZONE: us-west1-c
    PINNIPED_GCP_PROJECT: ((gcp-project-name))
    GCP_USERNAME: ((gcp-instance-admin-username))
    GCP_JSON_KEY: ((gcp-instance-admin-json-key))

  # GCP account info and which zone the workers should be created in and deleted from.
  gcp_account_params: &gcp_account_params
    INSTANCE_ZONE: us-west1-a
    GCP_PROJECT: ((gcp-project-name))
    GCP_USERNAME: ((gcp-instance-admin-username))
    GCP_JSON_KEY: ((gcp-instance-admin-json-key))

  # GKE account info and which zone the clusters should be created in and deleted from.
  gke_account_params: &gke_account_params
    CLUSTER_REGION: us-west1
    CLUSTER_ZONE: us-west1-c
    GCP_PROJECT: ((gcp-project-name))
    GCP_SERVICE_ACCOUNT: ((gcp-instance-admin-username))
    GCP_JSON_KEY: ((gcp-instance-admin-json-key))

  # Azure account info and which resource group the clusters should be created in and deleted from.
  azure_account_params: &azure_account_params
    AZURE_SUBSCRIPTION_ID: ((azure-bot-subscription-id))
    AZURE_TENANT: ((azure-bot-tenant-id))
    AZURE_RESOURCE_GROUP: pinniped-ci
    AZURE_USERNAME: ((azure-bot-app-id))
    AZURE_PASSWORD: ((azure-bot-password))

resources:

- name: pinniped-ci
  type: git
  icon: github
  source:
    uri: https://github.com/vmware/pinniped.git
    branch: ci
    username: ((ci-bot-access-token-with-read-only-public-repos))

- name: k8s-app-deployer-image
  type: registry-image
  icon: docker
  check_every: 5m
  source:
    repository: ((ci-ghcr-registry))/k8s-app-deployer
    username: ((ci-ghcr-pusher-username))
    password: ((ci-ghcr-pusher-token))
    tag: latest

- name: gcloud-image
  type: registry-image
  icon: docker
  check_every: 5m
  source:
    repository: google/cloud-sdk
    tag: slim

# - name: aks-deployer-image
#   type: registry-image
#   icon: docker
#   check_every: 5m
#   source:
#     repository: mcr.microsoft.com/azure-cli

- name: hourly
  type: time
  icon: calendar-clock
  check_every: 10m
  source:
    interval: 1h

# In an effort to save money, we no longer automatically scale our workers up and down on a schedule.

# - name: end-of-business-day
#   type: time
#   icon: calendar-clock
#   source:
#     location: America/Los_Angeles
#     start: 7:00 PM
#     stop: 8:00 PM
#     days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]
#
# - name: start-of-business-day
#   type: time
#   icon: calendar-clock
#   source:
#     location: America/New_York
#     start: 5:30 AM
#     stop: 6:30 AM
#     days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]

jobs:

- name: scale-up-internal-workers
  public: true # all logs are publicly visible
  plan:
  - in_parallel:
    - get: pinniped-ci
    - get: k8s-app-deployer-image
    # - get: start-of-business-day
    #   trigger: true
  - task: scale-up
    timeout: 30m
    file: pinniped-ci/pipelines/concourse-workers/scale-up-gke-replicas.yml
    image: k8s-app-deployer-image
    params:
      <<: *gke_admin_params

- name: scale-down-internal-workers
  public: true # all logs are publicly visible
  plan:
  - in_parallel:
    - get: pinniped-ci
    - get: k8s-app-deployer-image
    # - get: end-of-business-day
    #   trigger: true
  - task: scale-down
    timeout: 30m
    file: pinniped-ci/pipelines/concourse-workers/scale-down-gke-replicas.yml
    image: k8s-app-deployer-image
    params:
      <<: *gke_admin_params

- name: remove-orphaned-vms
  public: true # all logs are publicly visible
  plan:
  - in_parallel:
    - get: pinniped-ci
    - get: gcloud-image
    - get: hourly
      trigger: true
  - task: remove-orphaned-kind-cluster-vms
    attempts: 2
    timeout: 25m
    file: pinniped-ci/pipelines/shared-tasks/remove-orphaned-kind-cluster-vms/task.yml
    image: gcloud-image
    params:
      <<: *gcp_account_params

- name: remove-orphaned-gke-clusters
  public: true # all logs are publicly visible
  plan:
  - in_parallel:
    - get: pinniped-ci
    - get: gcloud-image
    - get: hourly
      trigger: true
  - task: remove-orphaned-gke-clusters
    attempts: 2
    timeout: 25m
    file: pinniped-ci/pipelines/shared-tasks/remove-orphaned-gke-clusters/task.yml
    image: gcloud-image
    params:
      <<: *gke_account_params

# - name: remove-orphaned-aks-clusters
#   public: true # all logs are publicly visible
#   plan:
#   - in_parallel:
#     - get: pinniped-ci
#     - get: aks-deployer-image
#     - get: hourly
#       trigger: true
#   - task: remove-orphaned-aks-clusters
#     attempts: 2
#     timeout: 25m
#     file: pinniped-ci/pipelines/shared-tasks/remove-orphaned-aks-clusters/task.yml
#     image: aks-deployer-image
#     params:
#       <<: *azure_account_params
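Since the schedule-based triggers above are commented out, scaling the internal workers is now a manual action; a sketch of kicking it off with the fly CLI (the target name `ci` is a placeholder):

    fly -t ci trigger-job -j concourse-workers/scale-up-internal-workers --watch
    fly -t ci trigger-job -j concourse-workers/scale-down-internal-workers --watch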
pipelines/concourse-workers/scale-down-gke-replicas.yml  (13 lines, Normal file)
@@ -0,0 +1,13 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

---
platform: linux
inputs:
- name: pinniped-ci
params:
  PINNIPED_GCP_PROJECT:
  GCP_SERVICE_ACCOUNT:
  GCP_JSON_KEY:
run:
  path: pinniped-ci/infra/concourse-install/scale-down-concourse-internal-workers.sh
pipelines/concourse-workers/scale-up-gke-replicas.yml  (13 lines, Normal file)
@@ -0,0 +1,13 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

---
platform: linux
inputs:
- name: pinniped-ci
params:
  PINNIPED_GCP_PROJECT:
  GCP_SERVICE_ACCOUNT:
  GCP_JSON_KEY:
run:
  path: pinniped-ci/infra/concourse-install/scale-up-concourse-internal-workers.sh
pipelines/concourse-workers/update-pipeline.sh  (12 lines, Executable file)
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
pipelines/dockerfile-builders/pipeline.yml  (1305 lines, Normal file)
(File diff suppressed because it is too large.)
pipelines/dockerfile-builders/update-pipeline.sh  (16 lines, Executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" daily

# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
pipelines/kind-node-builder/pipeline.yml  (115 lines, Normal file)
@@ -0,0 +1,115 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

display:

  background_image: https://upload.wikimedia.org/wikipedia/commons/2/2b/Grey_seal_animal_halichoerus_grypus.jpg

meta:

  notify_on_failure: &notify_on_failure
    on_failure:
      put: gchat
      timeout: 5m
      params:
        text: |
          Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :(
          ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME}

  # GCP account info and which zone the workers should be created in and deleted from.
  gcp_account_params: &gcp_account_params
    INSTANCE_ZONE: us-west1-a
    GCP_PROJECT: ((gcp-project-name))
    GCP_USERNAME: ((gcp-instance-admin-username))
    GCP_JSON_KEY: ((gcp-instance-admin-json-key))

resource_types:

- name: google-chat-notify-resource
  type: docker-image
  source:
    repository: springio/google-chat-notify-resource
    tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags
    # We are only doing pulls of this resource type, but add the username and password to avoid
    # hitting a rate limit. Our free account is only allowed to have one access token, so we
    # cannot make a read-only token for performing pulls.
    username: getpinniped
    password: ((getpinniped-dockerhub-image-push-access-token))

resources:

- name: gcloud-image
  type: registry-image
  icon: docker
  check_every: 5m
  source:
    repository: google/cloud-sdk
    tag: slim

- name: pinniped-ci
  type: git
  icon: github
  source:
    uri: https://github.com/vmware/pinniped.git
    branch: ci
    username: ((ci-bot-access-token-with-read-only-public-repos))

- name: daily
  type: time
  icon: calendar-clock
  check_every: 10m
  source:
    location: America/Los_Angeles
    start: 1:00 AM
    stop: 2:00 AM
    days: [ Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday ]

- name: gchat
  type: google-chat-notify-resource
  icon: chat-outline
  source:
    url: ((gchat-project-pinniped-bots-webhook-url))

jobs:

- name: build-kind-node-image-kube-main-latest
  public: true # all logs are publicly visible
  <<: *notify_on_failure
  plan:
  - in_parallel:
    - get: pinniped-ci
    - get: gcloud-image
    - get: daily
      trigger: true
  - task: create-kind-node-builder-vm
    timeout: 30m
    file: pinniped-ci/pipelines/shared-tasks/create-kind-node-builder-vm/task.yml
    image: gcloud-image
    params:
      SHARED_VPC_PROJECT: ((shared-vpc-project))
      SUBNET_REGION: ((subnet-region))
      SUBNET_NAME: ((instances-subnet-name))
      DISK_IMAGES_PROJECT: ((disk-images-gcp-project-name))
      <<: *gcp_account_params
  - task: build-kind-node-image
    timeout: 90m
    file: pinniped-ci/pipelines/shared-tasks/build-kind-node-image/task.yml
    image: gcloud-image
    input_mapping:
      instance: create-kind-node-builder-vm-output
    params:
      PUSH_TO_IMAGE_REGISTRY: "ghcr.io"
      PUSH_TO_IMAGE_REPO: "pinniped-ci-bot/kind-node-image"
      DOCKER_USERNAME: ((ci-ghcr-pusher-username))
      DOCKER_PASSWORD: ((ci-ghcr-pusher-token))
      <<: *gcp_account_params
  ensure:
    task: remove-instance
    attempts: 2
    timeout: 20m
    file: pinniped-ci/pipelines/shared-tasks/remove-gce-worker-vm/task.yml
    image: gcloud-image
    input_mapping:
      concourse-worker-pool: create-kind-node-builder-vm-output
    params:
      <<: *gcp_account_params
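For context (an assumption, not shown in this diff): the image pushed by this job is meant to be consumed as a kind node image, so something along these lines would use it locally, with the tag left as a placeholder because the tagging scheme is not visible here:

    kind create cluster --image ghcr.io/pinniped-ci-bot/kind-node-image:<tag>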
pipelines/kind-node-builder/update-pipeline.sh  (16 lines, Executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" daily

# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
pipelines/main/pipeline.yml  (3265 lines, Normal file)
(File diff suppressed because it is too large.)
pipelines/main/update-pipeline.sh  (16 lines, Executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" weekdays

# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
pipelines/pull-requests/pipeline.yml  (1914 lines, Normal file)
(File diff suppressed because it is too large.)
pipelines/pull-requests/update-pipeline.sh  (15 lines, Executable file)
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"

# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
pipelines/security-scan/pipeline.yml  (292 lines, Normal file)
@@ -0,0 +1,292 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

display:

  background_image: https://upload.wikimedia.org/wikipedia/commons/d/d0/KelpforestI2500ppx.JPG

meta:

  notify_on_failure: &notify_on_failure
    on_failure:
      put: gchat
      timeout: 5m
      params:
        text: |
          Job `${BUILD_PIPELINE_NAME}/${BUILD_JOB_NAME}` *FAILED* :(
          ${ATC_EXTERNAL_URL}/teams/${BUILD_TEAM_NAME}/pipelines/${BUILD_PIPELINE_NAME}/jobs/${BUILD_JOB_NAME}/builds/${BUILD_NAME}

  trivy_ignores: &trivy_ignores
    IGNORE_VULNERABILITY_IDS: |
      # Medium CVE in gopkg.in/square/go-jose.v2. That project is archived, so they will never fix this.
      # That is an indirect dependency of our project, which we inherit from our direct dep of k8s.io/apiserver@v0.31.2.
      # However, the Kubernetes maintainers say that k8s is not impacted and they won't upgrade to v3.
      # See https://github.com/kubernetes/kubernetes/issues/128039. So we will ignore this for now.
      CVE-2024-28180

resource_types:

# Try using the latest version of the registry-image resource because of this problem:
# https://vmware.slack.com/archives/C6TL2PMC7/p1702052766131149
- name: registry-image
  type: registry-image
  source:
    repository: concourse/registry-image-resource
    tag: latest
    # We are only doing pulls of this resource type, but add the username and password to avoid
    # hitting a rate limit. Our free account is only allowed to have one access token, so we
    # cannot make a read-only token for performing pulls.
    username: getpinniped
    password: ((getpinniped-dockerhub-image-push-access-token))

- name: google-chat-notify-resource
  type: docker-image
  source:
    repository: springio/google-chat-notify-resource
    tag: 0.0.1-SNAPSHOT # see https://hub.docker.com/r/springio/google-chat-notify-resource/tags
    # We are only doing pulls of this resource type, but add the username and password to avoid
    # hitting a rate limit. Our free account is only allowed to have one access token, so we
    # cannot make a read-only token for performing pulls.
    username: getpinniped
    password: ((getpinniped-dockerhub-image-push-access-token))

resources:

- name: pinniped-source
  type: git
  icon: github
  source:
    uri: https://github.com/vmware/pinniped.git
    branch: main

- name: pinniped-ci
  type: git
  icon: github
  source:
    uri: https://github.com/vmware/pinniped.git
    branch: ci
    username: ((ci-bot-access-token-with-read-only-public-repos))

- name: pinniped-latest-release-image
  type: registry-image
  icon: docker
  check_every: 10m
  source:
    repository: ghcr.io/vmware/pinniped/pinniped-server
    tag: latest

- name: pinniped-latest-main-image
  type: registry-image
  icon: docker
  check_every: 10m
  source:
    repository: ((ci-ghcr-registry))/ci-build
    username: ((ci-ghcr-puller-username))
    password: ((ci-ghcr-puller-token))
    tag: latest

- name: gh-cli-image
  type: registry-image
  icon: docker
  check_every: 10m
  source:
    repository: ((ci-ghcr-registry))/gh-cli
    username: ((ci-ghcr-puller-username))
    password: ((ci-ghcr-puller-token))
    tag: latest

- name: golang-image
  type: registry-image
  icon: docker
  check_every: 10m
  source:
    repository: docker.io/golang

- name: crane-image
  type: registry-image
  icon: docker
  check_every: 10m
  source:
    repository: ((ci-ghcr-registry))/crane
    username: ((ci-ghcr-puller-username))
    password: ((ci-ghcr-puller-token))

- name: weekdays
  type: time
  icon: calendar-clock
  check_every: 10m
  source:
    location: America/Los_Angeles
    start: 6:00 AM
    stop: 7:00 AM
    days: [ Monday, Tuesday, Wednesday, Thursday, Friday ]

- name: gchat
  type: google-chat-notify-resource
  icon: chat-outline
  source:
    url: ((gchat-project-pinniped-bots-webhook-url))

jobs:

- name: nancy-main
  public: true # all logs are publicly visible
  serial: true
  plan:
  - in_parallel:
    - get: weekdays
      trigger: true
    - get: pinniped-source
  - task: get-modules
    config:
      platform: linux
      image_resource:
        type: registry-image
        source:
          repository: docker.io/golang
      inputs:
      - name: pinniped-source
      outputs:
      - name: pinniped-modules
      run:
        dir: "pinniped-source"
        path: sh
        args:
        - "-c"
        - |
          set -e
          echo "Installing jq..."
          ( apt-get update -y && apt-get install -y jq ) 2>&1 > install.log || cat install.log

          # Use 'go list' to find package dependencies, then select the associated module versions.
          # See https://github.com/sonatype-nexus-community/nancy/issues/228 for details about why
          # we can't just use 'go list -mod -json all'.
          echo "Listing Go module dependencies..."
          go list -deps -json all | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' > ../pinniped-modules/modules.json
  - task: scan
    config:
      platform: linux
      image_resource:
        type: registry-image
        source:
          repository: docker.io/sonatypecommunity/nancy
          tag: alpine
      inputs:
      - name: pinniped-modules
      params:
        SONATYPE_API_KEY: ((sonatype-api-key))
        SONATYPE_USERNAME: ((sonatype-username))
      run:
        path: 'sh'
        args:
        - '-c'
        - |
          set -e
          cat <<EOF > exclusions.txt
          # Vulnerability exclusions for Nancy:
          # https://github.com/sonatype-nexus-community/nancy#exclude-vulnerabilities
          #
          # When editing this, please add an `until=` tag on each entry so we remember to revisit
          # and clean this file later.
          # CVE-0000-00000 until=2022-01-01
          #
          # CVE-2020-8561 is in k8s.io/apiserver@v0.27.1,
          # which is the latest version as of 2023-05-10.
          # From the comments on this issue https://github.com/kubernetes/kubernetes/issues/104720
          # it seems like the Kubernetes maintainers are never going to fix it.
          # Removing the "until" date on the next line to ignore this CVE forever.
          CVE-2020-8561
          EOF

          cat pinniped-modules/modules.json | nancy sleuth \
            --exclude-vulnerability-file=exclusions.txt \
            --token ${SONATYPE_API_KEY} \
            --username ${SONATYPE_USERNAME}

- name: trivy-release
  public: true # all logs are publicly visible
  serial: true
  plan:
  - in_parallel:
    - get: weekdays
      trigger: true
    - get: pinniped-latest-release-image
      params:
        format: oci
    - get: pinniped-ci
  - task: scan
    file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml
    params:
      GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission))
      <<: *trivy_ignores
    input_mapping:
      image: pinniped-latest-release-image

- name: trivy-main
  public: true # all logs are publicly visible
  serial: true
  plan:
  - in_parallel:
    - get: weekdays
      trigger: true
    - get: pinniped-latest-main-image
      params:
        format: oci
    - get: pinniped-ci
  - task: scan
    file: pinniped-ci/pipelines/shared-tasks/scan-image-trivy/task.yml
    params:
      GITHUB_TOKEN: ((ci-bot-access-token-with-read-user-permission))
      <<: *trivy_ignores
    input_mapping:
      image: pinniped-latest-main-image

- name: all-golang-deps-updated
  public: true # all logs are publicly visible
  <<: *notify_on_failure
  serial: true
  plan:
  - in_parallel:
    - get: weekdays
      trigger: true
    - get: pinniped-source
    - get: pinniped-ci
    - get: gh-cli-image
    - get: crane-image
    - get: golang-image
      params:
        skip_download: true
  - task: check-golang-deps-updated
    file: pinniped-ci/pipelines/shared-tasks/check-golang-deps-updated/task.yml
    input_mapping:
      pinniped-in: pinniped-source
  - task: check-dockerfile-deps-updated
    image: crane-image
    file: pinniped-ci/pipelines/shared-tasks/check-dockerfile-deps-updated/task.yml
    input_mapping:
      pinniped-in: pinniped-out # the output of the previous task
  - task: create-or-update-pr
    image: gh-cli-image
    file: pinniped-ci/pipelines/shared-tasks/create-or-update-pr/task.yml
    params:
      GH_TOKEN: ((ci-bot-access-token-with-public-repo-write-permission))
      BRANCH: "pinny/bump-deps"
      COMMIT_MESSAGE: "Bump dependencies"
      PR_TITLE: "Bump dependencies"
      PR_BODY: "Automatically bumped all go.mod direct dependencies and/or images in dockerfiles."
    input_mapping:
      pinniped: pinniped-out

- name: run-go-vuln-scan
  public: true # all logs are publicly visible
  plan:
  - in_parallel:
    - get: pinniped-source
      trigger: true
    - get: pinniped-ci
  - task: run-go-vuln-scan
    file: pinniped-ci/pipelines/shared-tasks/run-go-vuln-scan/task.yml
    input_mapping:
      pinniped: pinniped-source
    params:
      BUILD_TAGS:
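For reference, the nancy-main job above boils down to a pipeline of go list, jq, and nancy; a local reproduction sketch run from a pinniped checkout, assuming jq and nancy are installed and an exclusions.txt like the one embedded above exists (the Sonatype credentials passed in the pipeline are omitted here):

    go list -deps -json all \
      | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' \
      | nancy sleuth --exclude-vulnerability-file=exclusions.txt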
pipelines/security-scan/update-pipeline.sh  (16 lines, Executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pipeline=$(basename "$script_dir")
source "$script_dir/../../hack/fly-helpers.sh"

set_pipeline "$pipeline" "$script_dir/pipeline.yml"
ensure_time_resource_has_at_least_one_version "$pipeline" weekdays

# Make the pipeline visible to non-authenticated users in the web UI.
$FLY_CLI --target "$CONCOURSE_TARGET" expose-pipeline --pipeline "$pipeline"
pipelines/shared-helpers/prepare-cluster-for-integration-tests.sh  (1322 lines, Executable file)
(File diff suppressed because it is too large.)

Some files were not shown because too many files have changed in this diff.