mirror of
https://github.com/vmware-tanzu/pinniped.git
synced 2026-01-17 03:03:09 +00:00
Compare commits
529 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
efe420b737 | ||
|
|
42e74a02e9 | ||
|
|
70480260dd | ||
|
|
82f8094de7 | ||
|
|
434e3fe435 | ||
|
|
b21b43c654 | ||
|
|
9e0195e024 | ||
|
|
d853cbc7ff | ||
|
|
9ed52e6b4a | ||
|
|
fab36c55f5 | ||
|
|
409d10baf8 | ||
|
|
ea762b405d | ||
|
|
3ff605bb39 | ||
|
|
856971e452 | ||
|
|
eaf2d9a185 | ||
|
|
3f06be2246 | ||
|
|
69137fb6b9 | ||
|
|
253d3bb36f | ||
|
|
9f80b0ea00 | ||
|
|
6f4cf705e5 | ||
|
|
ec3e4cae68 | ||
|
|
381811b36f | ||
|
|
906a88f2d3 | ||
|
|
0f8437bc3a | ||
|
|
6d047c151f | ||
|
|
9735122db9 | ||
|
|
4948e1702f | ||
|
|
406f2723ce | ||
|
|
6c555f94e3 | ||
|
|
f8e872d1af | ||
|
|
3e45bfc97d | ||
|
|
a55e9de4fc | ||
|
|
eb0d9a15fc | ||
|
|
6063674623 | ||
|
|
d574fe05ba | ||
|
|
4369cc9ff2 | ||
|
|
adf263b566 | ||
|
|
4edda802e5 | ||
|
|
db9a97721f | ||
|
|
3578d7cb9a | ||
|
|
83920db502 | ||
|
|
1a4f9e3466 | ||
|
|
e574a99c5e | ||
|
|
16ef2baf8a | ||
|
|
9beb3855b5 | ||
|
|
81f2362543 | ||
|
|
07f0181fa3 | ||
|
|
481308215d | ||
|
|
381fd51e13 | ||
|
|
541336b997 | ||
|
|
6cdd4a9506 | ||
|
|
fbe0551426 | ||
|
|
164f64a370 | ||
|
|
526be79b11 | ||
|
|
820f1e977e | ||
|
|
50258fc569 | ||
|
|
0d3ad0085d | ||
|
|
cfb76a538c | ||
|
|
e18b6fdddc | ||
|
|
5a608cc84c | ||
|
|
49145791cc | ||
|
|
6989e5da63 | ||
|
|
a2365b1cce | ||
|
|
80a520390b | ||
|
|
86e1c99dcd | ||
|
|
78ac27c262 | ||
|
|
f86a5244a6 | ||
|
|
907ccb68f5 | ||
|
|
98490b1a1b | ||
|
|
2d4d7e588a | ||
|
|
24f962f1b8 | ||
|
|
2ecb43154b | ||
|
|
dba951fe89 | ||
|
|
245854b85a | ||
|
|
5867f3699c | ||
|
|
7d5f57f923 | ||
|
|
2d497cbd36 | ||
|
|
eabe51c446 | ||
|
|
a479450940 | ||
|
|
b523e5832c | ||
|
|
079e07a51f | ||
|
|
025940d4f1 | ||
|
|
8c9c1e206d | ||
|
|
4c9cbf0706 | ||
|
|
a70a4766d2 | ||
|
|
1741f832eb | ||
|
|
b3327d7522 | ||
|
|
10793ac11f | ||
|
|
7ce760a5dd | ||
|
|
af034befb0 | ||
|
|
a8487b78c9 | ||
|
|
58bf93b10c | ||
|
|
f464e03380 | ||
|
|
efbe3a26c1 | ||
|
|
4f59d9286c | ||
|
|
6c75de9334 | ||
|
|
f425eed07c | ||
|
|
7a975d98fb | ||
|
|
635ecd7b1a | ||
|
|
29305777bb | ||
|
|
6d0b83aabf | ||
|
|
6ba712d612 | ||
|
|
eab5c2b86b | ||
|
|
e7b389ae6c | ||
|
|
e51e51dfd4 | ||
|
|
cd0194cb68 | ||
|
|
a73f14e03d | ||
|
|
e3b8c3b611 | ||
|
|
da9f24cf30 | ||
|
|
67de7f5646 | ||
|
|
43c69ec339 | ||
|
|
014fb518bc | ||
|
|
321c6a5392 | ||
|
|
db98f2810f | ||
|
|
062dfa3e75 | ||
|
|
1244a950e7 | ||
|
|
8df910361c | ||
|
|
37da441e96 | ||
|
|
6faf224e20 | ||
|
|
92372d20a9 | ||
|
|
12f0997193 | ||
|
|
e428877473 | ||
|
|
cecd691a84 | ||
|
|
1c7b3c3072 | ||
|
|
b1ea04b036 | ||
|
|
36a66f4e8b | ||
|
|
b39160e4c4 | ||
|
|
a22b414b58 | ||
|
|
8de046a561 | ||
|
|
f7c9ae8ba3 | ||
|
|
75ea0f48d9 | ||
|
|
acfc5acfb2 | ||
|
|
6506a82b19 | ||
|
|
66f4e62c6c | ||
|
|
80a23bd2fd | ||
|
|
2bdbac3e15 | ||
|
|
5b9f2ec9fc | ||
|
|
fc220d5f79 | ||
|
|
3344b5b86a | ||
|
|
557fd0df26 | ||
|
|
9bb3d4ef28 | ||
|
|
4ced58b5b7 | ||
|
|
831df90c93 | ||
|
|
82ef9e4806 | ||
|
|
879d847ffb | ||
|
|
4379d2772c | ||
|
|
21187bc28a | ||
|
|
9bad0d52f7 | ||
|
|
92fabf43b3 | ||
|
|
7d8c28a9dc | ||
|
|
bbef017989 | ||
|
|
7515af639a | ||
|
|
39b66086cc | ||
|
|
872330bee9 | ||
|
|
2cdc3defb7 | ||
|
|
da7c981f14 | ||
|
|
19c671a60a | ||
|
|
17d40b7a73 | ||
|
|
4e40c0320e | ||
|
|
a3dbb309d0 | ||
|
|
c436f84b3d | ||
|
|
f685cd228f | ||
|
|
63f9db72e8 | ||
|
|
004cfe380d | ||
|
|
b1d9665b03 | ||
|
|
4fa7e1bd76 | ||
|
|
22bf24b775 | ||
|
|
6deaa0fb1a | ||
|
|
4fe609a043 | ||
|
|
e6cb2f8220 | ||
|
|
b7bdb7f3b1 | ||
|
|
9baea83066 | ||
|
|
56be4a6761 | ||
|
|
b506ac5823 | ||
|
|
fec31b71c0 | ||
|
|
89d01b84f8 | ||
|
|
fc3b4e9ae1 | ||
|
|
2565f67824 | ||
|
|
3ee7a0d881 | ||
|
|
7207041c37 | ||
|
|
7f9cb43ffa | ||
|
|
20b21e8639 | ||
|
|
3d09afbfb3 | ||
|
|
b0315e5e9f | ||
|
|
f8f16fadb9 | ||
|
|
ba53218711 | ||
|
|
1415fcc6dc | ||
|
|
ab82b2ea64 | ||
|
|
1dcba155a2 | ||
|
|
9c8d30fa86 | ||
|
|
1d004a7326 | ||
|
|
a2e8b2aa0c | ||
|
|
3e4816c811 | ||
|
|
8e5912e4c2 | ||
|
|
2959b54e7b | ||
|
|
f49317d7e4 | ||
|
|
2546d3f823 | ||
|
|
0c5d38090e | ||
|
|
cd00aad610 | ||
|
|
eb4b2b1ecd | ||
|
|
b5f7ff2e33 | ||
|
|
21fd807037 | ||
|
|
b0d99abf22 | ||
|
|
0135d8b6c3 | ||
|
|
ecf67862e2 | ||
|
|
aeee2cf05e | ||
|
|
f0c400235a | ||
|
|
7848332d47 | ||
|
|
1fcf95af01 | ||
|
|
a503fa8673 | ||
|
|
371b172616 | ||
|
|
ddb7a20c53 | ||
|
|
a4fe76f6a9 | ||
|
|
9d7e073a9d | ||
|
|
118ee7f9aa | ||
|
|
e0b5c3a146 | ||
|
|
cbc80d5bc4 | ||
|
|
20a3208564 | ||
|
|
91ba39bd3b | ||
|
|
f6ea93e273 | ||
|
|
d728c926c1 | ||
|
|
9ecc88a898 | ||
|
|
18b000e324 | ||
|
|
e6dd22ffb5 | ||
|
|
92a6b7f4a4 | ||
|
|
e39a38ecf2 | ||
|
|
9d9b56073c | ||
|
|
07bb2bb956 | ||
|
|
abe3f1ba4b | ||
|
|
1375df185d | ||
|
|
8f93fbb87b | ||
|
|
68893a1e15 | ||
|
|
9440316c20 | ||
|
|
f9554e0bde | ||
|
|
89f059ae03 | ||
|
|
7360489d1b | ||
|
|
61b758450e | ||
|
|
9539f29f94 | ||
|
|
6cc7bdf7d3 | ||
|
|
8f4a2f98d7 | ||
|
|
8ddc1a1e92 | ||
|
|
d240796110 | ||
|
|
7502190135 | ||
|
|
aea3f0f90d | ||
|
|
f66f7f14f5 | ||
|
|
d8bcea88a7 | ||
|
|
2629a9c42f | ||
|
|
90fe733f94 | ||
|
|
5ed97f7f9e | ||
|
|
80153f9a80 | ||
|
|
4306599396 | ||
|
|
6e59596285 | ||
|
|
c2e6a1408d | ||
|
|
4e08866e87 | ||
|
|
cbd6dd3356 | ||
|
|
eb05e7a138 | ||
|
|
22f1ca24d9 | ||
|
|
8b36f2e8ae | ||
|
|
34d13f71c2 | ||
|
|
1aef2f07d3 | ||
|
|
142e9a1583 | ||
|
|
ed8b1be178 | ||
|
|
399e1d2eb8 | ||
|
|
ba2e2f509a | ||
|
|
6d43d7ba19 | ||
|
|
ace01c86de | ||
|
|
d4b184a7d5 | ||
|
|
76bd274fc4 | ||
|
|
0a805861ea | ||
|
|
2b297c28d5 | ||
|
|
d0a9d8df33 | ||
|
|
88f3b41e71 | ||
|
|
89b6b9ee44 | ||
|
|
39c299a32d | ||
|
|
3929fa672e | ||
|
|
43888e9e0a | ||
|
|
a26d86044e | ||
|
|
5946c2920a | ||
|
|
6b90dc8bb7 | ||
|
|
1b9a70d089 | ||
|
|
40d1360b74 | ||
|
|
57578f16d4 | ||
|
|
003aef75d2 | ||
|
|
e3397c1c35 | ||
|
|
c4ce97f1a5 | ||
|
|
f95f5857ef | ||
|
|
cedd47b92e | ||
|
|
7fa8f7797a | ||
|
|
a456daa0b2 | ||
|
|
ecde8fa8af | ||
|
|
29654c39a5 | ||
|
|
d8d49be5d9 | ||
|
|
769ef71db7 | ||
|
|
87b9ff2131 | ||
|
|
a45748f020 | ||
|
|
ccefc29eb0 | ||
|
|
76a44ecd58 | ||
|
|
787cf47c39 | ||
|
|
9376f034ea | ||
|
|
1977dc2ce7 | ||
|
|
3fd4458e6a | ||
|
|
ae0b97d807 | ||
|
|
50e70f73ae | ||
|
|
df1a1cf1bd | ||
|
|
0d034cd18e | ||
|
|
dd8ce677ba | ||
|
|
c6f1defa9d | ||
|
|
6e46ff345a | ||
|
|
b6c468117e | ||
|
|
1b23e31464 | ||
|
|
c02b6fee8f | ||
|
|
87eddf8bbd | ||
|
|
9648db0837 | ||
|
|
ba0b997234 | ||
|
|
864db74306 | ||
|
|
e48d9faf27 | ||
|
|
031129778e | ||
|
|
ed9fdce6a8 | ||
|
|
d2f6eebc66 | ||
|
|
4cb0fd3949 | ||
|
|
e0f0eca512 | ||
|
|
bfabcdcdd1 | ||
|
|
224b59e740 | ||
|
|
553b519d0f | ||
|
|
b80f3148fd | ||
|
|
d6e745203d | ||
|
|
0806074d94 | ||
|
|
13d4a38eca | ||
|
|
5ec1fbd1ca | ||
|
|
fadd718d08 | ||
|
|
28a500fce9 | ||
|
|
745775bf4b | ||
|
|
ce3de2b516 | ||
|
|
8034ef24ff | ||
|
|
626fc6aa8d | ||
|
|
cc9ae23a0c | ||
|
|
7152ffd730 | ||
|
|
6300898810 | ||
|
|
7c8876a812 | ||
|
|
b3df59ca13 | ||
|
|
b4130af2bf | ||
|
|
5394008d6f | ||
|
|
3583f7a09f | ||
|
|
df3c387f2e | ||
|
|
fa0533fae9 | ||
|
|
86c3f89b2e | ||
|
|
b00cec954e | ||
|
|
b379d5148c | ||
|
|
aecd005c60 | ||
|
|
6dd331b21d | ||
|
|
c4bbb64622 | ||
|
|
7143058462 | ||
|
|
c5d5914866 | ||
|
|
af656d4b02 | ||
|
|
9e9868bd16 | ||
|
|
cbe4c1b370 | ||
|
|
ad55f9e310 | ||
|
|
0b4590b237 | ||
|
|
f10c61f591 | ||
|
|
31e6d8fbb1 | ||
|
|
dd278b46a8 | ||
|
|
da5b509cc6 | ||
|
|
2b573d8642 | ||
|
|
519484816d | ||
|
|
6da420d865 | ||
|
|
f8567450ee | ||
|
|
08961919b5 | ||
|
|
92939cf118 | ||
|
|
fb843aa15b | ||
|
|
7ce49bf89c | ||
|
|
09571d1117 | ||
|
|
573202140d | ||
|
|
fdbc30365d | ||
|
|
b70c62a1b3 | ||
|
|
2b9d2ca293 | ||
|
|
12120d7e8b | ||
|
|
727a5883f2 | ||
|
|
ca80d87dcf | ||
|
|
e884cef1ef | ||
|
|
597408a977 | ||
|
|
548874a641 | ||
|
|
cf56c67329 | ||
|
|
9fe82ec5f1 | ||
|
|
2aa80e3576 | ||
|
|
0f248768a3 | ||
|
|
52546fad90 | ||
|
|
bd594e19ff | ||
|
|
2e05e032ee | ||
|
|
733f80b7ae | ||
|
|
ae7be3ea94 | ||
|
|
a8dbdfd1c4 | ||
|
|
3d293c96bc | ||
|
|
02c17d875e | ||
|
|
076f8805d2 | ||
|
|
5aebb76146 | ||
|
|
ec6ec2abe9 | ||
|
|
b59604b47c | ||
|
|
66fe580e99 | ||
|
|
a448b3474e | ||
|
|
04cacabc16 | ||
|
|
3bc0389bab | ||
|
|
15bee7456c | ||
|
|
8bdf05dae4 | ||
|
|
ee865fe97f | ||
|
|
9a859875a7 | ||
|
|
e0cac97084 | ||
|
|
a5f7de429d | ||
|
|
aa90173891 | ||
|
|
409462e989 | ||
|
|
a8f3c62d37 | ||
|
|
7ba43e0c3f | ||
|
|
43c3f1ab2e | ||
|
|
b70f3aefe5 | ||
|
|
1e56ecfdb4 | ||
|
|
42616e7d8a | ||
|
|
271eb9b837 | ||
|
|
48433eb36b | ||
|
|
bc4351f51a | ||
|
|
531954511b | ||
|
|
a15a106fd3 | ||
|
|
b0d9db1bcc | ||
|
|
1a349bb609 | ||
|
|
0ee4f0417d | ||
|
|
ebe39c8663 | ||
|
|
1e8463ac2d | ||
|
|
a5dbc324f6 | ||
|
|
27cd82065b | ||
|
|
9e44bc28d9 | ||
|
|
0acb8c8d3c | ||
|
|
ce71a5bac8 | ||
|
|
425e95bed4 | ||
|
|
418811ef19 | ||
|
|
c9026cd150 | ||
|
|
63a5381968 | ||
|
|
74a328de41 | ||
|
|
8a313bc653 | ||
|
|
6dfae48b65 | ||
|
|
8a8a278029 | ||
|
|
f7b0cf8f8a | ||
|
|
69f766d41d | ||
|
|
5dea51c062 | ||
|
|
b16bf52580 | ||
|
|
f47927331f | ||
|
|
066bc84e2a | ||
|
|
9f0d2606b1 | ||
|
|
f986600d5b | ||
|
|
349dd98a2f | ||
|
|
60bbcc12d8 | ||
|
|
259fc0e794 | ||
|
|
a1593c4b7b | ||
|
|
8606cc9662 | ||
|
|
613f324a47 | ||
|
|
d8c7a25487 | ||
|
|
07a71236aa | ||
|
|
757d987204 | ||
|
|
899f736b8c | ||
|
|
6001f1f456 | ||
|
|
99b35e1a61 | ||
|
|
e5902533eb | ||
|
|
0d667466e8 | ||
|
|
9bfec08d90 | ||
|
|
6cc8a2f8dd | ||
|
|
6fe7a4c9dc | ||
|
|
924eb1abaa | ||
|
|
a7748a360e | ||
|
|
84bb0a9a21 | ||
|
|
e1f44e2654 | ||
|
|
9af3637403 | ||
|
|
6a93de3931 | ||
|
|
6c87c793db | ||
|
|
5fdc20886d | ||
|
|
23c1b32a02 | ||
|
|
d4eeb74641 | ||
|
|
31c4e6560d | ||
|
|
4b1a7436a9 | ||
|
|
549da37805 | ||
|
|
240f9f86b1 | ||
|
|
b638bd7eeb | ||
|
|
5fa5b9a9a9 | ||
|
|
9118869d04 | ||
|
|
e92bdbea64 | ||
|
|
d71a620a18 | ||
|
|
7cac20fc89 | ||
|
|
260a271859 | ||
|
|
611859f04a | ||
|
|
fd4c6f6a71 | ||
|
|
092cc26789 | ||
|
|
a3bce5f42e | ||
|
|
a01970602a | ||
|
|
da4f036622 | ||
|
|
ffa417f745 | ||
|
|
61a4eec144 | ||
|
|
9edae03812 | ||
|
|
63f5416b21 | ||
|
|
5a66b56b93 | ||
|
|
2596ddfa25 | ||
|
|
89c8d1183b | ||
|
|
7da347866b | ||
|
|
d3d9cc6fac | ||
|
|
81e91accfa | ||
|
|
a544f7d7bf | ||
|
|
3fd7e7835a | ||
|
|
a9cf376000 | ||
|
|
fe81958d2c | ||
|
|
12255109bd | ||
|
|
e9145bbe2e | ||
|
|
c307a263ec | ||
|
|
1c7109d5aa | ||
|
|
85e3b356dd | ||
|
|
518ae7eb4c | ||
|
|
619ae2b178 | ||
|
|
568febea79 | ||
|
|
8d6a645915 | ||
|
|
fd70eda033 | ||
|
|
622d488fc3 | ||
|
|
f0d7077efc | ||
|
|
ee7480bcda | ||
|
|
68d01f97a4 | ||
|
|
4e17853ecf | ||
|
|
7eaca5a56d | ||
|
|
82f89c501a | ||
|
|
9bcd532c19 | ||
|
|
84dcbf4f5f | ||
|
|
57a22f99aa | ||
|
|
cc81dd04e9 | ||
|
|
c85507e46d | ||
|
|
90ff9d57b8 | ||
|
|
fb6085da39 | ||
|
|
911f8736f1 |
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*.go.tmpl linguist-language=Go
|
||||
generated/** linguist-generated
|
||||
42
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
42
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Explain a problem you are experiencing
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
|
||||
Hey! Thanks for opening an issue!
|
||||
|
||||
IMPORTANT: If you believe this bug is a security issue, please don't use this template and follow our [security guidelines](/doc/security.md).
|
||||
|
||||
It is recommended that you include screenshots and logs to help everyone achieve a shared understanding of the bug.
|
||||
|
||||
-->
|
||||
|
||||
**What happened?**
|
||||
|
||||
> Please be specific and include screenshots and logs!
|
||||
|
||||
**What did you expect to happen?**
|
||||
|
||||
> Please be specific and include proposed behavior!
|
||||
|
||||
**What is the simplest way to reproduce this behavior?**
|
||||
|
||||
**In what environment did you see this bug?**
|
||||
- Pinniped server version:
|
||||
- Pinniped client version:
|
||||
- Pinniped container image (if using a public container image):
|
||||
- Pinniped configuration (what IDP(s) are you using? what downstream credential minting mechanisms are you using?):
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- Kubernetes installer & version (e.g., `kubeadm version`:
|
||||
- Cloud provider or hardware configuration:
|
||||
- OS (e.g: `cat /etc/os-release`):
|
||||
- Kernel (e.g. `uname -a`):
|
||||
- Others:
|
||||
|
||||
**What else is there to know about this bug?**
|
||||
46
.github/ISSUE_TEMPLATE/feature-proposal.md
vendored
Normal file
46
.github/ISSUE_TEMPLATE/feature-proposal.md
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
---
|
||||
name: Feature proposal
|
||||
about: Suggest a way to improve this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
|
||||
Hey! Thanks for opening an issue!
|
||||
|
||||
It is recommended that you include screenshots and logs to help everyone achieve a shared understanding of the improvement.
|
||||
|
||||
-->
|
||||
|
||||
**What is the problem that you wish to solve?**
|
||||
|
||||
> Please provide a clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**What is the best solution to the above problem?**
|
||||
|
||||
> Please provide a clear and concise description of what the solution is.
|
||||
|
||||
**What are the alternative solutions that you have considered?**
|
||||
|
||||
> Please provide a clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**How will this project improvement be tested?**
|
||||
|
||||
> Please include your thoughts on how this improvement can be thoroughly tested.
|
||||
|
||||
**In what environment do you hope to see this improvement?**
|
||||
- Pinniped server version:
|
||||
- Pinniped client version:
|
||||
- Pinniped container image (if using a public container image):
|
||||
- Pinniped configuration (what IDP(s) are you using? what downstream credential minting mechanisms are you using?):
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- Kubernetes installer & version (e.g., `kubeadm version`:
|
||||
- Cloud provider or hardware configuration:
|
||||
- OS (e.g: `cat /etc/os-release`):
|
||||
- Kernel (e.g. `uname -a`):
|
||||
- Others:
|
||||
|
||||
**What else is there to know about this improvement?**
|
||||
13
.github/dependabot.yml
vendored
Normal file
13
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# See https://docs.github.com/en/github/administering-a-repository/enabling-and-disabling-version-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
28
.github/pull_request_template.md
vendored
Normal file
28
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
Thank you for submitting a pull request for Pinniped!
|
||||
|
||||
Before submitting, please see the guidelines in doc/contributing.md in this repo.
|
||||
|
||||
Please note that a project maintainer will need to review and provide an
|
||||
initial approval on the PR to cause CI tests to automatically start.
|
||||
Also note that if you push additional commits to the PR, those commits
|
||||
will need another initial approval before CI will pick them up.
|
||||
|
||||
Reminder: Did you remember to run all the linter, unit tests, and integration tests
|
||||
described in doc/contributing.md on your branch before submitting this PR?
|
||||
|
||||
Below is a template to help you describe your PR.
|
||||
|
||||
-- Please delete this line and all lines above this line before submitting the PR. Thanks! --
|
||||
|
||||
**Summary of the changes included in this PR**
|
||||
|
||||
**Issue(s) addressed by this PR**
|
||||
- Fixes #
|
||||
|
||||
**Things to consider while reviewing this PR**
|
||||
|
||||
**Suggested release note for the first release which contains this PR**
|
||||
|
||||
```
|
||||
release-note here
|
||||
```
|
||||
23
.gitignore
vendored
23
.gitignore
vendored
@@ -1,7 +1,18 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# goland
|
||||
.idea
|
||||
.terraform
|
||||
*.tfstate.*
|
||||
*.tfstate
|
||||
kubeconfig.yaml
|
||||
.DS_Store
|
||||
site/
|
||||
70
.golangci.yaml
Normal file
70
.golangci.yaml
Normal file
@@ -0,0 +1,70 @@
|
||||
# https://github.com/golangci/golangci-lint#config-file
|
||||
run:
|
||||
deadline: 1m
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
# default linters
|
||||
- deadcode
|
||||
- errcheck
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unused
|
||||
- varcheck
|
||||
|
||||
# additional linters for this project (we should disable these if they get annoying).
|
||||
- asciicheck
|
||||
- bodyclose
|
||||
- depguard
|
||||
- dogsled
|
||||
- exhaustive
|
||||
- exportloopref
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- godot
|
||||
- goerr113
|
||||
- goheader
|
||||
- goimports
|
||||
- golint
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- misspell
|
||||
- nakedret
|
||||
- nestif
|
||||
- noctx
|
||||
- nolintlint
|
||||
- prealloc
|
||||
- rowserrcheck
|
||||
- scopelint
|
||||
- sqlclosecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- whitespace
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
# exclude tests from some rules for things that are useful in a testing context.
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- goerr113
|
||||
|
||||
linters-settings:
|
||||
funlen:
|
||||
lines: 125
|
||||
statements: 50
|
||||
goheader:
|
||||
template: |-
|
||||
Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
goimports:
|
||||
local-prefixes: go.pinniped.dev
|
||||
@@ -1,23 +1,17 @@
|
||||
# This is a configuration for https://pre-commit.com/.
|
||||
# On macOS, try `brew install pre-commit` and then run `pre-commit install`.
|
||||
exclude: '^generated/'
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v5.0.0
|
||||
hooks:
|
||||
# TODO: find a version of this to validate ytt templates?
|
||||
# - id: check-yaml
|
||||
# args: ['--allow-multiple-documents']
|
||||
- id: check-json
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
- id: check-merge-conflict
|
||||
- id: check-added-large-files
|
||||
- id: check-byte-order-marker
|
||||
- id: detect-private-key
|
||||
- id: mixed-line-ending
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: validate-copyright-year
|
||||
name: Validate copyright year
|
||||
entry: hack/check-copyright-year.sh
|
||||
language: script
|
||||
- repo: git://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v3.2.0
|
||||
hooks:
|
||||
# TODO: find a version of this to validate ytt templates?
|
||||
# - id: check-yaml
|
||||
# args: ['--allow-multiple-documents']
|
||||
- id: check-json
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
- id: check-merge-conflict
|
||||
- id: check-added-large-files
|
||||
- id: check-byte-order-marker
|
||||
- id: detect-private-key
|
||||
exclude: testdata
|
||||
- id: mixed-line-ending
|
||||
|
||||
603
AD-SETUP.md
603
AD-SETUP.md
@@ -1,603 +0,0 @@
|
||||
# Creating an Active Directory server on Google Cloud for Pinniped integration tests
|
||||
|
||||
This documents the steps that were taken to create our test AD server used by the integration tests.
|
||||
The integration tests use LDAPS and StartTLS to connect to the AD server.
|
||||
|
||||
## Create a Windows Server VM and configure it as an AD Domain Controller
|
||||
|
||||
The steps in this section were mostly inspired by
|
||||
https://cloud.google.com/architecture/deploy-an-active-directory-forest-on-compute-engine.
|
||||
|
||||
From your Mac, create a VPC, subnet, firewall rules, admin password, reserved static IP, and the VM itself.
|
||||
|
||||
On your Mac:
|
||||
|
||||
```shell
|
||||
# Login as yourself.
|
||||
gcloud auth login
|
||||
|
||||
# Set some variables.
|
||||
project="REDACTED" # Change this to be the actual project name before running these commands.
|
||||
region="us-west1"
|
||||
zone="us-west1-c"
|
||||
vpc_name="ad"
|
||||
|
||||
# Create VPC.
|
||||
gcloud compute networks create ${vpc_name} \
|
||||
--project ${project} \
|
||||
--description "VPC network to deploy Active Directory" \
|
||||
--subnet-mode custom
|
||||
|
||||
# Create subnet.
|
||||
# The google tutorial says to "enable Private Google Access so that Windows can activate without internet access."
|
||||
gcloud compute networks subnets create domain-controllers \
|
||||
--project ${project} --region ${region} \
|
||||
--network ${vpc_name} \
|
||||
--range "10.0.0.0/28" \
|
||||
--enable-private-ip-google-access
|
||||
|
||||
# Create a firewall rule to allow RDP. Find out what your public IP address is by going to https://whatismyipaddress.com.
|
||||
# Replace the X.X.X.X placeholder address shown here with your real IPv4 address.
|
||||
my_ip=X.X.X.X
|
||||
gcloud compute firewall-rules create allow-rdp-ingress-to-addc \
|
||||
--project ${project} \
|
||||
--direction INGRESS \
|
||||
--action allow \
|
||||
--rules tcp:3389 \
|
||||
--source-ranges "${my_ip}/32" \
|
||||
--target-tags ad-domaincontroller \
|
||||
--network ${vpc_name} \
|
||||
--priority 10000
|
||||
|
||||
# Allow LDAPS (port 636) from the whole internet.
|
||||
gcloud compute firewall-rules create allow-ldaps-ingress-to-addc \
|
||||
--project ${project} \
|
||||
--direction INGRESS \
|
||||
--action allow \
|
||||
--rules tcp:636 \
|
||||
--source-ranges "0.0.0.0/0" \
|
||||
--target-tags ad-domaincontroller \
|
||||
--network ${vpc_name} \
|
||||
--priority 10000
|
||||
|
||||
# Allow LDAP (port 389) from the whole internet, to allow the integration tests to use StartTLS.
|
||||
gcloud compute firewall-rules create allow-ldap-ingress-to-addc \
|
||||
--project ${project} \
|
||||
--direction INGRESS \
|
||||
--action allow \
|
||||
--rules tcp:389 \
|
||||
--source-ranges "0.0.0.0/0" \
|
||||
--target-tags ad-domaincontroller \
|
||||
--network ${vpc_name} \
|
||||
--priority 10000
|
||||
|
||||
# Reserve a static public IP address for the domain controller VM.
|
||||
addressOfDc1=$(gcloud compute addresses create ad-domain-controller \
|
||||
--project ${project} --region ${region} \
|
||||
--format="value(address)")
|
||||
|
||||
# Create an admin password for the Administrator user on Windows, and save it to secrets manager.
|
||||
password="$(openssl rand -hex 8)-$(openssl rand -hex 8)"
|
||||
echo -n "$password" > password.tmp
|
||||
gcloud secrets create active-directory-dc1-password \
|
||||
--project ${project} \
|
||||
--data-file password.tmp
|
||||
rm password.tmp
|
||||
|
||||
# This creates a service account called ad-domaincontroller@PROJECT_NAME.iam.gserviceaccount.com
|
||||
# (where PROJECT_NAME is the actual GCP project name) and sets the account name to the
|
||||
# variable $dcServiceAccount.
|
||||
dcServiceAccount=$(gcloud iam service-accounts create ad-domaincontroller \
|
||||
--project ${project} \
|
||||
--display-name "AD Domain Controller VM Service Account" \
|
||||
--format "value(email)")
|
||||
|
||||
# Allow the new service account to temporarily read the Windows admin password from secret manager.
|
||||
# The following `date` command might only work on MacOS. It prints the time like this: 2024-10-23T19:20:36Z
|
||||
one_hour_from_now=$(TZ=UTC date -v "+1H" +"%Y-%m-%dT%H:%M:%SZ")
|
||||
gcloud secrets add-iam-policy-binding active-directory-dc1-password \
|
||||
--project ${project} \
|
||||
"--member=serviceAccount:$dcServiceAccount" \
|
||||
--role=roles/secretmanager.secretAccessor \
|
||||
--condition="title=Expires after 1h,expression=request.time < timestamp('$one_hour_from_now')"
|
||||
|
||||
# Optional: list all bindings to see the binding that you just created.
|
||||
gcloud secrets get-iam-policy active-directory-dc1-password \
|
||||
--project ${project}
|
||||
|
||||
# Create a powershell startup script in a local file.
|
||||
cat <<"EOF" > dc-startup.ps1
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
#
|
||||
# Only run the script if the VM is not a domain controller already.
|
||||
#
|
||||
if ((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 2) {
|
||||
exit
|
||||
}
|
||||
|
||||
#
|
||||
# Read configuration from metadata.
|
||||
#
|
||||
Import-Module "${Env:ProgramFiles}\Google\Compute Engine\sysprep\gce_base.psm1"
|
||||
|
||||
Write-Host "Reading metadata..."
|
||||
$ActiveDirectoryDnsDomain = Get-MetaData -Property "attributes/ActiveDirectoryDnsDomain" -instance_only
|
||||
$ActiveDirectoryNetbiosDomain = Get-MetaData -Property "attributes/ActiveDirectoryNetbiosDomain" -instance_only
|
||||
$ProjectId = Get-MetaData -Property "project-id" -project_only
|
||||
$AccessToken = (Get-MetaData -Property "service-accounts/default/token" | ConvertFrom-Json).access_token
|
||||
|
||||
#
|
||||
# Read the DSRM password from secret manager.
|
||||
#
|
||||
Write-Host "Reading secret from secret manager..."
|
||||
$Secret = (Invoke-RestMethod `
|
||||
-Headers @{
|
||||
"Metadata-Flavor" = "Google";
|
||||
"x-goog-user-project" = $ProjectId;
|
||||
"Authorization" = "Bearer $AccessToken"} `
|
||||
-Uri "https://secretmanager.googleapis.com/v1/projects/$ProjectId/secrets/active-directory-dc1-password/versions/latest:access")
|
||||
$DsrmPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Secret.payload.data))
|
||||
$DsrmPassword = ConvertTo-SecureString -AsPlainText $DsrmPassword -force
|
||||
|
||||
#
|
||||
# Promote.
|
||||
#
|
||||
Write-Host "Setting administrator password..."
|
||||
Set-LocalUser -Name Administrator -Password $DsrmPassword
|
||||
|
||||
Write-Host "Creating a new forest $ActiveDirectoryDnsDomain ($ActiveDirectoryNetbiosDomain)..."
|
||||
Install-ADDSForest `
|
||||
-DomainName $ActiveDirectoryDnsDomain `
|
||||
-DomainNetbiosName $ActiveDirectoryNetbiosDomain `
|
||||
-SafeModeAdministratorPassword $DsrmPassword `
|
||||
-DomainMode Win2008R2 `
|
||||
-ForestMode Win2008R2 `
|
||||
-InstallDns `
|
||||
-CreateDnsDelegation:$False `
|
||||
-NoRebootOnCompletion:$True `
|
||||
-Confirm:$false
|
||||
|
||||
#
|
||||
# Configure DNS.
|
||||
#
|
||||
Write-Host "Configuring DNS settings..."
|
||||
Get-Netadapter| Disable-NetAdapterBinding -ComponentID ms_tcpip6
|
||||
Set-DnsClientServerAddress `
|
||||
-InterfaceIndex (Get-NetAdapter -Name Ethernet).InterfaceIndex `
|
||||
-ServerAddresses 127.0.0.1
|
||||
|
||||
#
|
||||
# Enable LSA protection.
|
||||
#
|
||||
New-ItemProperty `
|
||||
-Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa" `
|
||||
-Name "RunAsPPL" `
|
||||
-Value 1 `
|
||||
-PropertyType DWord
|
||||
|
||||
Write-Host "Restarting to apply all settings..."
|
||||
Restart-Computer
|
||||
EOF
|
||||
|
||||
# Create a domain controller VM.
|
||||
# E2 are the cheapest VMs. e2-medium has 2 vCPUs (shared with other customers) and 4 GB of memory.
|
||||
# See https://cloud.google.com/compute/docs/general-purpose-machines#e2-shared-core.
|
||||
# When we originally set up this VM, we actually started it as n2-standard-2 and after we
|
||||
# finished setting up everything as shown in this guide, then we stopped the VM and changed its
|
||||
# type to e2-medium and started the VM again. Maybe it would work fine to create it as
|
||||
# e2-medium from the beginning, but note that we didn't actually test that.
|
||||
gcloud compute instances create active-directory-dc1 \
|
||||
--project ${project} \
|
||||
--zone ${zone} \
|
||||
--image-family windows-2022 \
|
||||
--image-project windows-cloud \
|
||||
--machine-type e2-medium \
|
||||
--tags ad-domaincontroller \
|
||||
--metadata "ActiveDirectoryDnsDomain=activedirectory.test.pinniped.dev,ActiveDirectoryNetbiosDomain=pinniped-ad,sysprep-specialize-script-ps1=Install-WindowsFeature AD-Domain-Services -IncludeManagementTools; Install-WindowsFeature DNS,disable-account-manager=true" \
|
||||
--metadata-from-file windows-startup-script-ps1=dc-startup.ps1 \
|
||||
--address ${addressOfDc1} \
|
||||
--subnet=domain-controllers \
|
||||
--service-account "$dcServiceAccount" \
|
||||
--scopes cloud-platform \
|
||||
--shielded-integrity-monitoring \
|
||||
--shielded-secure-boot \
|
||||
--shielded-vtpm
|
||||
|
||||
# Monitor the initialization process of the first domain controller by viewing its serial port output.
|
||||
# It should install the sysprep stuff, reboot, run our startup script, and then reboot again.
|
||||
gcloud compute instances tail-serial-port-output active-directory-dc1 \
|
||||
--project ${project} \
|
||||
--zone ${zone}
|
||||
# Use CTRL-C to cancel tailing the output.
|
||||
```
|
||||
|
||||
## Update DNS
|
||||
|
||||
Update the Cloud DNS entry for `activedirectory.test.pinniped.dev.` to be an "A" record pointing to the
|
||||
public static IP of the VM. This is easier to do in the Cloud DNS UI in your browser.
|
||||
It would take many gcloud CLI commands to accomplish the same task.
|
||||
|
||||
## Configure test users and groups
|
||||
|
||||
Make sure you have an RDP client installed. On a Mac, you can install RDP from the App Store.
|
||||
It was recently renamed "Windows App".
|
||||
|
||||
Note: To copy/paste in the RDP client, you may need to use CTRL-C/CTRL-V if CMD-C/CMD-V don't work.
|
||||
|
||||
RDP into the Windows VM. To connect, use `activedirectory.test.pinniped.dev` as the name of the server,
|
||||
the username `Administrator`, and the password from the `active-directory-dc1-password` entry in Secrets Manager.
|
||||
You can ignore the RDP certificate error.
|
||||
|
||||
In your RDP session, open Powershell. Then run the following commands to add some users and groups,
|
||||
change the password policy, and grant some permissions.
|
||||
|
||||
Before running the commands, replace the redacted passwords as follows:
|
||||
- The value for `REDACTED_BIND_USER_PASSWORD` can be found at `aws-ad-bind-account-password` in the `concourse-secrets` secret
|
||||
- The value for `REDACTED_PINNY_USER_PASSWORD` can be found at `aws-ad-user-password` in the `concourse-secrets` secret
|
||||
- The value for `REDACTED_DEACTIVATED_USER_PASSWORD` can be found at `aws-ad-deactivated-user-password` in the `concourse-secrets` secret
|
||||
|
||||
```shell
|
||||
New-ADOrganizationalUnit -Name "pinniped-ad" `
|
||||
-ProtectedFromAccidentalDeletion $false
|
||||
|
||||
New-ADOrganizationalUnit -Name "Users" `
|
||||
-Path "OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
|
||||
-ProtectedFromAccidentalDeletion $false
|
||||
|
||||
New-ADOrganizationalUnit -Name "test-users" `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
|
||||
-Description "integration tests will create and delete ephemeral users here" `
|
||||
-ProtectedFromAccidentalDeletion $false
|
||||
|
||||
# Print all OUs to validate that they were created.
|
||||
Get-ADOrganizationalUnit -Filter *
|
||||
|
||||
New-ADUser -Name "Bind User" -SamAccountName "bind-user" -GivenName "Bind" -Surname "User" -DisplayName "Bind User" `
|
||||
-UserPrincipalName "bind-user@activedirectory.test.pinniped.dev" `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
|
||||
-AccountPassword (ConvertTo-SecureString "REDACTED_BIND_USER_PASSWORD" -AsPlainText -Force) `
|
||||
-Enabled $true -PasswordNeverExpires $true
|
||||
|
||||
# Note that the value of EmailAddress is not a real email address, but that's okay.
|
||||
New-ADUser -Name "Pinny Seal" -SamAccountName "pinny" -GivenName "Pinny" -Surname "Seal" -DisplayName "Pinny Seal" `
|
||||
-UserPrincipalName "pinny@activedirectory.test.pinniped.dev" `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
|
||||
-EmailAddress "tanzu-user-authentication@groups.vmware.com" `
|
||||
-AccountPassword (ConvertTo-SecureString "REDACTED_PINNY_USER_PASSWORD" -AsPlainText -Force) `
|
||||
-Enabled $true -PasswordNeverExpires $true
|
||||
|
||||
New-ADUser -Name "Deactivated User" -SamAccountName "deactivated-user" -GivenName "Deactivated" -Surname "User" -DisplayName "Deactivated User" `
|
||||
-UserPrincipalName "deactivated-user@activedirectory.test.pinniped.dev" `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
|
||||
-AccountPassword (ConvertTo-SecureString "REDACTED_DEACTIVATED_USER_PASSWORD" -AsPlainText -Force) `
|
||||
-Enabled $false -PasswordNeverExpires $true
|
||||
|
||||
# Take note of the pinny account's ObjectGUID. You will need to edit the concourse-secrets secret later to update this GUID value.
|
||||
# This value should look something like "288188dd-ab76-4f61-b6e4-c72e081502c5".
|
||||
Get-ADUser pinny -Properties * | Select SamaccountName,ObjectGUID
|
||||
|
||||
# Print all users to validate that they were created.
|
||||
Get-ADUser -Filter *
|
||||
|
||||
New-ADGroup -Name "Marine Mammals" -SamAccountName "Marine Mammals" -DisplayName "Marine Mammals" `
|
||||
-GroupCategory Security -GroupScope Global `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
|
||||
|
||||
Add-ADGroupMember -Identity "Marine Mammals" -Members "pinny"
|
||||
|
||||
New-ADGroup -Name "Mammals" -SamAccountName "Mammals" -DisplayName "Mammals" `
|
||||
-GroupCategory Security -GroupScope Global `
|
||||
-Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
|
||||
|
||||
Add-ADGroupMember -Identity "Mammals" -Members "Marine Mammals"
|
||||
|
||||
# Change the default password policy. There are some integration tests that rely on this.
|
||||
# This is the equivalent of doing this in the Windows "Active Directory Administrative Center" UI:
|
||||
# check "enforce account lockout policy", give it 20 failed attempts and a 15-minute reset, then
|
||||
# uncheck "enforce minimum password age" so we can change the password immediately upon creating a user.
|
||||
Set-ADDefaultDomainPasswordPolicy -Identity "activedirectory.test.pinniped.dev" `
|
||||
-LockoutThreshold 20 -LockoutDuration "00:15:00" -LockoutObservationWindow "00:15:00" `
|
||||
-MinPasswordAge 0
|
||||
|
||||
# Print the policy to validate that it was updated.
|
||||
Get-ADDefaultDomainPasswordPolicy
|
||||
|
||||
# We need to allow the bind-user to create/delete/edit users and groups within the test-users OU, because several
|
||||
# integration tests want to crate/delete/edit ephemeral test users and groups.
|
||||
# These access control steps were inspired by https://the-itguy.de/delegate-access-in-active-directory-with-powershell/.
|
||||
# This is intended to be the equivalent of using the UI to assign permissions like this: right click on "test-users",
|
||||
# select Delegate Control, select "bind-user" as the user, select "create, delete and manage user accounts" and
|
||||
# "reset user passwords" as the tasks to delegate.
|
||||
function New-ADDGuidMap
|
||||
{
|
||||
$rootdse = Get-ADRootDSE
|
||||
$guidmap = @{ }
|
||||
$GuidMapParams = @{
|
||||
SearchBase = ($rootdse.SchemaNamingContext)
|
||||
LDAPFilter = "(schemaidguid=*)"
|
||||
Properties = ("lDAPDisplayName", "schemaIDGUID")
|
||||
}
|
||||
Get-ADObject @GuidMapParams | ForEach-Object { $guidmap[$_.lDAPDisplayName] = [System.GUID]$_.schemaIDGUID }
|
||||
return $guidmap
|
||||
}
|
||||
$GuidMap = New-ADDGuidMap
|
||||
$BindUserSID = New-Object System.Security.Principal.SecurityIdentifier (Get-ADUser "bind-user").SID
|
||||
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
|
||||
$ace1 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["user"]
|
||||
$ace2 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["user"], "All"
|
||||
$ace3 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["group"]
|
||||
$ace4 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["group"], "All"
|
||||
$acl.AddAccessRule($ace1)
|
||||
$acl.AddAccessRule($ace2)
|
||||
$acl.AddAccessRule($ace3)
|
||||
$acl.AddAccessRule($ace4)
|
||||
Set-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" -AclObject $acl
|
||||
|
||||
# Print the access control rules that were just applied.
|
||||
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
|
||||
$acl.Access | Where-Object { $_.IdentityReference -eq "pinniped-ad\bind-user" }
|
||||
```
|
||||
|
||||
If you would like to see these OUs, users, and groups in the UI, you can open the "Active Directory Users and Computers"
|
||||
app in your RDP session.
|
||||
|
||||
## Configure a CA and a serving certificate for LDAPS
|
||||
|
||||
Now we need to create and configure a TLS serving certificate for LDAPS.
|
||||
|
||||
The certificate needs to include two hostnames. One of the hostnames is the name that the AD server
|
||||
thinks is its own hostname (`active-directory-dc1.activedirectory.test.pinniped.dev`).
|
||||
This is how the AD server will decide to use this cert for the LDAPS port.
|
||||
The other hostname is the one that clients will use when making connections from the outside
|
||||
(`activedirectory.test.pinniped.dev`) so they can validate the server certificate.
|
||||
|
||||
The steps here were inspired by https://gist.github.com/magnetikonline/0ccdabfec58eb1929c997d22e7341e45.
|
||||
|
||||
On your mac:
|
||||
|
||||
```shell
|
||||
# On your Mac: Create a self-signed CA public/private keypair.
|
||||
openssl req -x509 -newkey rsa:4096 \
|
||||
-keyout ad-ca.key -out ad-ca.crt \
|
||||
-sha256 -days 36500 -nodes \
|
||||
-subj "/C=US/ST=California/L=San Francisco/O=Pinniped/OU=Pinniped CI/CN=Pinniped AD CA"
|
||||
|
||||
# Copy the public key to your clipboard.
|
||||
cat ad-ca.crt| pbcopy
|
||||
```
|
||||
|
||||
In Powershell terminal:
|
||||
|
||||
```shell
|
||||
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
|
||||
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the public
|
||||
# key again before you hit return for this command.
|
||||
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\ca.crt"
|
||||
|
||||
# In Powershell terminal, check that the file exists and looks correct.
|
||||
type "C:\users\administrator\desktop\ca.crt"
|
||||
|
||||
# Import root certificate into trusted store of domain controller in your Powershell terminal:
|
||||
Import-Certificate -FilePath "C:\users\administrator\desktop\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root
|
||||
```
|
||||
|
||||
If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
|
||||
and look in the folder called "Trusted Root Certification Authorities\Certificates".
|
||||
If the UI was already open, click the refresh button.
|
||||
|
||||
Copy the following file contents to your clipboard:
|
||||
|
||||
```shell
|
||||
[Version]
|
||||
Signature="$Windows NT$"
|
||||
|
||||
[NewRequest]
|
||||
Subject = "CN=activedirectory.test.pinniped.dev"
|
||||
KeySpec = 1
|
||||
KeyLength = 2048
|
||||
Exportable = TRUE
|
||||
MachineKeySet = TRUE
|
||||
SMIME = FALSE
|
||||
PrivateKeyArchive = FALSE
|
||||
UserProtected = FALSE
|
||||
UseExistingKeySet = FALSE
|
||||
ProviderName = "Microsoft RSA SChannel Cryptographic Provider"
|
||||
ProviderType = 12
|
||||
RequestType = PKCS10
|
||||
KeyUsage = 0xa0
|
||||
|
||||
[EnhancedKeyUsageExtension]
|
||||
OID = 1.3.6.1.5.5.7.3.1 ; Server Authentication
|
||||
|
||||
[Extensions]
|
||||
2.5.29.17 = "{text}"
|
||||
_continue_ = "DNS=activedirectory.test.pinniped.dev"
|
||||
_continue_ = "DNS=active-directory-dc1.activedirectory.test.pinniped.dev"
|
||||
```
|
||||
|
||||
In Powershell terminal:
|
||||
|
||||
```shell
|
||||
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
|
||||
# Note that if you copy/paste this command to your RDP session, then you need to copy the file contents
|
||||
# from above again before you hit return for this command.
|
||||
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\request.inf"
|
||||
|
||||
# In Powershell terminal, check that the file exists and looks correct.
|
||||
type "C:\users\administrator\desktop\request.inf"
|
||||
|
||||
# Create a CSR. This command will also generate a private key for the AD server and save it.
|
||||
certreq -new "C:\users\administrator\desktop\request.inf" "C:\users\administrator\desktop\client.csr"
|
||||
|
||||
# Show the CSR.
|
||||
type "C:\users\administrator\desktop\client.csr"
|
||||
|
||||
# Copy the content of this file to your clipboard.
|
||||
Get-Content "C:\users\administrator\desktop\client.csr" | Set-Clipboard
|
||||
```
|
||||
|
||||
On your mac:
|
||||
|
||||
```shell
|
||||
# On your Mac, use the CA to issue a serving cert based on the CSR.
|
||||
pbpaste > client.csr
|
||||
|
||||
cat <<EOF > v3ext.txt
|
||||
keyUsage=digitalSignature,keyEncipherment
|
||||
extendedKeyUsage=serverAuth
|
||||
subjectKeyIdentifier=hash
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = activedirectory.test.pinniped.dev
|
||||
DNS.2 = active-directory-dc1.activedirectory.test.pinniped.dev
|
||||
EOF
|
||||
|
||||
# Create a cert from the CSR signed by the CA.
|
||||
openssl x509 \
|
||||
-req -days 36500 \
|
||||
-in client.csr -CA ad-ca.crt -CAkey ad-ca.key -extfile v3ext.txt \
|
||||
-set_serial 01 -out client.crt
|
||||
|
||||
# Inspect the generated certificate.
|
||||
# Ensure the following X509v3 extensions are all present:
|
||||
# Key Usage: Digital Signature, Key Encipherment
|
||||
# Extended Key Usage: TLS Web Server Authentication
|
||||
# Subject Key Identifier
|
||||
# Subject Alternative Name with 2 DNS hostnames
|
||||
# Authority Key Identifier
|
||||
openssl x509 -in client.crt -text
|
||||
|
||||
# Copy the generated cert.
|
||||
cat client.crt | pbcopy
|
||||
```
|
||||
|
||||
In Powershell terminal:
|
||||
|
||||
```shell
|
||||
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
|
||||
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the file contents
|
||||
# from above again before you hit return for this command.
|
||||
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\client.crt"
|
||||
|
||||
# In Powershell terminal, check that the file exists and looks correct.
|
||||
type "C:\users\administrator\desktop\client.crt"
|
||||
|
||||
# Add the serving certificate to Windows. This will also automatically associate it to the private key that you
|
||||
# generated with the previous usage of certreq.
|
||||
certreq -accept "C:\users\administrator\desktop\client.crt"
|
||||
|
||||
# If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
|
||||
# and look in the folder called "Personal\Certificates". If the UI was already open, click the refresh button.
|
||||
# Double click on the cert. Ensure that it says, "you have a private key that corresponds to this certificate".
|
||||
# Next, we need to reboot the VM for the cert to get picked up and used for serving incoming LDAPS connections.
|
||||
# After showing you a warning dialog box, this should terminate your RDP session and stop the VM.
|
||||
shutdown /s
|
||||
```
|
||||
|
||||
Wait for the VM to stop, then start the VM again from your Mac:
|
||||
|
||||
```shell
|
||||
gcloud compute instances start active-directory-dc1 --project ${project} --zone ${zone}
|
||||
```
|
||||
|
||||
Wait for the VM to finish booting. Then we can confirm that LDAPS is working. On your Mac:
|
||||
|
||||
```shell
|
||||
# Check that serving cert is being returned on the LDAPS port. This command should show the cert chain.
|
||||
# It should also verify the server cert using our CA. The output should include "Verify return code: 0 (ok)".
|
||||
openssl s_client -connect activedirectory.test.pinniped.dev:636 -showcerts -CAfile ad-ca.crt < /dev/null
|
||||
|
||||
# Unfortunately, the ldapsearch command that comes pre-installed on MacOS does not seem to respect
|
||||
# the LDAPTLS_CACERT env variable. So it will not be able to validate the server certificates.
|
||||
# As a workaround, we can use docker to run ldapsearch commands in a linux container.
|
||||
|
||||
# Test the regular LDAP port by issuing a query on your Mac. The -ZZ option asks it to use StartTLS.
|
||||
# This should list all users. Replace REDACTED_BIND_USER_PASSWORD with the real password.
|
||||
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
|
||||
ldapsearch -d8 -v -x -ZZ -H 'ldap://activedirectory.test.pinniped.dev' \
|
||||
-D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
|
||||
-w 'REDACTED_BIND_USER_PASSWORD' \
|
||||
-b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
|
||||
-s sub \
|
||||
'(objectClass=user)' '*'
|
||||
|
||||
# Test the LDAPS port by issuing a query on your Mac. This should list all users.
|
||||
# Replace REDACTED_BIND_USER_PASSWORD with the real password.
|
||||
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
|
||||
ldapsearch -d8 -v -x -H 'ldaps://activedirectory.test.pinniped.dev' \
|
||||
-D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
|
||||
-w 'REDACTED_BIND_USER_PASSWORD' \
|
||||
-b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
|
||||
-s sub \
|
||||
'(objectClass=user)' '*'
|
||||
```
|
||||
|
||||
## Update the `concourse-secrets` secret in GCP Secrets Manager
|
||||
|
||||
On your Mac:
|
||||
|
||||
```shell
|
||||
# Copy the CA's public cert.
|
||||
cat ad-ca.crt | base64 | pbcopy
|
||||
|
||||
# cd to your local clone of the `ci` branch of the pinniped repo
|
||||
cd pinniped-ci-branch
|
||||
|
||||
# Edit the secret.
|
||||
./hack/edit-gcloud-secret.sh concourse-secret
|
||||
# This opens vim to edit the secret.
|
||||
# Paste the cert as the value for `aws-ad-ca-data`.
|
||||
# Also edit the the value of `aws-ad-user-unique-id-attribute-value`. The value should be the ObjectGUID of the pinny
|
||||
# user that you created in the steps above.
|
||||
# Save your changes, exit vim, and when prompted say that you want to save this as the new version of concourse-secrets.
|
||||
```
|
||||
|
||||
## Confirm that Active Directory integration tests can pass
|
||||
|
||||
Use these commands run all the Active Directory integration tests on your Mac.
|
||||
The `-run` filter is based on the tests as they existed at the time of writing this doc.
|
||||
You can find AD tests by searching for `SkipTestWhenActiveDirectoryIsUnavailable`.
|
||||
|
||||
On your Mac:
|
||||
|
||||
```shell
|
||||
# Login so we can read the secrets from GCP Secret Manager.
|
||||
gcloud auth login
|
||||
|
||||
# cd to your local git clone
|
||||
cd pinniped
|
||||
|
||||
# Compile and install onto a local kind cluster.
|
||||
./hack/prepare-for-integration-tests.sh -c --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"
|
||||
|
||||
# Run all the tests that depend on AD.
|
||||
source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration \
|
||||
-run "/TestSupervisorLogin_Browser/active_directory|/TestE2EFullIntegration_Browser/with_Supervisor_ActiveDirectory|/TestActiveDirectoryIDPPhaseAndConditions_Parallel|/TestSupervisorWarnings_Browser/Active_Directory"
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
On your Mac:
|
||||
|
||||
```shell
|
||||
# Remove all bindings for the service account from the secret.
|
||||
# The binding was only needed during the first boot of the VM.
|
||||
gcloud secrets remove-iam-policy-binding active-directory-dc1-password \
|
||||
--project ${project} \
|
||||
--member "serviceAccount:${dcServiceAccount}" --role roles/secretmanager.secretAccessor \
|
||||
--all
|
||||
|
||||
# Remove the firewall rule which allows incoming RDP connections.
|
||||
# If you need to RDP to this AD VM in the future, then you will need to create
|
||||
# a new firewall rule to allow it.
|
||||
gcloud compute firewall-rules delete allow-rdp-ingress-to-addc \
|
||||
--project ${project} \
|
||||
--quiet
|
||||
|
||||
# Remove all temp files. It's okay to remove the private key for our CA because we
|
||||
# created certs that are good for 100 years, as long as you have already added the
|
||||
# public cert to the concourse-secrets secret. If we need to create a new AD VM, we
|
||||
# can also create a new CA.
|
||||
rm ad-ca.crt ad-ca.key client.crt client.csr v3ext.txt
|
||||
```
|
||||
9
ADOPTERS.md
Normal file
9
ADOPTERS.md
Normal file
@@ -0,0 +1,9 @@
|
||||
# Pinniped Adopters
|
||||
|
||||
These organizations are using Pinniped.
|
||||
|
||||
* [VMware Tanzu](https://tanzu.vmware.com/) ([Tanzu Mission Control](https://tanzu.vmware.com/mission-control))
|
||||
|
||||
If you are using Pinniped and are not on this list, you can open a [pull
|
||||
request](https://github.com/vmware-tanzu/pinniped/issues/new?template=feature-proposal.md)
|
||||
to add yourself.
|
||||
@@ -1 +0,0 @@
|
||||
Please see https://github.com/vmware/pinniped/blob/main/CODE_OF_CONDUCT.md
|
||||
@@ -1 +0,0 @@
|
||||
Please see https://github.com/vmware/pinniped/blob/main/CONTRIBUTING.md
|
||||
37
Dockerfile
Normal file
37
Dockerfile
Normal file
@@ -0,0 +1,37 @@
|
||||
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM golang:1.15.2 as build-env
|
||||
|
||||
WORKDIR /work
|
||||
# Get dependencies first so they can be cached as a layer
|
||||
COPY go.* ./
|
||||
COPY generated/1.19/apis/go.* ./generated/1.19/apis/
|
||||
COPY generated/1.19/client/go.* ./generated/1.19/client/
|
||||
RUN go mod download
|
||||
|
||||
# Copy only the production source code to avoid cache misses when editing other files
|
||||
COPY generated ./generated
|
||||
COPY cmd ./cmd
|
||||
COPY internal ./internal
|
||||
COPY pkg ./pkg
|
||||
COPY tools ./tools
|
||||
COPY hack ./hack
|
||||
|
||||
# Build the executable binary (CGO_ENABLED=0 means static linking)
|
||||
RUN mkdir out \
|
||||
&& CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(hack/get-ldflags.sh)" -o out ./cmd/pinniped-server/... \
|
||||
&& CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out ./cmd/local-user-authenticator/...
|
||||
|
||||
# Use a runtime image based on Debian slim
|
||||
FROM debian:10.5-slim
|
||||
|
||||
# Copy the binaries from the build-env stage
|
||||
COPY --from=build-env /work/out/pinniped-server /usr/local/bin/pinniped-server
|
||||
COPY --from=build-env /work/out/local-user-authenticator /usr/local/bin/local-user-authenticator
|
||||
|
||||
# Document the port
|
||||
EXPOSE 443
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/local/bin/pinniped-server"]
|
||||
@@ -1 +1,17 @@
|
||||
Please see https://github.com/vmware/pinniped/blob/main/MAINTAINERS.md
|
||||
# Pinniped Maintainers
|
||||
|
||||
This is the current list of maintainers for the Pinniped project.
|
||||
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| --------------- | --------- | ----------- |
|
||||
| Andrew Keesler | [ankeesler](https://github.com/ankeesler) | [VMware](https://www.github.com/vmware/) |
|
||||
| Matt Moyer | [mattmoyer](https://github.com/mattmoyer) | [VMware](https://www.github.com/vmware/) |
|
||||
| Pablo Schuhmacher | [pabloschuhmacher](https://github.com/pabloschuhmacher) | [VMware](https://www.github.com/vmware/) |
|
||||
| Ryan Richard | [cfryanr](https://github.com/cfryanr) | [VMware](https://www.github.com/vmware/) |
|
||||
|
||||
## Pinniped Contributors & Stakeholders
|
||||
|
||||
| Feature Area | Lead |
|
||||
| ----------------------------- | :---------------------: |
|
||||
| Technical Lead | Matt Moyer (mattmoyer) |
|
||||
| Product Management | Pablo Schuhmacher (pabloschuhmacher) |
|
||||
|
||||
230
README.md
230
README.md
@@ -1,219 +1,51 @@
|
||||
# Pinniped's `ci` branch
|
||||
<img src="doc/img/pinniped_logo_with_text_on_right.svg" alt="Pinniped Logo" width="100%"/>
|
||||
|
||||
This `ci` branch contains the CI/CD tooling for [Pinniped](https://github.com/vmware/pinniped).
|
||||
## Overview
|
||||
|
||||
Pinniped provides identity services to Kubernetes.

Pinniped allows cluster administrators to easily plug in external identity
providers (IDPs) into Kubernetes clusters. This is achieved via a uniform
install procedure across all types and origins of Kubernetes clusters,
declarative configuration via Kubernetes APIs, enterprise-grade integrations
with IDPs, and distribution-specific integration strategies.

### Example Use Cases

* Your team uses a large enterprise IDP and manages many clusters. Pinniped provides:
  * Seamless and robust integration with the IDP
  * Easy installation across clusters of any type and origin
  * A simplified login flow across all clusters
* Your team shares a single cluster. Pinniped provides:
  * Simple configuration to integrate an IDP
  * Individual, revocable identities

### Architecture

Pinniped offers credential exchange to enable a user to exchange an external IDP
credential for a short-lived, cluster-specific credential. Pinniped supports various
IDP types and implements different integration strategies for various Kubernetes
distributions to make authentication possible.

To learn more, see [doc/architecture.md](doc/architecture.md).

<img src="doc/img/pinniped_architecture.svg" alt="Pinniped Architecture Sketch" width="300px"/>

## Trying Pinniped

Care to kick the tires? It's easy to [install and try Pinniped](doc/demo.md).

## Contributions

Contributions are welcome. Before contributing, please see the [contributing guide](doc/contributing.md).

## Reporting Security Vulnerabilities

Please follow the procedure described in [SECURITY.md](SECURITY.md).

The documentation and code in this branch are mainly intended for the maintainers of Pinniped.
This branch is not intended to be merged into the `main` branch.

The code in this branch previously lived in a private repository. It was made public by moving
the code into the `ci` branch of the Pinniped repository in late 2024. The previous git history
for these files was not copied from the private repository at the time of this migration.

## Reporting an issue in this branch

Found a bug or would like to make an enhancement request?
Please report issues in [this repo](https://github.com/vmware/pinniped).

## Reporting security vulnerabilities

Please follow the procedure described in [SECURITY.md](https://github.com/vmware/pinniped/blob/main/SECURITY.md).

## Creating a release

When the team is preparing to ship a release, a maintainer will create a new
GitHub [Issue](https://github.com/vmware/pinniped/issues/new/choose) in this repo to
collaboratively track progress on the release checklist. As tasks are completed,
the team will check them off. When all the tasks are completed, the issue is closed.

The release checklist is committed to this repo as an [issue template](https://github.com/vmware/pinniped/tree/main/.github/ISSUE_TEMPLATE/release_checklist.md).

## Pipelines

Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD.
We are currently running our Concourse on a network that can only be reached from inside the corporate network at [ci.pinniped.broadcom.net](https://ci.pinniped.broadcom.net).

The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible.
- `main`

  This is the main pipeline that runs on merges to `main`. It builds, tests, and (when manually triggered) releases from main.

- `pull-requests`

  This pipeline triggers for each open pull request. It runs a smaller subset of the integration tests and validations than the `main` pipeline.

- `dockerfile-builders`

  This pipeline builds a number of custom utility container images that are used in our CI and testing.

  - `build-gi-cli` (a container image that includes the GitHub CLI)
  - `build-github-pr-resource` (a [fork](https://github.com/pinniped-ci-bot/github-pr-resource) of the `github-pr-resource` with support for gating PRs for untrusted users)
  - `build-code-coverage-uploader` (uploading code coverage during unit tests)
  - `build-eks-deployer-dockerfile` (deploying our app to EKS clusters)
  - `build-k8s-app-deployer-dockerfile` (deploying our app to clusters)
  - `build-pool-trigger-resource-dockerfile` (an updated implementation of the [pool-trigger-resource](https://github.com/cfmobile/pool-trigger-resource) for use in our CI)
  - `build-integration-test-runner-dockerfile` (running our integration tests)
  - `build-integration-test-runner-beta-dockerfile` (running our integration tests with the latest Chrome beta version)
  - `build-deployment-yaml-formatter-dockerfile` (templating our deployment YAML during a release)
  - `build-crane` (copying and tagging container images during a release)
  - `build-k8s-code-generator-*` (running our Kubernetes code generation under different Kubernetes dependency versions)
  - `build-test-dex` (a Dex used during tests)
  - `build-test-cfssl` (a cfssl used during tests)
  - `build-test-kubectl` (a kubectl used during tests)
  - `build-test-forward-proxy` (a Squid forward proxy used during tests)
  - `build-test-bitnami-ldap` (an OpenLDAP used during tests)

- `cleanup-aws`

  This runs a script that runs [aws-nuke](https://github.com/rebuy-de/aws-nuke) against our test AWS account.
  This was occasionally needed because [eksctl](https://eksctl.io/) sometimes fails and leaks AWS resources. These resources cost money and use up our AWS quota.
  However, we seem to have worked around these issues, and this pipeline has not been used for some time.

  These jobs are only triggered manually. They are dangerous and should be used with care.

- `concourse-workers`

  Deploys worker replicas on a long-lived GKE cluster that runs the Concourse workers, and can scale them up or down.

- `go-compatibility`

  This pipeline runs nightly jobs that validate the compatibility of our code as a Go module in various contexts. We have jobs that test that our code compiles under older Go versions and that our CLI can be installed using `go install` (see the sketch after this list).

- `security-scan`

  This pipeline has nightly jobs that run security scans on our current main branch and on the most recently released artifacts.

  The tools we use are:
  - [sonatype-nexus-community/nancy](https://github.com/sonatype-nexus-community/nancy), which scans Go module versions.
  - [aquasecurity/trivy](https://github.com/aquasecurity/trivy), which scans container images and Go binaries.
  - [govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck), which scans Go code to find calls to known-vulnerable dependencies.

  This pipeline also has a job called `all-golang-deps-updated` which automatically submits PRs to update all
  direct dependencies in Pinniped's go.mod file, and to update the Golang and distroless container images used in
  Pinniped's Dockerfiles.

- `kind-node-builder`

  A nightly build job which uses the latest version of kind to build the HEAD of Kubernetes' master branch as a container
  image that can be used to deploy kind clusters. Other pipelines use this container image to install Pinniped and run
  integration tests. This gives us insight into any compatibility problems with the upcoming release of Kubernetes.
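One of the `go-compatibility` checks above verifies that the CLI is installable via `go install`. Here is a minimal sketch of what that check exercises, assuming the published module path `go.pinniped.dev`; the `@latest` selector is illustrative and not necessarily what CI pins:

```bash
# Install the Pinniped CLI from source using the Go toolchain.
go install go.pinniped.dev/cmd/pinniped@latest

# Confirm the binary is available under the Go bin directory.
"$(go env GOPATH)/bin/pinniped" --help
```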
## Deploying pipeline changes

After any shared tasks (`./pipelines/shared-tasks`) or helpers (`./pipelines/shared-helpers`) are edited,
the commits must be pushed to the `ci` branch of this repository to take effect.

After editing any CI secrets or pipeline definitions, a maintainer must run the corresponding
`./pipelines/$PIPELINE_NAME/update-pipeline.sh` script to apply the changes to Concourse.
To deploy _all_ pipelines, a maintainer can run `./pipelines/update-all-pipelines.sh`.
Don't forget to commit and push your changes after applying them!
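For example, a minimal sketch of this workflow for a single pipeline (using the `pull-requests` pipeline named above; substitute whichever pipeline you actually edited, and note the commit message is just illustrative):

```bash
# Apply the edited pipeline definition to Concourse.
./pipelines/pull-requests/update-pipeline.sh

# Or apply every pipeline at once.
./pipelines/update-all-pipelines.sh

# Then commit and push so the applied configuration matches what is in the ci branch.
git add pipelines/
git commit -m "Update pull-requests pipeline"
git push origin ci
```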
## Github webhooks for pipelines

Some pipelines use github [webhooks to trigger resource checks](https://concourse-ci.org/resources.html#schema.resource.webhook_token),
rather than the default of polling every minute, to make these pipelines more responsive and use fewer compute resources
for running checks. Refer to places where `webhook_token` is configured in various `pipeline.yml` files.

To make these webhooks work, they must be defined on the [GitHub repo's settings](https://github.com/vmware/pinniped/settings/hooks).
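A hedged sketch of what such a webhook target looks like, assuming Concourse's standard webhook check endpoint; the team, pipeline, resource, and token values below are illustrative placeholders, not the real configuration:

```bash
# GitHub should be configured to POST to a URL of this form so that Concourse
# re-checks the corresponding resource immediately instead of waiting for a poll.
curl -X POST "https://ci.pinniped.broadcom.net/api/v1/teams/main/pipelines/pull-requests/resources/pull-request/check/webhook?webhook_token=EXAMPLE_TOKEN"
```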
## Installing and operating Concourse

See [infra/README.md](./infra/README.md) for details about how Concourse was installed and how it can be operated.
## Acceptance environments

In addition to the many ephemeral Kubernetes clusters we use for testing, we also deploy a long-running acceptance environment.

It runs on Google Kubernetes Engine (GKE) in the `gke-acceptance-cluster` cluster in our GCP project, in the `us-west1-c` availability zone.

To access this cluster, download the kubeconfig to `gke-acceptance.yaml` by running:

```bash
KUBECONFIG=gke-acceptance.yaml gcloud container clusters get-credentials gke-acceptance-cluster --project "$PINNIPED_GCP_PROJECT" --zone us-west1-c
```

The above command assumes that you have already set `PINNIPED_GCP_PROJECT` to the name of the GCP project.
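A quick way to confirm that the downloaded kubeconfig works (a minimal sketch; it assumes `gcloud` authentication and the kubeconfig download above have already succeeded):

```bash
# List the acceptance cluster's nodes using the freshly downloaded kubeconfig.
KUBECONFIG=gke-acceptance.yaml kubectl get nodes
```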
## CI secrets

We use [Google Secret Manager](https://cloud.google.com/secret-manager) on GCP to store build/test/release secrets.
These secrets are only available to the maintainers.

Using the `gcloud secrets list` command or the [web console](https://console.cloud.google.com/security/secret-manager),
you can list the available secrets. The content of each secret is a YAML file with secret key/value pairs.
You can also use the `./hack/edit-gcloud-secret.sh <secretName>` script to edit or inspect each secret.
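For example (a minimal sketch; `some-secret-name` is a placeholder, and the commands assume you are a maintainer who has already run `gcloud auth login`):

```bash
# List the CI secrets stored in Google Secret Manager for our project.
gcloud secrets list --project "$PINNIPED_GCP_PROJECT"

# Inspect or edit the contents of one secret (a YAML file of key/value pairs).
./hack/edit-gcloud-secret.sh some-secret-name
```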
## Configure Azure for CI to test on AKS

There are several CI jobs which test that Pinniped works when installed on Azure's AKS.
For these jobs to run, they need to be able to create and delete ephemeral AKS clusters.
This requires the following:

1. An active Azure Subscription. (A "subscription" in Azure is the equivalent of an "account" in AWS or a "project" in GCP.)
2. An Azure App Registration (basically, a service account) active in the same Directory (aka tenant) as the Subscription.
   Create the app in "My Organization Only". It does not need a redirect URI or any other optional settings.
   Create a client secret for this app. If you want the client secret to have a long lifetime, you can use the `az` CLI to create it (see the sketch after this list).
   In the Subscription's IAM settings, assign this app the role "Azure Kubernetes Service Contributor Role" to allow
   the app to manage AKS clusters. Also assign this app the role "Reader" to allow it to read all resources
   (used by the `remove-orphaned-aks-clusters` CI task).
   Do not grant this app permissions in any other Subscription or use it for any other purpose.
3. Configure the pipelines with the app's Application (client) ID, Client Secret, and Directory (tenant) ID
   as the appropriate secret values.

The CI jobs will create and delete AKS clusters in a Resource Group called `pinniped-ci` within the provided Subscription.
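A hedged sketch of creating a longer-lived client secret with the `az` CLI, as mentioned in step 2 above; the `APP_ID` variable and the two-year lifetime are illustrative assumptions, not values mandated by CI:

```bash
# APP_ID is assumed to hold the Application (client) ID of the App Registration.
# This resets the app's credentials and creates a new client secret valid for two years.
az ad app credential reset --id "$APP_ID" --years 2
```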
## Configure AWS for CI to test on EKS

There are several CI jobs which test that Pinniped works when installed on Amazon's EKS.
For these jobs to run, they need to be able to create and delete ephemeral EKS clusters.
There are also some jobs to clean up any orphaned resources (e.g. IP addresses) in the AWS account.
These jobs require the following:

1. An active AWS account, which will only be used for this purpose.
2. Two IAM users in that account, each with a role that can be assumed.
   These IAM users should only be used for Pinniped CI and for no other purpose.
   They should only have permissions to perform AWS actions in the relevant AWS account, and no other account.
3. The first user and role should have permission to create and delete EKS clusters using `eksctl` (see the sketch after this list).
   The permissions required can be found in the [eksctl docs](https://eksctl.io/usage/minimum-iam-policies).
   The user also needs permission to run `aws logs put-retention-policy`, `aws ec2 describe-nat-gateways`,
   and `aws ec2 delete-nat-gateway`.
4. The second user and role should have broad permissions to get and delete everything in the account.
   It will be used to run `aws-nuke` to list and/or clean resources from the AWS account.
   To use `aws-nuke`, the user also needs to have an AWS account alias
   (see the [cleanup-aws task](pipelines/shared-tasks/cleanup-aws/task.sh) for details).
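A minimal sketch of the kind of `eksctl` commands the first user's credentials must allow, as referenced in step 3 above; the cluster name and region are illustrative placeholders, not the values used by CI:

```bash
# Create an ephemeral EKS test cluster.
eksctl create cluster --name pinniped-ci-example --region us-west-2

# Tear the same cluster down when the tests finish.
eksctl delete cluster --name pinniped-ci-example --region us-west-2
```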
## Setting Up Active Directory Test Environment

To test the `ActiveDirectoryIdentityProvider` functionality, we have a long-running Active Directory Domain Controller
server instance in our GCP account. See [AD-SETUP.md](AD-SETUP.md) for details.
## Running integration tests on your laptop using AD

The relevant environment variables can be pulled from the secret manager via the `hack/get-active-directory-env-vars.sh` script.
Maintainers can use this with Pinniped's `hack/prepare-for-integration-tests.sh` script in the following way:

```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login

# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-active-directory-vars "$HOME/path/to/pinniped-ci-branch/hack/get-active-directory-env-vars.sh"
```
## Running integration tests on your laptop using GitHub

The relevant environment variables can be pulled from the secret manager via the `hack/get-github-env-vars.sh` script.
Maintainers can use this with Pinniped's `hack/prepare-for-integration-tests.sh` script in the following way:

```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login

# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-github-vars "$HOME/path/to/pinniped-ci-branch/hack/get-github-env-vars.sh"
```
## License

Pinniped is open source and licensed under Apache License Version 2.0. See [LICENSE](LICENSE).

Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
13
SECURITY.md
@@ -1 +1,12 @@
Please see https://github.com/vmware/pinniped/blob/main/SECURITY.md

# Reporting a Vulnerability

Pinniped development is sponsored by VMware, and the Pinniped team encourages users
who become aware of a security vulnerability in Pinniped to report any potential
vulnerabilities found to security@vmware.com. If possible, please include a description
of the effects of the vulnerability, reproduction steps, and a description of which
version of Pinniped or its dependencies the vulnerability was discovered in.
The use of encrypted email is encouraged. The public PGP key can be found at https://kb.vmware.com/kb/1055.

The Pinniped team hopes that users encountering a new vulnerability will contact
us privately, as it is in the best interests of our users that the Pinniped team has
an opportunity to investigate and confirm a suspected vulnerability before it becomes public knowledge.
5
apis/README.md
Normal file
@@ -0,0 +1,5 @@
# API Generation Templates

This directory contains a template for generating our Kubernetes API code across several Kubernetes versions.

See the [`./generated`](../generated) directory for the rendered output.
8
apis/config/doc.go.tmpl
Normal file
8
apis/config/doc.go.tmpl
Normal file
@@ -0,0 +1,8 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=config.pinniped.dev
|
||||
|
||||
// Package config is the internal version of the Pinniped configuration API.
|
||||
package config
|
||||
4
apis/config/types.go.tmpl
Normal file
4
apis/config/types.go.tmpl
Normal file
@@ -0,0 +1,4 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package config
|
||||
4
apis/config/v1alpha1/conversion.go.tmpl
Normal file
4
apis/config/v1alpha1/conversion.go.tmpl
Normal file
@@ -0,0 +1,4 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
12
apis/config/v1alpha1/defaults.go.tmpl
Normal file
12
apis/config/v1alpha1/defaults.go.tmpl
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
11
apis/config/v1alpha1/doc.go.tmpl
Normal file
11
apis/config/v1alpha1/doc.go.tmpl
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/config
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +groupName=config.pinniped.dev
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the Pinniped configuration API.
|
||||
package v1alpha1
|
||||
43
apis/config/v1alpha1/register.go.tmpl
Normal file
43
apis/config/v1alpha1/register.go.tmpl
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const GroupName = "config.pinniped.dev"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
|
||||
}
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&CredentialIssuerConfig{},
|
||||
&CredentialIssuerConfigList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
91
apis/config/v1alpha1/types.go.tmpl
Normal file
91
apis/config/v1alpha1/types.go.tmpl
Normal file
@@ -0,0 +1,91 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// +kubebuilder:validation:Enum=KubeClusterSigningCertificate
|
||||
type StrategyType string
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Error
|
||||
type StrategyStatus string
|
||||
|
||||
// +kubebuilder:validation:Enum=FetchedKey;CouldNotFetchKey
|
||||
type StrategyReason string
|
||||
|
||||
const (
|
||||
KubeClusterSigningCertificateStrategyType = StrategyType("KubeClusterSigningCertificate")
|
||||
|
||||
SuccessStrategyStatus = StrategyStatus("Success")
|
||||
ErrorStrategyStatus = StrategyStatus("Error")
|
||||
|
||||
CouldNotFetchKeyStrategyReason = StrategyReason("CouldNotFetchKey")
|
||||
FetchedKeyStrategyReason = StrategyReason("FetchedKey")
|
||||
)
|
||||
|
||||
// Status of a credential issuer.
|
||||
type CredentialIssuerConfigStatus struct {
|
||||
// List of integration strategies that were attempted by Pinniped.
|
||||
Strategies []CredentialIssuerConfigStrategy `json:"strategies"`
|
||||
|
||||
// Information needed to form a valid Pinniped-based kubeconfig using this credential issuer.
|
||||
// +optional
|
||||
KubeConfigInfo *CredentialIssuerConfigKubeConfigInfo `json:"kubeConfigInfo,omitempty"`
|
||||
}
|
||||
|
||||
// Information needed to form a valid Pinniped-based kubeconfig using this credential issuer.
|
||||
type CredentialIssuerConfigKubeConfigInfo struct {
|
||||
// The K8s API server URL.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:Pattern=`^https://|^http://`
|
||||
Server string `json:"server"`
|
||||
|
||||
// The K8s API server CA bundle.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
CertificateAuthorityData string `json:"certificateAuthorityData"`
|
||||
}
|
||||
|
||||
// Status of an integration strategy that was attempted by Pinniped.
|
||||
type CredentialIssuerConfigStrategy struct {
|
||||
// Type of integration attempted.
|
||||
Type StrategyType `json:"type"`
|
||||
|
||||
// Status of the attempted integration strategy.
|
||||
Status StrategyStatus `json:"status"`
|
||||
|
||||
// Reason for the current status.
|
||||
Reason StrategyReason `json:"reason"`
|
||||
|
||||
// Human-readable description of the current status.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Message string `json:"message"`
|
||||
|
||||
// When the status was last checked.
|
||||
LastUpdateTime metav1.Time `json:"lastUpdateTime"`
|
||||
}
|
||||
|
||||
|
||||
// Describes the configuration status of a Pinniped credential issuer.
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:resource:shortName=cic
|
||||
|
||||
type CredentialIssuerConfig struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// Status of the credential issuer.
|
||||
Status CredentialIssuerConfigStatus `json:"status"`
|
||||
}
|
||||
|
||||
|
||||
// List of CredentialIssuerConfig objects.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type CredentialIssuerConfigList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []CredentialIssuerConfig `json:"items"`
|
||||
}
|
||||
8
apis/idp/doc.go.tmpl
Normal file
8
apis/idp/doc.go.tmpl
Normal file
@@ -0,0 +1,8 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=idp.pinniped.dev
|
||||
|
||||
// Package idp is the internal version of the Pinniped identity provider API.
|
||||
package idp
|
||||
4
apis/idp/v1alpha1/conversion.go.tmpl
Normal file
4
apis/idp/v1alpha1/conversion.go.tmpl
Normal file
@@ -0,0 +1,4 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
12
apis/idp/v1alpha1/defaults.go.tmpl
Normal file
12
apis/idp/v1alpha1/defaults.go.tmpl
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
12
apis/idp/v1alpha1/doc.go.tmpl
Normal file
12
apis/idp/v1alpha1/doc.go.tmpl
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/idp
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +groupName=idp.pinniped.dev
|
||||
// +groupGoName=IDP
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the Pinniped identity provider API.
|
||||
package v1alpha1
|
||||
43
apis/idp/v1alpha1/register.go.tmpl
Normal file
43
apis/idp/v1alpha1/register.go.tmpl
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const GroupName = "idp.pinniped.dev"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
|
||||
}
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&WebhookIdentityProvider{},
|
||||
&WebhookIdentityProviderList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
75
apis/idp/v1alpha1/types_meta.go.tmpl
Normal file
75
apis/idp/v1alpha1/types_meta.go.tmpl
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// ConditionStatus is effectively an enum type for Condition.Status.
|
||||
type ConditionStatus string
|
||||
|
||||
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
|
||||
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
|
||||
// can't decide if a resource is in the condition or not. In the future, we could add other
|
||||
// intermediate conditions, e.g. ConditionDegraded.
|
||||
const (
|
||||
ConditionTrue ConditionStatus = "True"
|
||||
ConditionFalse ConditionStatus = "False"
|
||||
ConditionUnknown ConditionStatus = "Unknown"
|
||||
)
|
||||
|
||||
// Condition status of a resource (mirrored from the metav1.Condition type added in Kubernetes 1.19). In a future API
|
||||
// version we can switch to using the upstream type.
|
||||
// See https://github.com/kubernetes/apimachinery/blob/v0.19.0/pkg/apis/meta/v1/types.go#L1353-L1413.
|
||||
type Condition struct {
|
||||
// type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
// ---
|
||||
// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
|
||||
// useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
// +required
|
||||
// +kubebuilder:validation:Required
|
||||
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
|
||||
// +kubebuilder:validation:MaxLength=316
|
||||
Type string `json:"type"`
|
||||
|
||||
// status of the condition, one of True, False, Unknown.
|
||||
// +required
|
||||
// +kubebuilder:validation:Required
|
||||
// +kubebuilder:validation:Enum=True;False;Unknown
|
||||
Status ConditionStatus `json:"status"`
|
||||
|
||||
// observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
// with respect to the current state of the instance.
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
|
||||
// lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
// +required
|
||||
// +kubebuilder:validation:Required
|
||||
// +kubebuilder:validation:Type=string
|
||||
// +kubebuilder:validation:Format=date-time
|
||||
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
|
||||
|
||||
// reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
// Producers of specific condition types may define expected values and meanings for this field,
|
||||
// and whether the values are considered a guaranteed API.
|
||||
// The value should be a CamelCase string.
|
||||
// This field may not be empty.
|
||||
// +required
|
||||
// +kubebuilder:validation:Required
|
||||
// +kubebuilder:validation:MaxLength=1024
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
|
||||
Reason string `json:"reason"`
|
||||
|
||||
// message is a human readable message indicating details about the transition.
|
||||
// This may be an empty string.
|
||||
// +required
|
||||
// +kubebuilder:validation:Required
|
||||
// +kubebuilder:validation:MaxLength=32768
|
||||
Message string `json:"message"`
|
||||
}
|
||||
11
apis/idp/v1alpha1/types_tls.go.tmpl
Normal file
11
apis/idp/v1alpha1/types_tls.go.tmpl
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
// Configuration for configuring TLS on various identity providers.
|
||||
type TLSSpec struct {
|
||||
// X.509 Certificate Authority (base64-encoded PEM bundle). If omitted, a default set of system roots will be trusted.
|
||||
// +optional
|
||||
CertificateAuthorityData string `json:"certificateAuthorityData,omitempty"`
|
||||
}
|
||||
53
apis/idp/v1alpha1/types_webhook.go.tmpl
Normal file
53
apis/idp/v1alpha1/types_webhook.go.tmpl
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// Status of a webhook identity provider.
|
||||
type WebhookIdentityProviderStatus struct {
|
||||
// Represents the observations of an identity provider's current state.
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
|
||||
}
|
||||
|
||||
// Spec for configuring a webhook identity provider.
|
||||
type WebhookIdentityProviderSpec struct {
|
||||
// Webhook server endpoint URL.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:Pattern=`^https://`
|
||||
Endpoint string `json:"endpoint"`
|
||||
|
||||
// TLS configuration.
|
||||
// +optional
|
||||
TLS *TLSSpec `json:"tls,omitempty"`
|
||||
}
|
||||
|
||||
// WebhookIdentityProvider describes the configuration of a Pinniped webhook identity provider.
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:resource:categories=all;idp;idps,shortName=webhookidp;webhookidps
|
||||
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
|
||||
type WebhookIdentityProvider struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// Spec for configuring the identity provider.
|
||||
Spec WebhookIdentityProviderSpec `json:"spec"`
|
||||
|
||||
// Status of the identity provider.
|
||||
Status WebhookIdentityProviderStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// List of WebhookIdentityProvider objects.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type WebhookIdentityProviderList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []WebhookIdentityProvider `json:"items"`
|
||||
}
|
||||
8
apis/login/doc.go.tmpl
Normal file
8
apis/login/doc.go.tmpl
Normal file
@@ -0,0 +1,8 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=login.pinniped.dev
|
||||
|
||||
// Package login is the internal version of the Pinniped login API.
|
||||
package login
|
||||
38
apis/login/register.go.tmpl
Normal file
38
apis/login/register.go.tmpl
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package login
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const GroupName = "login.pinniped.dev"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||
|
||||
// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
|
||||
func Kind(kind string) schema.GroupKind {
|
||||
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns back a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&TokenCredentialRequest{},
|
||||
&TokenCredentialRequestList{},
|
||||
)
|
||||
return nil
|
||||
}
|
||||
21
apis/login/types_clustercred.go.tmpl
Normal file
21
apis/login/types_clustercred.go.tmpl
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package login
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// ClusterCredential is a credential (token or certificate) which is valid on the Kubernetes cluster.
|
||||
type ClusterCredential struct {
|
||||
// ExpirationTimestamp indicates a time when the provided credentials expire.
|
||||
ExpirationTimestamp metav1.Time
|
||||
|
||||
// Token is a bearer token used by the client for request authentication.
|
||||
Token string
|
||||
|
||||
// PEM-encoded client TLS certificates (including intermediates, if any).
|
||||
ClientCertificateData string
|
||||
|
||||
// PEM-encoded private key for the above certificate.
|
||||
ClientKeyData string
|
||||
}
|
||||
48
apis/login/types_token.go.tmpl
Normal file
48
apis/login/types_token.go.tmpl
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package login
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type TokenCredentialRequestSpec struct {
|
||||
// Bearer token supplied with the credential request.
|
||||
Token string
|
||||
|
||||
// Reference to an identity provider which can fulfill this credential request.
|
||||
IdentityProvider corev1.TypedLocalObjectReference
|
||||
}
|
||||
|
||||
type TokenCredentialRequestStatus struct {
|
||||
// A ClusterCredential will be returned for a successful credential request.
|
||||
// +optional
|
||||
Credential *ClusterCredential
|
||||
|
||||
// An error message will be returned for an unsuccessful credential request.
|
||||
// +optional
|
||||
Message *string
|
||||
}
|
||||
|
||||
// TokenCredentialRequest submits an IDP-specific credential to Pinniped in exchange for a cluster-specific credential.
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type TokenCredentialRequest struct {
|
||||
metav1.TypeMeta
|
||||
metav1.ObjectMeta
|
||||
|
||||
Spec TokenCredentialRequestSpec
|
||||
Status TokenCredentialRequestStatus
|
||||
}
|
||||
|
||||
// TokenCredentialRequestList is a list of TokenCredentialRequest objects.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type TokenCredentialRequestList struct {
|
||||
metav1.TypeMeta
|
||||
metav1.ListMeta
|
||||
|
||||
// Items is a list of TokenCredentialRequest
|
||||
Items []TokenCredentialRequest
|
||||
}
|
||||
4
apis/login/v1alpha1/conversion.go.tmpl
Normal file
4
apis/login/v1alpha1/conversion.go.tmpl
Normal file
@@ -0,0 +1,4 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
12
apis/login/v1alpha1/defaults.go.tmpl
Normal file
12
apis/login/v1alpha1/defaults.go.tmpl
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
11
apis/login/v1alpha1/doc.go.tmpl
Normal file
11
apis/login/v1alpha1/doc.go.tmpl
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/login
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +groupName=login.pinniped.dev
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the Pinniped login API.
|
||||
package v1alpha1
|
||||
43
apis/login/v1alpha1/register.go.tmpl
Normal file
43
apis/login/v1alpha1/register.go.tmpl
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const GroupName = "login.pinniped.dev"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
|
||||
}
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&TokenCredentialRequest{},
|
||||
&TokenCredentialRequestList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
22
apis/login/v1alpha1/types_clustercred.go.tmpl
Normal file
22
apis/login/v1alpha1/types_clustercred.go.tmpl
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
// ClusterCredential is the cluster-specific credential returned on a successful credential request. It
|
||||
// contains either a valid bearer token or a valid TLS certificate and corresponding private key for the cluster.
|
||||
type ClusterCredential struct {
|
||||
// ExpirationTimestamp indicates a time when the provided credentials expire.
|
||||
ExpirationTimestamp metav1.Time `json:"expirationTimestamp,omitempty"`
|
||||
|
||||
// Token is a bearer token used by the client for request authentication.
|
||||
Token string `json:"token,omitempty"`
|
||||
|
||||
// PEM-encoded client TLS certificates (including intermediates, if any).
|
||||
ClientCertificateData string `json:"clientCertificateData,omitempty"`
|
||||
|
||||
// PEM-encoded private key for the above certificate.
|
||||
ClientKeyData string `json:"clientKeyData,omitempty"`
|
||||
}
|
||||
49
apis/login/v1alpha1/types_token.go.tmpl
Normal file
49
apis/login/v1alpha1/types_token.go.tmpl
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// TokenCredentialRequestSpec is the specification of a TokenCredentialRequest, expected on requests to the Pinniped API.
|
||||
type TokenCredentialRequestSpec struct {
|
||||
// Bearer token supplied with the credential request.
|
||||
Token string `json:"token,omitempty"`
|
||||
|
||||
// Reference to an identity provider which can fulfill this credential request.
|
||||
IdentityProvider corev1.TypedLocalObjectReference `json:"identityProvider"`
|
||||
}
|
||||
|
||||
// TokenCredentialRequestStatus is the status of a TokenCredentialRequest, returned on responses to the Pinniped API.
|
||||
type TokenCredentialRequestStatus struct {
|
||||
// A Credential will be returned for a successful credential request.
|
||||
// +optional
|
||||
Credential *ClusterCredential `json:"credential,omitempty"`
|
||||
|
||||
// An error message will be returned for an unsuccessful credential request.
|
||||
// +optional
|
||||
Message *string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
// TokenCredentialRequest submits an IDP-specific credential to Pinniped in exchange for a cluster-specific credential.
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type TokenCredentialRequest struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec TokenCredentialRequestSpec `json:"spec,omitempty"`
|
||||
Status TokenCredentialRequestStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// TokenCredentialRequestList is a list of TokenCredentialRequest objects.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type TokenCredentialRequestList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []TokenCredentialRequest `json:"items"`
|
||||
}
|
||||
397
cmd/local-user-authenticator/main.go
Normal file
397
cmd/local-user-authenticator/main.go
Normal file
@@ -0,0 +1,397 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package main provides a authentication webhook program.
|
||||
//
|
||||
// This webhook is meant to be used in demo settings to play around with
|
||||
// Pinniped. As well, it can come in handy in integration tests.
|
||||
//
|
||||
// This webhook is NOT meant for use in production systems.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/controller/apicerts"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/dynamiccert"
|
||||
)
|
||||
|
||||
const (
|
||||
// This string must match the name of the Namespace declared in the deployment yaml.
|
||||
namespace = "local-user-authenticator"
|
||||
// This string must match the name of the Service declared in the deployment yaml.
|
||||
serviceName = "local-user-authenticator"
|
||||
|
||||
singletonWorker = 1
|
||||
defaultResyncInterval = 3 * time.Minute
|
||||
|
||||
invalidRequest = constable.Error("invalid request")
|
||||
)
|
||||
|
||||
type webhook struct {
|
||||
certProvider dynamiccert.Provider
|
||||
secretInformer corev1informers.SecretInformer
|
||||
}
|
||||
|
||||
func newWebhook(
|
||||
certProvider dynamiccert.Provider,
|
||||
secretInformer corev1informers.SecretInformer,
|
||||
) *webhook {
|
||||
return &webhook{
|
||||
certProvider: certProvider,
|
||||
secretInformer: secretInformer,
|
||||
}
|
||||
}
|
||||
|
||||
// start runs the webhook in a separate goroutine and returns whether or not the
|
||||
// webhook was started successfully.
|
||||
func (w *webhook) start(ctx context.Context, l net.Listener) error {
|
||||
server := http.Server{
|
||||
Handler: w,
|
||||
TLSConfig: &tls.Config{
|
||||
MinVersion: tls.VersionTLS13,
|
||||
GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
certPEM, keyPEM := w.certProvider.CurrentCertKeyContent()
|
||||
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
||||
return &cert, err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
// Per ListenAndServeTLS doc, the {cert,key}File parameters can be empty
|
||||
// since we want to use the certs from http.Server.TLSConfig.
|
||||
errCh <- server.ServeTLS(l, "", "")
|
||||
}()
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case err := <-errCh:
|
||||
klog.InfoS("server exited", "err", err)
|
||||
case <-ctx.Done():
|
||||
klog.InfoS("server context cancelled", "err", ctx.Err())
|
||||
if err := server.Shutdown(context.Background()); err != nil {
|
||||
klog.InfoS("server shutdown failed", "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webhook) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
|
||||
username, password, err := getUsernameAndPasswordFromRequest(rsp, req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
secret, err := w.secretInformer.Lister().Secrets(namespace).Get(username)
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
klog.InfoS("could not get secret", "err", err)
|
||||
rsp.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if notFound {
|
||||
klog.InfoS("user not found")
|
||||
respondWithUnauthenticated(rsp)
|
||||
return
|
||||
}
|
||||
|
||||
passwordMatches := bcrypt.CompareHashAndPassword(
|
||||
secret.Data["passwordHash"],
|
||||
[]byte(password),
|
||||
) == nil
|
||||
if !passwordMatches {
|
||||
klog.InfoS("authentication failed: wrong password")
|
||||
respondWithUnauthenticated(rsp)
|
||||
return
|
||||
}
|
||||
|
||||
groups := []string{}
|
||||
groupsBuf := bytes.NewBuffer(secret.Data["groups"])
|
||||
if groupsBuf.Len() > 0 {
|
||||
groupsCSVReader := csv.NewReader(groupsBuf)
|
||||
groups, err = groupsCSVReader.Read()
|
||||
if err != nil {
|
||||
klog.InfoS("could not read groups", "err", err)
|
||||
rsp.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
trimLeadingAndTrailingWhitespace(groups)
|
||||
}
|
||||
|
||||
klog.InfoS("successful authentication")
|
||||
respondWithAuthenticated(rsp, secret.ObjectMeta.Name, string(secret.UID), groups)
|
||||
}
|
||||
|
||||
func getUsernameAndPasswordFromRequest(rsp http.ResponseWriter, req *http.Request) (string, string, error) {
|
||||
if req.URL.Path != "/authenticate" {
|
||||
klog.InfoS("received request path other than /authenticate", "path", req.URL.Path)
|
||||
rsp.WriteHeader(http.StatusNotFound)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if req.Method != http.MethodPost {
|
||||
klog.InfoS("received request method other than post", "method", req.Method)
|
||||
rsp.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if !headerContains(req, "Content-Type", "application/json") {
|
||||
klog.InfoS("content type is not application/json", "Content-Type", req.Header.Values("Content-Type"))
|
||||
rsp.WriteHeader(http.StatusUnsupportedMediaType)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if !headerContains(req, "Accept", "application/json") &&
|
||||
!headerContains(req, "Accept", "application/*") &&
|
||||
!headerContains(req, "Accept", "*/*") {
|
||||
klog.InfoS("client does not accept application/json", "Accept", req.Header.Values("Accept"))
|
||||
rsp.WriteHeader(http.StatusUnsupportedMediaType)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if req.Body == nil {
|
||||
klog.InfoS("invalid nil body")
|
||||
rsp.WriteHeader(http.StatusBadRequest)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
var body authenticationv1beta1.TokenReview
|
||||
if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
|
||||
klog.InfoS("failed to decode body", "err", err)
|
||||
rsp.WriteHeader(http.StatusBadRequest)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if body.APIVersion != authenticationv1beta1.SchemeGroupVersion.String() {
|
||||
klog.InfoS("invalid TokenReview apiVersion", "apiVersion", body.APIVersion)
|
||||
rsp.WriteHeader(http.StatusBadRequest)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
if body.Kind != "TokenReview" {
|
||||
klog.InfoS("invalid TokenReview kind", "kind", body.Kind)
|
||||
rsp.WriteHeader(http.StatusBadRequest)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
tokenSegments := strings.SplitN(body.Spec.Token, ":", 2)
|
||||
if len(tokenSegments) != 2 {
|
||||
klog.InfoS("bad token format in request")
|
||||
rsp.WriteHeader(http.StatusBadRequest)
|
||||
return "", "", invalidRequest
|
||||
}
|
||||
|
||||
return tokenSegments[0], tokenSegments[1], nil
|
||||
}
|
||||
|
||||
func headerContains(req *http.Request, headerName, s string) bool {
|
||||
headerValues := req.Header.Values(headerName)
|
||||
for i := range headerValues {
|
||||
mimeTypes := strings.Split(headerValues[i], ",")
|
||||
for _, mimeType := range mimeTypes {
|
||||
mediaType, _, _ := mime.ParseMediaType(mimeType)
|
||||
if mediaType == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func trimLeadingAndTrailingWhitespace(ss []string) {
|
||||
for i := range ss {
|
||||
ss[i] = strings.TrimSpace(ss[i])
|
||||
}
|
||||
}
|
||||
|
||||
func respondWithUnauthenticated(rsp http.ResponseWriter) {
|
||||
rsp.Header().Add("Content-Type", "application/json")
|
||||
|
||||
body := authenticationv1beta1.TokenReview{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "TokenReview",
|
||||
APIVersion: authenticationv1beta1.SchemeGroupVersion.String(),
|
||||
},
|
||||
Status: authenticationv1beta1.TokenReviewStatus{
|
||||
Authenticated: false,
|
||||
},
|
||||
}
|
||||
if err := json.NewEncoder(rsp).Encode(body); err != nil {
|
||||
klog.InfoS("could not encode response", "err", err)
|
||||
rsp.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func respondWithAuthenticated(
|
||||
rsp http.ResponseWriter,
|
||||
username, uid string,
|
||||
groups []string,
|
||||
) {
|
||||
rsp.Header().Add("Content-Type", "application/json")
|
||||
body := authenticationv1beta1.TokenReview{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "TokenReview",
|
||||
APIVersion: authenticationv1beta1.SchemeGroupVersion.String(),
|
||||
},
|
||||
Status: authenticationv1beta1.TokenReviewStatus{
|
||||
Authenticated: true,
|
||||
User: authenticationv1beta1.UserInfo{
|
||||
Username: username,
|
||||
Groups: groups,
|
||||
UID: uid,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := json.NewEncoder(rsp).Encode(body); err != nil {
|
||||
klog.InfoS("could not encode response", "err", err)
|
||||
rsp.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func newK8sClient() (kubernetes.Interface, error) {
|
||||
kubeConfig, err := restclient.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
|
||||
}
|
||||
|
||||
// Connect to the core Kubernetes API.
|
||||
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
|
||||
}
|
||||
|
||||
return kubeClient, nil
|
||||
}
|
||||
|
||||
func startControllers(
|
||||
ctx context.Context,
|
||||
dynamicCertProvider dynamiccert.Provider,
|
||||
kubeClient kubernetes.Interface,
|
||||
kubeInformers kubeinformers.SharedInformerFactory,
|
||||
) {
|
||||
aVeryLongTime := time.Hour * 24 * 365 * 100
|
||||
|
||||
const certsSecretResourceName = "local-user-authenticator-tls-serving-certificate"
|
||||
|
||||
// Create controller manager.
|
||||
controllerManager := controllerlib.
|
||||
NewManager().
|
||||
WithController(
|
||||
apicerts.NewCertsManagerController(
|
||||
namespace,
|
||||
certsSecretResourceName,
|
||||
kubeClient,
|
||||
kubeInformers.Core().V1().Secrets(),
|
||||
controllerlib.WithInformer,
|
||||
controllerlib.WithInitialEvent,
|
||||
aVeryLongTime,
|
||||
"local-user-authenticator CA",
|
||||
serviceName,
|
||||
),
|
||||
singletonWorker,
|
||||
).
|
||||
WithController(
|
||||
apicerts.NewCertsObserverController(
|
||||
namespace,
|
||||
certsSecretResourceName,
|
||||
dynamicCertProvider,
|
||||
kubeInformers.Core().V1().Secrets(),
|
||||
controllerlib.WithInformer,
|
||||
),
|
||||
singletonWorker,
|
||||
)
|
||||
|
||||
kubeInformers.Start(ctx.Done())
|
||||
|
||||
go controllerManager.Start(ctx)
|
||||
}
|
||||
|
||||
func startWebhook(
|
||||
ctx context.Context,
|
||||
l net.Listener,
|
||||
dynamicCertProvider dynamiccert.Provider,
|
||||
secretInformer corev1informers.SecretInformer,
|
||||
) error {
|
||||
return newWebhook(dynamicCertProvider, secretInformer).start(ctx, l)
|
||||
}
|
||||
|
||||
func waitForSignal() os.Signal {
|
||||
signalCh := make(chan os.Signal, 1)
|
||||
signal.Notify(signalCh, os.Interrupt)
|
||||
return <-signalCh
|
||||
}
|
||||
|
||||
func run() error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kubeClient, err := newK8sClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create k8s client: %w", err)
|
||||
}
|
||||
|
||||
kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
|
||||
kubeClient,
|
||||
defaultResyncInterval,
|
||||
kubeinformers.WithNamespace(namespace),
|
||||
)
|
||||
|
||||
dynamicCertProvider := dynamiccert.New()
|
||||
|
||||
startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers)
|
||||
klog.InfoS("controllers are ready")
|
||||
|
||||
//nolint: gosec // Intentionally binding to all network interfaces.
|
||||
l, err := net.Listen("tcp", ":443")
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create listener: %w", err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
err = startWebhook(ctx, l, dynamicCertProvider, kubeInformers.Core().V1().Secrets())
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot start webhook: %w", err)
|
||||
}
|
||||
klog.InfoS("webhook is ready", "address", l.Addr().String())
|
||||
|
||||
gotSignal := waitForSignal()
|
||||
klog.InfoS("webhook exiting", "signal", gotSignal)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}
|
||||
578
cmd/local-user-authenticator/main_test.go
Normal file
578
cmd/local-user-authenticator/main_test.go
Normal file
@@ -0,0 +1,578 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/dynamiccert"
|
||||
)
|
||||
|
||||
func TestWebhook(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const namespace = "local-user-authenticator"
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
user, otherUser, colonUser, noGroupUser, oneGroupUser, passwordUndefinedUser, emptyPasswordUser, invalidPasswordHashUser, undefinedGroupsUser :=
|
||||
"some-user", "other-user", "colon-user", "no-group-user", "one-group-user", "password-undefined-user", "empty-password-user", "invalid-password-hash-user", "undefined-groups-user"
|
||||
uid, otherUID, colonUID, noGroupUID, oneGroupUID, passwordUndefinedUID, emptyPasswordUID, invalidPasswordHashUID, undefinedGroupsUID :=
|
||||
"some-uid", "other-uid", "colon-uid", "no-group-uid", "one-group-uid", "password-undefined-uid", "empty-password-uid", "invalid-password-hash-uid", "undefined-groups-uid"
|
||||
password, otherPassword, colonPassword, noGroupPassword, oneGroupPassword, undefinedGroupsPassword :=
|
||||
"some-password", "other-password", "some-:-password", "no-group-password", "one-group-password", "undefined-groups-password"
|
||||
|
||||
group0, group1 := "some-group-0", "some-group-1"
|
||||
groups := group0 + " , " + group1
|
||||
|
||||
kubeClient := kubernetesfake.NewSimpleClientset()
|
||||
addSecretToFakeClientTracker(t, kubeClient, user, uid, password, groups)
|
||||
addSecretToFakeClientTracker(t, kubeClient, otherUser, otherUID, otherPassword, groups)
|
||||
addSecretToFakeClientTracker(t, kubeClient, colonUser, colonUID, colonPassword, groups)
|
||||
addSecretToFakeClientTracker(t, kubeClient, noGroupUser, noGroupUID, noGroupPassword, "")
|
||||
addSecretToFakeClientTracker(t, kubeClient, oneGroupUser, oneGroupUID, oneGroupPassword, group0)
|
||||
addSecretToFakeClientTracker(t, kubeClient, emptyPasswordUser, emptyPasswordUID, "", groups)
|
||||
|
||||
require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(passwordUndefinedUID),
|
||||
Name: passwordUndefinedUser,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"groups": []byte(groups),
|
||||
},
|
||||
}))
|
||||
|
||||
undefinedGroupsUserPasswordHash, err := bcrypt.GenerateFromPassword([]byte(undefinedGroupsPassword), bcrypt.MinCost)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(undefinedGroupsUID),
|
||||
Name: undefinedGroupsUser,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"passwordHash": undefinedGroupsUserPasswordHash,
|
||||
},
|
||||
}))
|
||||
|
||||
require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(invalidPasswordHashUID),
|
||||
Name: invalidPasswordHashUser,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"groups": []byte(groups),
|
||||
"passwordHash": []byte("not a valid password hash"),
|
||||
},
|
||||
}))
|
||||
|
||||
secretInformer := createSecretInformer(t, kubeClient)
|
||||
|
||||
certProvider, caBundle, serverName := newCertProvider(t)
|
||||
w := newWebhook(certProvider, secretInformer)
|
||||
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer l.Close()
|
||||
require.NoError(t, w.start(ctx, l))
|
||||
|
||||
client := newClient(caBundle, serverName)
|
||||
|
||||
goodURL := fmt.Sprintf("https://%s/authenticate", l.Addr().String())
|
||||
goodRequestHeaders := map[string][]string{
|
||||
"Content-Type": {"application/json; charset=UTF-8"},
|
||||
"Accept": {"application/json, */*"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
url string
|
||||
method string
|
||||
headers map[string][]string
|
||||
body func() (io.ReadCloser, error)
|
||||
|
||||
wantStatus int
|
||||
wantHeaders map[string][]string
|
||||
wantBody *authenticationv1beta1.TokenReview
|
||||
}{
|
||||
{
|
||||
name: "success for a user who belongs to multiple groups",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(user, uid, []string{group0, group1}),
|
||||
},
|
||||
{
|
||||
name: "success for a user who belongs to one group",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(oneGroupUser + ":" + oneGroupPassword) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(oneGroupUser, oneGroupUID, []string{group0}),
|
||||
},
|
||||
{
|
||||
name: "success for a user who belongs to no groups",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(noGroupUser + ":" + noGroupPassword) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(noGroupUser, noGroupUID, nil),
|
||||
},
|
||||
{
|
||||
name: "wrong username for password",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(otherUser + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "when a user has no password hash in the secret",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(passwordUndefinedUser + ":foo") },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "when a user has an invalid password hash in the secret",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(invalidPasswordHashUser + ":foo") },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "success for a user who has no groups defined in the secret",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) {
|
||||
return newTokenReviewBody(undefinedGroupsUser + ":" + undefinedGroupsPassword)
|
||||
},
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(undefinedGroupsUser, undefinedGroupsUID, nil),
|
||||
},
|
||||
{
|
||||
name: "when a user has empty string as their password",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(emptyPasswordUser + ":foo") },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "wrong password for username",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + otherPassword) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "non-existent password for username",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + "some-non-existent-password") },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "non-existent username",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody("some-non-existent-user" + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: unauthenticatedResponseJSON(),
|
||||
},
|
||||
{
|
||||
name: "bad token format (missing colon)",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user) },
|
||||
wantStatus: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
name: "password contains colon",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(colonUser + ":" + colonPassword) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(colonUser, colonUID, []string{group0, group1}),
|
||||
},
|
||||
{
|
||||
name: "bad TokenReview group",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) {
|
||||
return newTokenReviewBodyWithGVK(
|
||||
user+":"+password,
|
||||
&schema.GroupVersionKind{
|
||||
Group: "bad group",
|
||||
Version: authenticationv1beta1.SchemeGroupVersion.Version,
|
||||
Kind: "TokenReview",
|
||||
},
|
||||
)
|
||||
},
|
||||
wantStatus: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
name: "bad TokenReview version",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) {
|
||||
return newTokenReviewBodyWithGVK(
|
||||
user+":"+password,
|
||||
&schema.GroupVersionKind{
|
||||
Group: authenticationv1beta1.SchemeGroupVersion.Group,
|
||||
Version: "bad version",
|
||||
Kind: "TokenReview",
|
||||
},
|
||||
)
|
||||
},
|
||||
wantStatus: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
name: "bad TokenReview kind",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) {
|
||||
return newTokenReviewBodyWithGVK(
|
||||
user+":"+password,
|
||||
&schema.GroupVersionKind{
|
||||
Group: authenticationv1beta1.SchemeGroupVersion.Group,
|
||||
Version: authenticationv1beta1.SchemeGroupVersion.Version,
|
||||
Kind: "wrong-kind",
|
||||
},
|
||||
)
|
||||
},
|
||||
wantStatus: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
name: "bad path",
|
||||
url: fmt.Sprintf("https://%s/tuna", l.Addr().String()),
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
|
||||
wantStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "bad method",
|
||||
url: goodURL,
|
||||
method: http.MethodGet,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
|
||||
wantStatus: http.StatusMethodNotAllowed,
|
||||
},
|
||||
{
|
||||
name: "bad content type",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: map[string][]string{
|
||||
"Content-Type": {"application/xml"},
|
||||
"Accept": {"application/json"},
|
||||
},
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
|
||||
wantStatus: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "bad accept",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Accept": {"application/xml"},
|
||||
},
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
|
||||
wantStatus: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "success when there are multiple accepts and one of them is json",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Accept": {"something/else, application/xml, application/json"},
|
||||
},
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(user, uid, []string{group0, group1}),
|
||||
},
|
||||
{
|
||||
name: "success when there are multiple accepts and one of them is */*",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Accept": {"something/else, */*, application/foo"},
|
||||
},
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(user, uid, []string{group0, group1}),
|
||||
},
|
||||
{
|
||||
name: "success when there are multiple accepts and one of them is application/*",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Accept": {"something/else, application/*, application/foo"},
|
||||
},
|
||||
body: func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
|
||||
wantStatus: http.StatusOK,
|
||||
wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
|
||||
wantBody: authenticatedResponseJSON(user, uid, []string{group0, group1}),
|
||||
},
|
||||
{
|
||||
name: "bad body",
|
||||
url: goodURL,
|
||||
method: http.MethodPost,
|
||||
headers: goodRequestHeaders,
|
||||
body: func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewBuffer([]byte("invalid body"))), nil },
|
||||
wantStatus: http.StatusBadRequest,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
parsedURL, err := url.Parse(test.url)
|
||||
require.NoError(t, err)
|
||||
|
||||
body, err := test.body()
|
||||
require.NoError(t, err)
|
||||
|
||||
rsp, err := client.Do(&http.Request{
|
||||
Method: test.method,
|
||||
URL: parsedURL,
|
||||
Header: test.headers,
|
||||
Body: body,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
defer rsp.Body.Close()
|
||||
|
||||
require.Equal(t, test.wantStatus, rsp.StatusCode)
|
||||
|
||||
if test.wantHeaders != nil {
|
||||
for k, v := range test.wantHeaders {
|
||||
require.Equal(t, v, rsp.Header.Values(k))
|
||||
}
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(rsp.Body)
|
||||
require.NoError(t, err)
|
||||
if test.wantBody != nil {
|
||||
require.NoError(t, err)
|
||||
|
||||
var tr authenticationv1beta1.TokenReview
|
||||
require.NoError(t, json.Unmarshal(responseBody, &tr))
|
||||
require.Equal(t, test.wantBody, &tr)
|
||||
} else {
|
||||
require.Empty(t, responseBody)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func createSecretInformer(t *testing.T, kubeClient kubernetes.Interface) corev1informers.SecretInformer {
|
||||
t.Helper()
|
||||
|
||||
kubeInformers := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
|
||||
|
||||
secretInformer := kubeInformers.Core().V1().Secrets()
|
||||
|
||||
// We need to call Informer() on the secretInformer to lazily instantiate the
|
||||
// informer factory before syncing it.
|
||||
secretInformer.Informer()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
|
||||
defer cancel()
|
||||
|
||||
kubeInformers.Start(ctx.Done())
|
||||
|
||||
informerTypesSynced := kubeInformers.WaitForCacheSync(ctx.Done())
|
||||
require.True(t, informerTypesSynced[reflect.TypeOf(&corev1.Secret{})])
|
||||
|
||||
return secretInformer
|
||||
}
|
||||
|
||||
// newCertProvider returns a dynamiccert.Provider configured
|
||||
// with a valid serving cert, the CA bundle that can be used to verify the serving
|
||||
// cert, and the server name that can be used to verify the TLS peer.
|
||||
func newCertProvider(t *testing.T) (dynamiccert.Provider, []byte, string) {
|
||||
t.Helper()
|
||||
|
||||
serverName := "local-user-authenticator"
|
||||
|
||||
ca, err := certauthority.New(pkix.Name{CommonName: serverName + " CA"}, time.Hour*24)
|
||||
require.NoError(t, err)
|
||||
|
||||
cert, err := ca.Issue(pkix.Name{CommonName: serverName}, []string{serverName}, time.Hour*24)
|
||||
require.NoError(t, err)
|
||||
|
||||
certPEM, keyPEM, err := certauthority.ToPEM(cert)
|
||||
require.NoError(t, err)
|
||||
|
||||
certProvider := dynamiccert.New()
|
||||
certProvider.Set(certPEM, keyPEM)
|
||||
|
||||
return certProvider, ca.Bundle(), serverName
|
||||
}
|
||||
|
||||
// newClient creates an http.Client that can be used to make an HTTPS call to a
|
||||
// service whose serving certs can be verified by the provided CA bundle.
|
||||
func newClient(caBundle []byte, serverName string) *http.Client {
|
||||
rootCAs := x509.NewCertPool()
|
||||
rootCAs.AppendCertsFromPEM(caBundle)
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
MinVersion: tls.VersionTLS13,
|
||||
RootCAs: rootCAs,
|
||||
ServerName: serverName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newTokenReviewBody creates an io.ReadCloser that contains a JSON-encoded
|
||||
// TokenReview request with expected APIVersion and Kind fields.
|
||||
func newTokenReviewBody(token string) (io.ReadCloser, error) {
|
||||
return newTokenReviewBodyWithGVK(
|
||||
token,
|
||||
&schema.GroupVersionKind{
|
||||
Group: authenticationv1beta1.SchemeGroupVersion.Group,
|
||||
Version: authenticationv1beta1.SchemeGroupVersion.Version,
|
||||
Kind: "TokenReview",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// newTokenReviewBodyWithGVK creates an io.ReadCloser that contains a
|
||||
// JSON-encoded TokenReview request. The TypeMeta fields of the TokenReview are
|
||||
// filled in with the provided gvk.
|
||||
func newTokenReviewBodyWithGVK(token string, gvk *schema.GroupVersionKind) (io.ReadCloser, error) {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
tr := authenticationv1beta1.TokenReview{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: gvk.GroupVersion().String(),
|
||||
Kind: gvk.Kind,
|
||||
},
|
||||
Spec: authenticationv1beta1.TokenReviewSpec{
|
||||
Token: token,
|
||||
},
|
||||
}
|
||||
err := json.NewEncoder(buf).Encode(&tr)
|
||||
return ioutil.NopCloser(buf), err
|
||||
}
|
||||
|
||||
func unauthenticatedResponseJSON() *authenticationv1beta1.TokenReview {
|
||||
return &authenticationv1beta1.TokenReview{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "TokenReview",
|
||||
APIVersion: "authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: authenticationv1beta1.TokenReviewStatus{
|
||||
Authenticated: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func authenticatedResponseJSON(user, uid string, groups []string) *authenticationv1beta1.TokenReview {
|
||||
return &authenticationv1beta1.TokenReview{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "TokenReview",
|
||||
APIVersion: "authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: authenticationv1beta1.TokenReviewStatus{
|
||||
Authenticated: true,
|
||||
User: authenticationv1beta1.UserInfo{
|
||||
Username: user,
|
||||
Groups: groups,
|
||||
UID: uid,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func addSecretToFakeClientTracker(t *testing.T, kubeClient *kubernetesfake.Clientset, username, uid, password, groups string) {
|
||||
passwordHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
|
||||
require.NoError(t, err)
|
||||
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(uid),
|
||||
Name: username,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"passwordHash": passwordHash,
|
||||
"groups": []byte(groups),
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, kubeClient.Tracker().Add(secret))
|
||||
}
|
||||
29
cmd/pinniped-server/main.go
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/client-go/pkg/version"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/logs"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"go.pinniped.dev/internal/server"
|
||||
)
|
||||
|
||||
func main() {
|
||||
logs.InitLogs()
|
||||
defer logs.FlushLogs()
|
||||
|
||||
klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
|
||||
|
||||
ctx := genericapiserver.SetupSignalContext()
|
||||
|
||||
if err := server.New(ctx, os.Args[1:], os.Stdout, os.Stderr).Run(); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}
|
||||
167
cmd/pinniped/cmd/exchange_credential.go
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
idpv1alpha1 "go.pinniped.dev/generated/1.19/apis/idp/v1alpha1"
|
||||
"go.pinniped.dev/internal/client"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/here"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(newExchangeCredentialCmd(os.Args, os.Stdout, os.Stderr).cmd)
|
||||
}
|
||||
|
||||
type exchangeCredentialCommand struct {
|
||||
// runFunc is called by the cobra.Command.Run hook. It is included here for
|
||||
// testability.
|
||||
runFunc func(stdout, stderr io.Writer)
|
||||
|
||||
// cmd is the cobra.Command for this CLI command. It is included here for
|
||||
// testability.
|
||||
cmd *cobra.Command
|
||||
}
|
||||
|
||||
func newExchangeCredentialCmd(args []string, stdout, stderr io.Writer) *exchangeCredentialCommand {
|
||||
c := &exchangeCredentialCommand{
|
||||
runFunc: runExchangeCredential,
|
||||
}
|
||||
|
||||
c.cmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
c.runFunc(stdout, stderr)
|
||||
},
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "exchange-credential",
|
||||
Short: "Exchange a credential for a cluster-specific access credential",
|
||||
Long: here.Doc(`
|
||||
Exchange a credential which proves your identity for a time-limited,
|
||||
cluster-specific access credential.
|
||||
|
||||
Designed to be conveniently used as a credential plugin for kubectl.
|
||||
See the help message for 'pinniped get-kubeconfig' for more
|
||||
information about setting up a kubeconfig file using Pinniped.
|
||||
|
||||
Requires all of the following environment variables, which are
|
||||
typically set in the kubeconfig:
|
||||
- PINNIPED_TOKEN: the token to send to Pinniped for exchange
|
||||
- PINNIPED_NAMESPACE: the namespace of the identity provider to authenticate
|
||||
against
|
||||
- PINNIPED_IDP_TYPE: the type of identity provider to authenticate
|
||||
against (e.g., "webhook")
|
||||
- PINNIPED_IDP_NAME: the name of the identity provider to authenticate
|
||||
against
|
||||
- PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
|
||||
Pinniped's HTTPS endpoint
|
||||
- PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
|
||||
exchange API
|
||||
|
||||
For more information about credential plugins in general, see
|
||||
https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
|
||||
`),
|
||||
}
|
||||
|
||||
c.cmd.SetArgs(args)
|
||||
c.cmd.SetOut(stdout)
|
||||
c.cmd.SetErr(stderr)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
type envGetter func(string) (string, bool)
|
||||
type tokenExchanger func(
|
||||
ctx context.Context,
|
||||
namespace string,
|
||||
idp corev1.TypedLocalObjectReference,
|
||||
token string,
|
||||
caBundle string,
|
||||
apiEndpoint string,
|
||||
) (*clientauthenticationv1beta1.ExecCredential, error)
|
||||
|
||||
const (
|
||||
ErrMissingEnvVar = constable.Error("failed to get credential: environment variable not set")
|
||||
ErrInvalidIDPType = constable.Error("invalid IDP type")
|
||||
)
|
||||
|
||||
func runExchangeCredential(stdout, _ io.Writer) {
|
||||
err := exchangeCredential(os.LookupEnv, client.ExchangeToken, stdout, 30*time.Second)
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "%s\n", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func exchangeCredential(envGetter envGetter, tokenExchanger tokenExchanger, outputWriter io.Writer, timeout time.Duration) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
namespace, varExists := envGetter("PINNIPED_NAMESPACE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_NAMESPACE")
|
||||
}
|
||||
|
||||
idpType, varExists := envGetter("PINNIPED_IDP_TYPE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_IDP_TYPE")
|
||||
}
|
||||
|
||||
idpName, varExists := envGetter("PINNIPED_IDP_NAME")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_IDP_NAME")
|
||||
}
|
||||
|
||||
token, varExists := envGetter("PINNIPED_TOKEN")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_TOKEN")
|
||||
}
|
||||
|
||||
caBundle, varExists := envGetter("PINNIPED_CA_BUNDLE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_CA_BUNDLE")
|
||||
}
|
||||
|
||||
apiEndpoint, varExists := envGetter("PINNIPED_K8S_API_ENDPOINT")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_K8S_API_ENDPOINT")
|
||||
}
|
||||
|
||||
idp := corev1.TypedLocalObjectReference{Name: idpName}
|
||||
switch strings.ToLower(idpType) {
|
||||
case "webhook":
|
||||
idp.APIGroup = &idpv1alpha1.SchemeGroupVersion.Group
|
||||
idp.Kind = "WebhookIdentityProvider"
|
||||
default:
|
||||
return fmt.Errorf(`%w: %q, supported values are "webhook"`, ErrInvalidIDPType, idpType)
|
||||
}
|
||||
|
||||
cred, err := tokenExchanger(ctx, namespace, idp, token, caBundle, apiEndpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get credential: %w", err)
|
||||
}
|
||||
|
||||
err = json.NewEncoder(outputWriter).Encode(cred)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal response to stdout: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func envVarNotSetError(varName string) error {
|
||||
return fmt.Errorf("%w: %s", ErrMissingEnvVar, varName)
|
||||
}
|
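Because exchangeCredential wraps the ErrMissingEnvVar and ErrInvalidIDPType sentinels with %w, callers can tell configuration mistakes apart from transport failures using errors.Is. The sketch below is illustrative only and not part of this commit; it assumes it lives in package cmd next to exchange_credential.go, and classifyExchangeError is a hypothetical helper:

// Illustrative sketch only (not part of this commit); assumes package cmd so the
// sentinel errors defined above are in scope.
package cmd

import (
	"errors"
	"fmt"
)

// classifyExchangeError is a hypothetical helper that maps errors returned by
// exchangeCredential onto user-facing hints, using errors.Is on the %w-wrapped sentinels.
func classifyExchangeError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, ErrMissingEnvVar):
		return "config problem: a required PINNIPED_* environment variable is not set"
	case errors.Is(err, ErrInvalidIDPType):
		return "config problem: unsupported PINNIPED_IDP_TYPE value"
	default:
		return fmt.Sprintf("credential exchange failed: %v", err)
	}
}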
||||
296
cmd/pinniped/cmd/exchange_credential_test.go
Normal file
@@ -0,0 +1,296 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
knownGoodUsageForExchangeCredential = here.Doc(`
|
||||
Usage:
|
||||
exchange-credential [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for exchange-credential
|
||||
|
||||
`)
|
||||
|
||||
knownGoodHelpForExchangeCredential = here.Doc(`
|
||||
Exchange a credential which proves your identity for a time-limited,
|
||||
cluster-specific access credential.
|
||||
|
||||
Designed to be conveniently used as a credential plugin for kubectl.
|
||||
See the help message for 'pinniped get-kubeconfig' for more
|
||||
information about setting up a kubeconfig file using Pinniped.
|
||||
|
||||
Requires all of the following environment variables, which are
|
||||
typically set in the kubeconfig:
|
||||
- PINNIPED_TOKEN: the token to send to Pinniped for exchange
|
||||
- PINNIPED_NAMESPACE: the namespace of the identity provider to authenticate
|
||||
against
|
||||
- PINNIPED_IDP_TYPE: the type of identity provider to authenticate
|
||||
against (e.g., "webhook")
|
||||
- PINNIPED_IDP_NAME: the name of the identity provider to authenticate
|
||||
against
|
||||
- PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
|
||||
Pinniped's HTTPS endpoint
|
||||
- PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
|
||||
exchange API
|
||||
|
||||
For more information about credential plugins in general, see
|
||||
https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
|
||||
|
||||
Usage:
|
||||
exchange-credential [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for exchange-credential
|
||||
`)
|
||||
)
|
||||
|
||||
func TestNewCredentialExchangeCmd(t *testing.T) {
|
||||
spec.Run(t, "newCredentialExchangeCmd", func(t *testing.T, when spec.G, it spec.S) {
|
||||
var r *require.Assertions
|
||||
var stdout, stderr *bytes.Buffer
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
|
||||
stdout, stderr = bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
|
||||
})
|
||||
|
||||
it("calls runFunc and does not print usage or help when correct arguments and flags are used", func() {
|
||||
c := newExchangeCredentialCmd([]string{}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
r.NoError(c.cmd.Execute())
|
||||
r.True(runFuncCalled)
|
||||
r.Empty(stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
|
||||
it("fails when args are passed", func() {
|
||||
c := newExchangeCredentialCmd([]string{"some-arg"}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
errorMessage := `unknown command "some-arg" for "exchange-credential"`
|
||||
r.EqualError(c.cmd.Execute(), errorMessage)
|
||||
r.False(runFuncCalled)
|
||||
|
||||
output := "Error: " + errorMessage + "\n" + knownGoodUsageForExchangeCredential
|
||||
r.Equal(output, stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
|
||||
it("prints a nice help message", func() {
|
||||
c := newExchangeCredentialCmd([]string{"--help"}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
r.NoError(c.cmd.Execute())
|
||||
r.False(runFuncCalled)
|
||||
r.Equal(knownGoodHelpForExchangeCredential, stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
|
||||
func TestExchangeCredential(t *testing.T) {
|
||||
spec.Run(t, "cmd.exchangeCredential", func(t *testing.T, when spec.G, it spec.S) {
|
||||
var r *require.Assertions
|
||||
var buffer *bytes.Buffer
|
||||
var tokenExchanger tokenExchanger
|
||||
var fakeEnv map[string]string
|
||||
|
||||
var envGetter envGetter = func(envVarName string) (string, bool) {
|
||||
value, present := fakeEnv[envVarName]
|
||||
if !present {
|
||||
return "", false
|
||||
}
|
||||
return value, true
|
||||
}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
buffer = new(bytes.Buffer)
|
||||
fakeEnv = map[string]string{
|
||||
"PINNIPED_NAMESPACE": "namespace from env",
|
||||
"PINNIPED_IDP_TYPE": "Webhook",
|
||||
"PINNIPED_IDP_NAME": "webhook name from env",
|
||||
"PINNIPED_TOKEN": "token from env",
|
||||
"PINNIPED_CA_BUNDLE": "ca bundle from env",
|
||||
"PINNIPED_K8S_API_ENDPOINT": "k8s api from env",
|
||||
}
|
||||
})
|
||||
|
||||
when("env vars are missing", func() {
|
||||
it("returns an error when PINNIPED_NAMESPACE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_NAMESPACE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_NAMESPACE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_IDP_TYPE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_IDP_TYPE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_IDP_TYPE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_IDP_NAME is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_IDP_NAME")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_IDP_NAME")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_TOKEN is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_TOKEN")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_TOKEN")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_CA_BUNDLE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_CA_BUNDLE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_CA_BUNDLE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_K8S_API_ENDPOINT is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_K8S_API_ENDPOINT")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_K8S_API_ENDPOINT")
|
||||
})
|
||||
})
|
||||
|
||||
when("env vars are invalid", func() {
|
||||
it("returns an error when PINNIPED_IDP_TYPE is missing", func() {
|
||||
fakeEnv["PINNIPED_IDP_TYPE"] = "invalid"
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, `invalid IDP type: "invalid", supported values are "webhook"`)
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange fails", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, idp corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: some error")
|
||||
})
|
||||
})
|
||||
|
||||
when("the JSON encoder fails", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, idp corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, &testutil.ErrorWriter{ReturnError: fmt.Errorf("some IO error")}, 30*time.Second)
|
||||
r.EqualError(err, "failed to marshal response to stdout: some IO error")
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange times out", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, idp corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 1*time.Millisecond)
|
||||
r.EqualError(err, "failed to get credential: context deadline exceeded")
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange succeeds", func() {
|
||||
var actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint string
|
||||
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, idp corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint = namespace, token, caBundle, apiEndpoint
|
||||
now := metav1.NewTime(time.Date(2020, 7, 29, 1, 2, 3, 0, time.UTC))
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &now,
|
||||
ClientCertificateData: "some certificate",
|
||||
ClientKeyData: "some key",
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
})
|
||||
|
||||
it("writes the execCredential to the given writer", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.NoError(err)
|
||||
r.Equal(fakeEnv["PINNIPED_NAMESPACE"], actualNamespace)
|
||||
r.Equal(fakeEnv["PINNIPED_TOKEN"], actualToken)
|
||||
r.Equal(fakeEnv["PINNIPED_CA_BUNDLE"], actualCaBundle)
|
||||
r.Equal(fakeEnv["PINNIPED_K8S_API_ENDPOINT"], actualAPIEndpoint)
|
||||
expected := `{
|
||||
"kind": "ExecCredential",
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"spec": {},
|
||||
"status": {
|
||||
"expirationTimestamp":"2020-07-29T01:02:03Z",
|
||||
"clientCertificateData": "some certificate",
|
||||
"clientKeyData":"some key",
|
||||
"token": "some token"
|
||||
}
|
||||
}`
|
||||
r.JSONEq(expected, buffer.String())
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
347
cmd/pinniped/cmd/get_kubeconfig.go
Normal file
@@ -0,0 +1,347 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/here"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(newGetKubeConfigCommand().Command())
|
||||
}
|
||||
|
||||
type getKubeConfigFlags struct {
|
||||
token string
|
||||
kubeconfig string
|
||||
contextOverride string
|
||||
namespace string
|
||||
idpName string
|
||||
idpType string
|
||||
}
|
||||
|
||||
type getKubeConfigCommand struct {
|
||||
flags getKubeConfigFlags
|
||||
// Test mocking points
|
||||
getPathToSelf func() (string, error)
|
||||
kubeClientCreator func(restConfig *rest.Config) (pinnipedclientset.Interface, error)
|
||||
}
|
||||
|
||||
func newGetKubeConfigCommand() *getKubeConfigCommand {
|
||||
return &getKubeConfigCommand{
|
||||
flags: getKubeConfigFlags{
|
||||
namespace: "pinniped",
|
||||
},
|
||||
getPathToSelf: os.Executable,
|
||||
kubeClientCreator: func(restConfig *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedclientset.NewForConfig(restConfig)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *getKubeConfigCommand) Command() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
RunE: c.run,
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "get-kubeconfig",
|
||||
Short: "Print a kubeconfig for authenticating into a cluster via Pinniped",
|
||||
Long: here.Doc(`
|
||||
Print a kubeconfig for authenticating into a cluster via Pinniped.
|
||||
|
||||
Requires admin-like access to the cluster using the current
|
||||
kubeconfig context in order to access Pinniped's metadata.
|
||||
The current kubeconfig is found similarly to how kubectl finds it:
|
||||
using the value of the --kubeconfig option, or if that is not
|
||||
specified then from the value of the KUBECONFIG environment
|
||||
variable, or if that is not specified then it defaults to
|
||||
.kube/config in your home directory.
|
||||
|
||||
Prints a kubeconfig which is suitable to access the cluster using
|
||||
Pinniped as the authentication mechanism. This kubeconfig output
|
||||
can be saved to a file and used with future kubectl commands, e.g.:
|
||||
pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
|
||||
kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
|
||||
`),
|
||||
}
|
||||
cmd.Flags().StringVar(&c.flags.token, "token", "", "Credential to include in the resulting kubeconfig output (Required)")
|
||||
err := cmd.MarkFlagRequired("token")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cmd.Flags().StringVar(&c.flags.kubeconfig, "kubeconfig", c.flags.kubeconfig, "Path to the kubeconfig file")
|
||||
cmd.Flags().StringVar(&c.flags.contextOverride, "kubeconfig-context", c.flags.contextOverride, "Kubeconfig context override")
|
||||
cmd.Flags().StringVar(&c.flags.namespace, "pinniped-namespace", c.flags.namespace, "Namespace in which Pinniped was installed")
|
||||
cmd.Flags().StringVar(&c.flags.idpType, "idp-type", c.flags.idpType, "Identity provider type (e.g., 'webhook')")
|
||||
cmd.Flags().StringVar(&c.flags.idpName, "idp-name", c.flags.idpType, "Identity provider name")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *getKubeConfigCommand) run(cmd *cobra.Command, args []string) error {
|
||||
fullPathToSelf, err := c.getPathToSelf()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find path to self: %w", err)
|
||||
}
|
||||
|
||||
clientConfig := newClientConfig(c.flags.kubeconfig, c.flags.contextOverride)
|
||||
|
||||
currentKubeConfig, err := clientConfig.RawConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clientset, err := c.kubeClientCreator(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idpType, idpName := c.flags.idpType, c.flags.idpName
|
||||
if idpType == "" || idpName == "" {
|
||||
idpType, idpName, err = getDefaultIDP(clientset, c.flags.namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
credentialIssuerConfig, err := fetchPinnipedCredentialIssuerConfig(clientset, c.flags.namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if credentialIssuerConfig.Status.KubeConfigInfo == nil {
|
||||
return constable.Error(`CredentialIssuerConfig "pinniped-config" was missing KubeConfigInfo`)
|
||||
}
|
||||
|
||||
v1Cluster, err := copyCurrentClusterFromExistingKubeConfig(currentKubeConfig, c.flags.contextOverride)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = issueWarningForNonMatchingServerOrCA(v1Cluster, credentialIssuerConfig, cmd.ErrOrStderr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := newPinnipedKubeconfig(v1Cluster, fullPathToSelf, c.flags.token, c.flags.namespace, idpType, idpName)
|
||||
|
||||
err = writeConfigAsYAML(cmd.OutOrStdout(), config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func issueWarningForNonMatchingServerOrCA(v1Cluster v1.Cluster, credentialIssuerConfig *configv1alpha1.CredentialIssuerConfig, warningsWriter io.Writer) error {
|
||||
credentialIssuerConfigCA, err := base64.StdEncoding.DecodeString(credentialIssuerConfig.Status.KubeConfigInfo.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v1Cluster.Server != credentialIssuerConfig.Status.KubeConfigInfo.Server ||
|
||||
!bytes.Equal(v1Cluster.CertificateAuthorityData, credentialIssuerConfigCA) {
|
||||
_, err := warningsWriter.Write([]byte("WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuerConfig on the cluster. Using local kubeconfig values.\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("output write error: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type noIDPError struct{ Namespace string }
|
||||
|
||||
func (e noIDPError) Error() string {
|
||||
return fmt.Sprintf(`no identity providers were found in namespace %q`, e.Namespace)
|
||||
}
|
||||
|
||||
type indeterminateIDPError struct{ Namespace string }
|
||||
|
||||
func (e indeterminateIDPError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
`multiple identity providers were found in namespace %q, so --idp-name/--idp-type must be specified`,
|
||||
e.Namespace,
|
||||
)
|
||||
}
|
||||
|
||||
func getDefaultIDP(clientset pinnipedclientset.Interface, namespace string) (string, string, error) {
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
|
||||
webhooks, err := clientset.IDPV1alpha1().WebhookIdentityProviders(namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
type ref struct{ idpType, idpName string }
|
||||
idps := make([]ref, 0, len(webhooks.Items))
|
||||
for _, webhook := range webhooks.Items {
|
||||
idps = append(idps, ref{idpType: "webhook", idpName: webhook.Name})
|
||||
}
|
||||
|
||||
if len(idps) == 0 {
|
||||
return "", "", noIDPError{namespace}
|
||||
}
|
||||
if len(idps) > 1 {
|
||||
return "", "", indeterminateIDPError{namespace}
|
||||
}
|
||||
return idps[0].idpType, idps[0].idpName, nil
|
||||
}
|
||||
|
||||
func fetchPinnipedCredentialIssuerConfig(clientset pinnipedclientset.Interface, pinnipedInstallationNamespace string) (*configv1alpha1.CredentialIssuerConfig, error) {
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
|
||||
credentialIssuerConfigs, err := clientset.ConfigV1alpha1().CredentialIssuerConfigs(pinnipedInstallationNamespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(credentialIssuerConfigs.Items) == 0 {
|
||||
return nil, constable.Error(fmt.Sprintf(
|
||||
`No CredentialIssuerConfig was found in namespace "%s". Is Pinniped installed on this cluster in namespace "%s"?`,
|
||||
pinnipedInstallationNamespace,
|
||||
pinnipedInstallationNamespace,
|
||||
))
|
||||
}
|
||||
|
||||
if len(credentialIssuerConfigs.Items) > 1 {
|
||||
return nil, constable.Error(fmt.Sprintf(
|
||||
`More than one CredentialIssuerConfig was found in namespace "%s"`,
|
||||
pinnipedInstallationNamespace,
|
||||
))
|
||||
}
|
||||
|
||||
return &credentialIssuerConfigs.Items[0], nil
|
||||
}
|
||||
|
||||
func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules.ExplicitPath = kubeconfigPathOverride
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
|
||||
CurrentContext: currentContextName,
|
||||
})
|
||||
return clientConfig
|
||||
}
|
||||
|
||||
func writeConfigAsYAML(outputWriter io.Writer, config v1.Config) error {
|
||||
output, err := yaml.Marshal(&config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("YAML serialization error: %w", err)
|
||||
}
|
||||
|
||||
_, err = outputWriter.Write(output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("output write error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyCurrentClusterFromExistingKubeConfig(currentKubeConfig clientcmdapi.Config, currentContextNameOverride string) (v1.Cluster, error) {
|
||||
v1Cluster := v1.Cluster{}
|
||||
|
||||
contextName := currentKubeConfig.CurrentContext
|
||||
if currentContextNameOverride != "" {
|
||||
contextName = currentContextNameOverride
|
||||
}
|
||||
|
||||
err := v1.Convert_api_Cluster_To_v1_Cluster(
|
||||
currentKubeConfig.Clusters[currentKubeConfig.Contexts[contextName].Cluster],
|
||||
&v1Cluster,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return v1.Cluster{}, err
|
||||
}
|
||||
|
||||
return v1Cluster, nil
|
||||
}
|
||||
|
||||
func newPinnipedKubeconfig(v1Cluster v1.Cluster, fullPathToSelf string, token string, namespace string, idpType string, idpName string) v1.Config {
|
||||
clusterName := "pinniped-cluster"
|
||||
userName := "pinniped-user"
|
||||
|
||||
return v1.Config{
|
||||
Kind: "Config",
|
||||
APIVersion: v1.SchemeGroupVersion.Version,
|
||||
Preferences: v1.Preferences{},
|
||||
Clusters: []v1.NamedCluster{
|
||||
{
|
||||
Name: clusterName,
|
||||
Cluster: v1Cluster,
|
||||
},
|
||||
},
|
||||
Contexts: []v1.NamedContext{
|
||||
{
|
||||
Name: clusterName,
|
||||
Context: v1.Context{
|
||||
Cluster: clusterName,
|
||||
AuthInfo: userName,
|
||||
},
|
||||
},
|
||||
},
|
||||
AuthInfos: []v1.NamedAuthInfo{
|
||||
{
|
||||
Name: userName,
|
||||
AuthInfo: v1.AuthInfo{
|
||||
Exec: &v1.ExecConfig{
|
||||
Command: fullPathToSelf,
|
||||
Args: []string{"exchange-credential"},
|
||||
Env: []v1.ExecEnvVar{
|
||||
{
|
||||
Name: "PINNIPED_K8S_API_ENDPOINT",
|
||||
Value: v1Cluster.Server,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_CA_BUNDLE",
|
||||
Value: string(v1Cluster.CertificateAuthorityData)},
|
||||
{
|
||||
Name: "PINNIPED_NAMESPACE",
|
||||
Value: namespace,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_TOKEN",
|
||||
Value: token,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_IDP_TYPE",
|
||||
Value: idpType,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_IDP_NAME",
|
||||
Value: idpName,
|
||||
},
|
||||
},
|
||||
APIVersion: clientauthenticationv1beta1.SchemeGroupVersion.String(),
|
||||
InstallHint: "The Pinniped CLI is required to authenticate to the current cluster.\n" +
|
||||
"For more information, please visit https://pinniped.dev",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: clusterName,
|
||||
}
|
||||
}
|
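getDefaultIDP reports its failure modes as the typed errors noIDPError and indeterminateIDPError, so a caller can recover the namespace with errors.As and print an actionable hint. A minimal, illustrative sketch (not part of this commit), assuming it sits in package cmd next to get_kubeconfig.go; describeIDPLookupError is a hypothetical helper:

// Illustrative sketch only (not part of this commit); assumes package cmd so the
// typed errors defined above are in scope.
package cmd

import (
	"errors"
	"fmt"
)

// describeIDPLookupError is a hypothetical helper that turns IDP-discovery failures
// into actionable hints for the user.
func describeIDPLookupError(err error) string {
	var none noIDPError
	var many indeterminateIDPError
	switch {
	case errors.As(err, &none):
		return fmt.Sprintf("no identity providers in namespace %q; create a WebhookIdentityProvider first", none.Namespace)
	case errors.As(err, &many):
		return fmt.Sprintf("namespace %q has more than one identity provider; pass --idp-name and --idp-type", many.Namespace)
	default:
		return err.Error()
	}
}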
||||
401
cmd/pinniped/cmd/get_kubeconfig_test.go
Normal file
@@ -0,0 +1,401 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
idpv1alpha "go.pinniped.dev/generated/1.19/apis/idp/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
|
||||
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/here"
|
||||
)
|
||||
|
||||
var (
|
||||
knownGoodUsageForGetKubeConfig = here.Doc(`
|
||||
Usage:
|
||||
get-kubeconfig [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for get-kubeconfig
|
||||
--idp-name string Identity provider name
|
||||
--idp-type string Identity provider type (e.g., 'webhook')
|
||||
--kubeconfig string Path to the kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context override
|
||||
--pinniped-namespace string Namespace in which Pinniped was installed (default "pinniped")
|
||||
--token string Credential to include in the resulting kubeconfig output (Required)
|
||||
|
||||
`)
|
||||
|
||||
knownGoodHelpForGetKubeConfig = here.Doc(`
|
||||
Print a kubeconfig for authenticating into a cluster via Pinniped.
|
||||
|
||||
Requires admin-like access to the cluster using the current
|
||||
kubeconfig context in order to access Pinniped's metadata.
|
||||
The current kubeconfig is found similarly to how kubectl finds it:
|
||||
using the value of the --kubeconfig option, or if that is not
|
||||
specified then from the value of the KUBECONFIG environment
|
||||
variable, or if that is not specified then it defaults to
|
||||
.kube/config in your home directory.
|
||||
|
||||
Prints a kubeconfig which is suitable to access the cluster using
|
||||
Pinniped as the authentication mechanism. This kubeconfig output
|
||||
can be saved to a file and used with future kubectl commands, e.g.:
|
||||
pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
|
||||
kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
|
||||
|
||||
Usage:
|
||||
get-kubeconfig [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for get-kubeconfig
|
||||
--idp-name string Identity provider name
|
||||
--idp-type string Identity provider type (e.g., 'webhook')
|
||||
--kubeconfig string Path to the kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context override
|
||||
--pinniped-namespace string Namespace in which Pinniped was installed (default "pinniped")
|
||||
--token string Credential to include in the resulting kubeconfig output (Required)
|
||||
`)
|
||||
)
|
||||
|
||||
func TestNewGetKubeConfigCmd(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
wantError bool
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{
|
||||
name: "help flag passed",
|
||||
args: []string{"--help"},
|
||||
wantStdout: knownGoodHelpForGetKubeConfig,
|
||||
},
|
||||
{
|
||||
name: "missing required flag",
|
||||
args: []string{},
|
||||
wantError: true,
|
||||
wantStdout: `Error: required flag(s) "token" not set` + "\n" + knownGoodUsageForGetKubeConfig,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
cmd := newGetKubeConfigCommand().Command()
|
||||
require.NotNil(t, cmd)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs(tt.args)
|
||||
err := cmd.Execute()
|
||||
if tt.wantError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
|
||||
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type expectedKubeconfigYAML struct {
|
||||
clusterCAData string
|
||||
clusterServer string
|
||||
command string
|
||||
token string
|
||||
pinnipedEndpoint string
|
||||
pinnipedCABundle string
|
||||
namespace string
|
||||
idpType string
|
||||
idpName string
|
||||
}
|
||||
|
||||
func (e expectedKubeconfigYAML) String() string {
|
||||
return here.Docf(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: %s
|
||||
server: %s
|
||||
name: pinniped-cluster
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped-cluster
|
||||
user: pinniped-user
|
||||
name: pinniped-cluster
|
||||
current-context: pinniped-cluster
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped-user
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- exchange-credential
|
||||
command: %s
|
||||
env:
|
||||
- name: PINNIPED_K8S_API_ENDPOINT
|
||||
value: %s
|
||||
- name: PINNIPED_CA_BUNDLE
|
||||
value: %s
|
||||
- name: PINNIPED_NAMESPACE
|
||||
value: %s
|
||||
- name: PINNIPED_TOKEN
|
||||
value: %s
|
||||
- name: PINNIPED_IDP_TYPE
|
||||
value: %s
|
||||
- name: PINNIPED_IDP_NAME
|
||||
value: %s
|
||||
installHint: |-
|
||||
The Pinniped CLI is required to authenticate to the current cluster.
|
||||
For more information, please visit https://pinniped.dev
|
||||
`, e.clusterCAData, e.clusterServer, e.command, e.pinnipedEndpoint, e.pinnipedCABundle, e.namespace, e.token, e.idpType, e.idpName)
|
||||
}
|
||||
|
||||
func newCredentialIssuerConfig(name, namespace, server, certificateAuthorityData string) *configv1alpha1.CredentialIssuerConfig {
|
||||
return &configv1alpha1.CredentialIssuerConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "CredentialIssuerConfig",
|
||||
APIVersion: configv1alpha1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerConfigStatus{
|
||||
KubeConfigInfo: &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
|
||||
Server: server,
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(certificateAuthorityData)),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
mocks func(*getKubeConfigCommand)
|
||||
wantError string
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{
|
||||
name: "failure to get path to self",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.getPathToSelf = func() (string, error) {
|
||||
return "", fmt.Errorf("some error getting path to self")
|
||||
}
|
||||
},
|
||||
wantError: "could not find path to self: some error getting path to self",
|
||||
},
|
||||
{
|
||||
name: "kubeconfig does not exist",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.kubeconfig = "./testdata/does-not-exist.yaml"
|
||||
},
|
||||
wantError: "stat ./testdata/does-not-exist.yaml: no such file or directory",
|
||||
},
|
||||
{
|
||||
name: "fail to get client",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return nil, fmt.Errorf("some error configuring clientset")
|
||||
}
|
||||
},
|
||||
wantError: "some error configuring clientset",
|
||||
},
|
||||
{
|
||||
name: "fail to get IDPs",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.idpName = ""
|
||||
cmd.flags.idpType = ""
|
||||
clientset := pinnipedfake.NewSimpleClientset()
|
||||
clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some error getting IDPs")
|
||||
})
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return clientset, nil
|
||||
}
|
||||
},
|
||||
wantError: "some error getting IDPs",
|
||||
},
|
||||
{
|
||||
name: "zero IDPs",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.idpName = ""
|
||||
cmd.flags.idpType = ""
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(), nil
|
||||
}
|
||||
},
|
||||
wantError: `no identity providers were found in namespace "test-namespace"`,
|
||||
},
|
||||
{
|
||||
name: "multiple IDPs",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.idpName = ""
|
||||
cmd.flags.idpType = ""
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
&idpv1alpha.WebhookIdentityProvider{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-one"}},
|
||||
&idpv1alpha.WebhookIdentityProvider{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-two"}},
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `multiple identity providers were found in namespace "test-namespace", so --pinniped-idp-name/--pinniped-idp-type must be specified`,
|
||||
},
|
||||
{
|
||||
name: "fail to get CredentialIssuerConfigs",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
clientset := pinnipedfake.NewSimpleClientset()
|
||||
clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some error getting CredentialIssuerConfigs")
|
||||
})
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return clientset, nil
|
||||
}
|
||||
},
|
||||
wantError: "some error getting CredentialIssuerConfigs",
|
||||
},
|
||||
{
|
||||
name: "zero CredentialIssuerConfigs found",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
newCredentialIssuerConfig("pinniped-config-1", "not-the-test-namespace", "", ""),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `No CredentialIssuerConfig was found in namespace "test-namespace". Is Pinniped installed on this cluster in namespace "test-namespace"?`,
|
||||
},
|
||||
{
|
||||
name: "multiple CredentialIssuerConfigs found",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
newCredentialIssuerConfig("pinniped-config-1", "test-namespace", "", ""),
|
||||
newCredentialIssuerConfig("pinniped-config-2", "test-namespace", "", ""),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `More than one CredentialIssuerConfig was found in namespace "test-namespace"`,
|
||||
},
|
||||
{
|
||||
name: "CredentialIssuerConfig missing KubeConfigInfo",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cic := newCredentialIssuerConfig("pinniped-config", "test-namespace", "", "")
|
||||
cic.Status.KubeConfigInfo = nil
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(cic), nil
|
||||
}
|
||||
},
|
||||
wantError: `CredentialIssuerConfig "pinniped-config" was missing KubeConfigInfo`,
|
||||
},
|
||||
{
|
||||
name: "KubeConfigInfo has invalid base64",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cic := newCredentialIssuerConfig("pinniped-config", "test-namespace", "https://example.com", "")
|
||||
cic.Status.KubeConfigInfo.CertificateAuthorityData = "invalid-base64-test-ca"
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(cic), nil
|
||||
}
|
||||
},
|
||||
wantError: `illegal base64 data at input byte 7`,
|
||||
},
|
||||
{
|
||||
name: "success using remote CA data",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cic := newCredentialIssuerConfig("pinniped-config", "test-namespace", "https://fake-server-url-value", "fake-certificate-authority-data-value")
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(cic), nil
|
||||
}
|
||||
},
|
||||
wantStdout: expectedKubeconfigYAML{
|
||||
clusterCAData: "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
|
||||
clusterServer: "https://fake-server-url-value",
|
||||
command: "/path/to/pinniped",
|
||||
token: "test-token",
|
||||
pinnipedEndpoint: "https://fake-server-url-value",
|
||||
pinnipedCABundle: "fake-certificate-authority-data-value",
|
||||
namespace: "test-namespace",
|
||||
idpType: "test-idp-type",
|
||||
idpName: "test-idp-name",
|
||||
}.String(),
|
||||
},
|
||||
{
|
||||
name: "success using local CA data and discovered IDP",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.idpName = ""
|
||||
cmd.flags.idpType = ""
|
||||
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
&idpv1alpha.WebhookIdentityProvider{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "discovered-idp"}},
|
||||
newCredentialIssuerConfig("pinniped-config", "test-namespace", "https://example.com", "test-ca"),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantStderr: `WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuerConfig on the cluster. Using local kubeconfig values.`,
|
||||
wantStdout: expectedKubeconfigYAML{
|
||||
clusterCAData: "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
|
||||
clusterServer: "https://fake-server-url-value",
|
||||
command: "/path/to/pinniped",
|
||||
token: "test-token",
|
||||
pinnipedEndpoint: "https://fake-server-url-value",
|
||||
pinnipedCABundle: "fake-certificate-authority-data-value",
|
||||
namespace: "test-namespace",
|
||||
idpType: "webhook",
|
||||
idpName: "discovered-idp",
|
||||
}.String(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Start with a default getKubeConfigCommand, set some defaults, then apply any mocks.
|
||||
c := newGetKubeConfigCommand()
|
||||
c.flags.token = "test-token"
|
||||
c.flags.namespace = "test-namespace"
|
||||
c.flags.idpName = "test-idp-name"
|
||||
c.flags.idpType = "test-idp-type"
|
||||
c.getPathToSelf = func() (string, error) { return "/path/to/pinniped", nil }
|
||||
c.flags.kubeconfig = "./testdata/kubeconfig.yaml"
|
||||
tt.mocks(c)
|
||||
|
||||
cmd := &cobra.Command{}
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs([]string{})
|
||||
err := c.run(cmd, []string{})
|
||||
if tt.wantError != "" {
|
||||
require.EqualError(t, err, tt.wantError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, strings.TrimSpace(tt.wantStdout), strings.TrimSpace(stdout.String()), "unexpected stdout")
|
||||
require.Equal(t, strings.TrimSpace(tt.wantStderr), strings.TrimSpace(stderr.String()), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
||||
cmd/pinniped/cmd/root.go (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
//nolint: gochecknoglobals
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "pinniped",
|
||||
Short: "pinniped",
|
||||
Long: "pinniped is the client-side binary for use with Pinniped-enabled Kubernetes clusters.",
|
||||
SilenceUsage: true, // do not print usage message when commands fail
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
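The root command above is only the top-level `pinniped` command; subcommands still need to be attached to `rootCmd` before `Execute` runs. The snippet below is a hypothetical sketch of how a subcommand could register itself using standard cobra conventions (`rootCmd.AddCommand` from an `init` function). The `version` subcommand shown here is purely illustrative and is not part of this commit.

```go
// Hypothetical sketch: how a subcommand might attach itself to rootCmd using
// standard cobra conventions. The "version" command is illustrative only and
// is not the registration code actually used by this commit.
package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

//nolint: gochecknoinits
func init() {
	// Attach the subcommand so rootCmd.Execute() can dispatch to it.
	rootCmd.AddCommand(newVersionCommand())
}

// newVersionCommand returns a trivial example subcommand.
func newVersionCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Print a placeholder version string",
		RunE: func(cmd *cobra.Command, _ []string) error {
			fmt.Fprintln(cmd.OutOrStdout(), "pinniped (version unknown)")
			return nil
		},
	}
}
```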
cmd/pinniped/cmd/testdata/kubeconfig.yaml (new vendored file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ== # fake-certificate-authority-data-value
|
||||
server: https://fake-server-url-value
|
||||
name: kind-kind
|
||||
- cluster:
|
||||
certificate-authority-data: c29tZS1vdGhlci1mYWtlLWNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhLXZhbHVl # some-other-fake-certificate-authority-data-value
|
||||
server: https://some-other-fake-server-url-value
|
||||
name: some-other-cluster
|
||||
contexts:
|
||||
- context:
|
||||
cluster: kind-kind
|
||||
user: kind-kind
|
||||
name: kind-kind
|
||||
- context:
|
||||
cluster: some-other-cluster
|
||||
user: some-other-user
|
||||
name: some-other-context
|
||||
current-context: kind-kind
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: kind-kind
|
||||
user:
|
||||
client-certificate-data: ZmFrZS1jbGllbnQtY2VydGlmaWNhdGUtZGF0YS12YWx1ZQ== # fake-client-certificate-data-value
|
||||
client-key-data: ZmFrZS1jbGllbnQta2V5LWRhdGEtdmFsdWU= # fake-client-key-data-value
|
||||
- name: some-other-user
|
||||
user:
|
||||
client-certificate-data: c29tZS1vdGhlci1mYWtlLWNsaWVudC1jZXJ0aWZpY2F0ZS1kYXRhLXZhbHVl # some-other-fake-client-certificate-data-value
|
||||
client-key-data: c29tZS1vdGhlci1mYWtlLWNsaWVudC1rZXktZGF0YS12YWx1ZQ== # some-other-fake-client-key-data-value
|
||||
cmd/pinniped/main.go (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package main
|
||||
|
||||
import "go.pinniped.dev/cmd/pinniped/cmd"
|
||||
|
||||
func main() {
|
||||
cmd.Execute()
|
||||
}
|
||||
deploy-local-user-authenticator/README.md (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
# Deploying local-user-authenticator
|
||||
|
||||
## What is local-user-authenticator?
|
||||
|
||||
The local-user-authenticator app is an identity provider used for integration testing and demos.
|
||||
If you would like to demo Pinniped, but you don't have a compatible identity provider handy,
|
||||
you can use Pinniped's local-user-authenticator identity provider. Note that this is not recommended for
|
||||
production use.
|
||||
|
||||
The local-user-authenticator is a Kubernetes Deployment which runs a webhook server that implements the Kubernetes
|
||||
[Webhook Token Authentication interface](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication).
|
||||
|
||||
User accounts can be created and edited dynamically using `kubectl` commands (see below).
|
||||
|
||||
## Installing the Latest Version with Default Options
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/$(curl https://api.github.com/repos/vmware-tanzu/pinniped/releases/latest -s | jq .name -r)/install-local-user-authenticator.yaml
|
||||
```
|
||||
|
||||
## Installing an Older Version with Default Options
|
||||
|
||||
Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
|
||||
and use it to replace the version number in the URL below.
|
||||
|
||||
```bash
|
||||
# Replace v0.2.0 with your preferred version in the URL below
|
||||
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.2.0/install-local-user-authenticator.yaml
|
||||
```
|
||||
|
||||
## Installing with Custom Options
|
||||
|
||||
Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
|
||||
in the [deploy-local-user-authenticator](../deploy-local-user-authenticator) directory.
|
||||
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).
|
||||
|
||||
1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
2. The configuration options are in [deploy-local-user-authenticator/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below. Use the release version tag as the `image_tag` value.
3. In a terminal, `cd` to this `deploy-local-user-authenticator` directory.
4. To generate the final YAML files, run `ytt --file .`
5. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app local-user-authenticator --diff-changes --file -`
|
||||
|
||||
## Configuring After Installing
|
||||
|
||||
### Create Users
|
||||
|
||||
Use `kubectl` to create, edit, and delete user accounts by creating a `Secret` for each user account in the same
|
||||
namespace where local-user-authenticator is deployed. The name of the `Secret` resource is the username.
|
||||
Store the user's group membership and `bcrypt`-hashed password as the contents of the `Secret`.
|
||||
For example, to create a user named `ryan` with the password `password123`
|
||||
who belongs to the groups `group1` and `group2`, use:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic ryan \
|
||||
--namespace local-user-authenticator \
|
||||
--from-literal=groups=group1,group2 \
|
||||
--from-literal=passwordHash=$(htpasswd -nbBC 10 x password123 | sed -e "s/^x://")
|
||||
```
|
||||
|
||||
Note that the above command requires a tool capable of generating a `bcrypt` hash. It uses `htpasswd`, which is installed on most macOS systems and can be installed on some Linux systems via the `apache2-utils` package (e.g., `apt-get install apache2-utils`).
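If `htpasswd` is not available, any tool that produces a standard bcrypt hash should work. The following is a minimal sketch in Go, assuming the well-known `golang.org/x/crypto/bcrypt` package; it simply prints a hash suitable for the `passwordHash` key of the user `Secret`, using cost 10 to mirror the `-C 10` flag above.

```go
// Minimal sketch: generate a bcrypt hash for the passwordHash field of a
// local-user-authenticator user Secret. Requires golang.org/x/crypto/bcrypt.
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatalf("usage: %s PASSWORD", os.Args[0])
	}

	// Cost 10 matches the "-C 10" used in the htpasswd example above.
	hash, err := bcrypt.GenerateFromPassword([]byte(os.Args[1]), 10)
	if err != nil {
		log.Fatal(err)
	}

	// Print the hash so it can be substituted into the kubectl command above,
	// e.g. --from-literal=passwordHash=$(go run ./hashpw password123)
	fmt.Println(string(hash))
}
```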
|
||||
### Get the local-user-authenticator App's Auto-Generated Certificate Authority Bundle
|
||||
|
||||
Fetch the auto-generated CA bundle for the local-user-authenticator's HTTP TLS endpoint.
|
||||
|
||||
```bash
|
||||
kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator \
|
||||
-o jsonpath={.data.caCertificate} \
|
||||
| base64 -d \
|
||||
| tee /tmp/local-user-authenticator-ca
|
||||
```
|
||||
|
||||
### Configuring Pinniped to Use local-user-authenticator as an Identity Provider
|
||||
|
||||
When installing Pinniped on the same cluster, configure local-user-authenticator as an Identity Provider for Pinniped
|
||||
using the webhook URL `https://local-user-authenticator.local-user-authenticator.svc/authenticate`
|
||||
along with the CA bundle fetched by the above command. See [doc/demo.md](../doc/demo.md) for an example.
|
||||
|
||||
## Optional: Manually Testing the Webhook Endpoint After Installing
|
||||
|
||||
The following steps demonstrate the API of the local-user-authenticator app. Typically, a user would not need to
|
||||
interact with this API directly. Pinniped will automatically integrate with this API if the local-user-authenticator
|
||||
is configured as an identity provider for Pinniped.
|
||||
|
||||
1. Start a pod from which you can curl the endpoint from inside the cluster.
|
||||
|
||||
```bash
|
||||
kubectl run curlpod --image=curlimages/curl --command -- /bin/sh -c "while true; do echo hi; sleep 120; done"
|
||||
```
|
||||
|
||||
1. Copy the CA bundle that was fetched above onto the new pod.
|
||||
|
||||
```bash
|
||||
kubectl cp /tmp/local-user-authenticator-ca curlpod:/tmp/local-user-authenticator-ca
|
||||
```
|
||||
|
||||
1. Run a `curl` command to try to authenticate as the user created above.
|
||||
|
||||
```bash
|
||||
kubectl -it exec curlpod -- curl https://local-user-authenticator.local-user-authenticator.svc/authenticate \
|
||||
--cacert /tmp/local-user-authenticator-ca \
|
||||
-H 'Content-Type: application/json' -H 'Accept: application/json' -d '
|
||||
{
|
||||
"apiVersion": "authentication.k8s.io/v1beta1",
|
||||
"kind": "TokenReview",
|
||||
"spec": {
|
||||
"token": "ryan:password123"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
When authentication is successful, the above command should return JSON similar to the following.
|
||||
Note that the value of `authenticated` is `true` to indicate a successful authentication.
|
||||
|
||||
```json
|
||||
{"apiVersion":"authentication.k8s.io/v1beta1","kind":"TokenReview","status":{"authenticated":true,"user":{"username":"ryan","uid":"19c433ec-8f58-44ca-9ef0-2d1081ccb876","groups":["group1","group2"]}}}
|
||||
```
|
||||
|
||||
Trying the above `curl` command again with the wrong username or password in the body of the request
|
||||
should result in a JSON response which indicates that the authentication failed.
|
||||
|
||||
```json
|
||||
{"apiVersion":"authentication.k8s.io/v1beta1","kind":"TokenReview","status":{"authenticated":false}}
|
||||
```
|
||||
|
||||
1. Remove the curl pod.
|
||||
|
||||
```bash
|
||||
kubectl delete pod curlpod
|
||||
```
|
||||
deploy-local-user-authenticator/deployment.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@ load("@ytt:data", "data")
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
labels:
|
||||
name: local-user-authenticator
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
---
|
||||
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: image-pull-secret
|
||||
namespace: local-user-authenticator
|
||||
labels:
|
||||
app: local-user-authenticator
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
data:
|
||||
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
|
||||
#@ end
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
labels:
|
||||
app: local-user-authenticator
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: local-user-authenticator
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: local-user-authenticator
|
||||
spec:
|
||||
serviceAccountName: local-user-authenticator
|
||||
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
||||
imagePullSecrets:
|
||||
- name: image-pull-secret
|
||||
#@ end
|
||||
containers:
|
||||
- name: local-user-authenticator
|
||||
#@ if data.values.image_digest:
|
||||
image: #@ data.values.image_repo + "@" + data.values.image_digest
|
||||
#@ else:
|
||||
image: #@ data.values.image_repo + ":" + data.values.image_tag
|
||||
#@ end
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: #! override the default entrypoint
|
||||
- /usr/local/bin/local-user-authenticator
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
labels:
|
||||
app: local-user-authenticator
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: local-user-authenticator
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 443
|
||||
targetPort: 443
|
||||
deploy-local-user-authenticator/rbac.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@ load("@ytt:data", "data")
|
||||
|
||||
#! Give permission to various objects within the app's own namespace
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [secrets]
|
||||
verbs: [create, get, list, patch, update, watch]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: local-user-authenticator
|
||||
namespace: local-user-authenticator
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: local-user-authenticator
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
deploy-local-user-authenticator/values.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@data/values
|
||||
---
|
||||
|
||||
#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
|
||||
image_repo: docker.io/getpinniped/pinniped-server
|
||||
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
|
||||
image_tag: latest
|
||||
|
||||
#! Specifies a secret to be used when pulling the above container image.
|
||||
#! Can be used when the above image_repo is a private registry.
|
||||
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
|
||||
#! Optional.
|
||||
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
|
||||
deploy/README.md (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
# Deploying
|
||||
|
||||
## Connecting Pinniped to an Identity Provider
|
||||
|
||||
If you would like to try Pinniped, but you don't have a compatible identity provider,
|
||||
you can use Pinniped's test identity provider.
|
||||
See [deploy-local-user-authenticator/README.md](../deploy-local-user-authenticator/README.md)
|
||||
for details.
|
||||
|
||||
## Installing the Latest Version with Default Options
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/$(curl https://api.github.com/repos/vmware-tanzu/pinniped/releases/latest -s | jq .name -r)/install-pinniped.yaml
|
||||
```
|
||||
|
||||
## Installing an Older Version with Default Options
|
||||
|
||||
Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
|
||||
and use it to replace the version number in the URL below.
|
||||
|
||||
```bash
|
||||
# Replace v0.2.0 with your preferred version in the URL below
|
||||
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.2.0/install-pinniped.yaml
|
||||
```
|
||||
|
||||
## Installing with Custom Options
|
||||
|
||||
Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
|
||||
in the [deploy](../deploy) directory.
|
||||
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).
|
||||
|
||||
1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
2. The configuration options are in [deploy/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below. Use the release version tag as the `image_tag` value.
3. In a terminal, `cd` to this `deploy` directory.
4. To generate the final YAML files, run `ytt --file .`
5. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app pinniped --diff-changes --file -`
|
||||
deploy/config.pinniped.dev_credentialissuerconfigs.yaml (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.4.0
|
||||
creationTimestamp: null
|
||||
name: credentialissuerconfigs.config.pinniped.dev
|
||||
spec:
|
||||
group: config.pinniped.dev
|
||||
names:
|
||||
kind: CredentialIssuerConfig
|
||||
listKind: CredentialIssuerConfigList
|
||||
plural: credentialissuerconfigs
|
||||
shortNames:
|
||||
- cic
|
||||
singular: credentialissuerconfig
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
status:
|
||||
description: Status of the credential issuer.
|
||||
properties:
|
||||
kubeConfigInfo:
|
||||
description: Information needed to form a valid Pinniped-based kubeconfig
|
||||
using this credential issuer.
|
||||
properties:
|
||||
certificateAuthorityData:
|
||||
description: The K8s API server CA bundle.
|
||||
minLength: 1
|
||||
type: string
|
||||
server:
|
||||
description: The K8s API server URL.
|
||||
minLength: 1
|
||||
pattern: ^https://|^http://
|
||||
type: string
|
||||
required:
|
||||
- certificateAuthorityData
|
||||
- server
|
||||
type: object
|
||||
strategies:
|
||||
description: List of integration strategies that were attempted by
|
||||
Pinniped.
|
||||
items:
|
||||
description: Status of an integration strategy that was attempted
|
||||
by Pinniped.
|
||||
properties:
|
||||
lastUpdateTime:
|
||||
description: When the status was last checked.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: Human-readable description of the current status.
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the current status.
|
||||
enum:
|
||||
- FetchedKey
|
||||
- CouldNotFetchKey
|
||||
type: string
|
||||
status:
|
||||
description: Status of the attempted integration strategy.
|
||||
enum:
|
||||
- Success
|
||||
- Error
|
||||
type: string
|
||||
type:
|
||||
description: Type of integration attempted.
|
||||
enum:
|
||||
- KubeClusterSigningCertificate
|
||||
type: string
|
||||
required:
|
||||
- lastUpdateTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- strategies
|
||||
type: object
|
||||
required:
|
||||
- status
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
deploy/deployment.yaml (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@ load("@ytt:data", "data")
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: #@ data.values.namespace
|
||||
labels:
|
||||
name: #@ data.values.namespace
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-config"
|
||||
namespace: #@ data.values.namespace
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
data:
|
||||
#! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
|
||||
#@yaml/text-templated-strings
|
||||
pinniped.yaml: |
|
||||
discovery:
|
||||
url: (@= data.values.discovery_url or "null" @)
|
||||
api:
|
||||
servingCertificate:
|
||||
durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
|
||||
renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
|
||||
names:
|
||||
servingCertificateSecret: (@= data.values.app_name + "-api-tls-serving-certificate" @)
|
||||
credentialIssuerConfig: (@= data.values.app_name + "-config" @)
|
||||
apiService: (@= data.values.app_name + "-api" @)
|
||||
kubeCertAgent:
|
||||
namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @)
|
||||
(@ if data.values.image_digest: @)
|
||||
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
|
||||
(@ else: @)
|
||||
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
|
||||
(@ end @)
|
||||
(@ if data.values.image_pull_dockerconfigjson: @)
|
||||
imagePullSecrets:
|
||||
- image-pull-secret
|
||||
(@ end @)
|
||||
---
|
||||
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: image-pull-secret
|
||||
namespace: #@ data.values.namespace
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
data:
|
||||
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
|
||||
#@ end
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
spec:
|
||||
replicas: #@ data.values.replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app: #@ data.values.app_name
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
spec:
|
||||
serviceAccountName: #@ data.values.app_name
|
||||
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
||||
imagePullSecrets:
|
||||
- name: image-pull-secret
|
||||
#@ end
|
||||
containers:
|
||||
- name: pinniped
|
||||
#@ if data.values.image_digest:
|
||||
image: #@ data.values.image_repo + "@" + data.values.image_digest
|
||||
#@ else:
|
||||
image: #@ data.values.image_repo + ":" + data.values.image_tag
|
||||
#@ end
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
args:
|
||||
- --config=/etc/config/pinniped.yaml
|
||||
- --downward-api-path=/etc/podinfo
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/config
|
||||
- name: podinfo
|
||||
mountPath: /etc/podinfo
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 443
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 2
|
||||
timeoutSeconds: 15
|
||||
periodSeconds: 10
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 443
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 2
|
||||
timeoutSeconds: 3
|
||||
periodSeconds: 10
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: #@ data.values.app_name + "-config"
|
||||
- name: podinfo
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: "labels"
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels
|
||||
- path: "namespace"
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
tolerations:
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
- key: node-role.kubernetes.io/master #! Allow running on master nodes too
|
||||
effect: NoSchedule
|
||||
#! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
|
||||
#! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
|
||||
#!priorityClassName: system-cluster-critical
|
||||
#! This will help make sure our multiple pods run on different nodes, making
|
||||
#! our deployment "more" "HA".
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 50
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app: #@ data.values.app_name
|
||||
topologyKey: kubernetes.io/hostname
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
#! If name is changed, must also change names.apiService in the ConfigMap above
|
||||
name: #@ data.values.app_name + "-api"
|
||||
namespace: #@ data.values.namespace
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: #@ data.values.app_name
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 443
|
||||
targetPort: 443
|
||||
---
|
||||
apiVersion: apiregistration.k8s.io/v1
|
||||
kind: APIService
|
||||
metadata:
|
||||
name: v1alpha1.login.pinniped.dev
|
||||
labels:
|
||||
app: #@ data.values.app_name
|
||||
spec:
|
||||
version: v1alpha1
|
||||
group: login.pinniped.dev
|
||||
groupPriorityMinimum: 2500
|
||||
versionPriority: 10
|
||||
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
|
||||
service:
|
||||
name: pinniped-api
|
||||
namespace: #@ data.values.namespace
|
||||
port: 443
|
||||
deploy/idp.pinniped.dev_webhookidentityproviders.yaml (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.4.0
|
||||
creationTimestamp: null
|
||||
name: webhookidentityproviders.idp.pinniped.dev
|
||||
spec:
|
||||
group: idp.pinniped.dev
|
||||
names:
|
||||
categories:
|
||||
- all
|
||||
- idp
|
||||
- idps
|
||||
kind: WebhookIdentityProvider
|
||||
listKind: WebhookIdentityProviderList
|
||||
plural: webhookidentityproviders
|
||||
shortNames:
|
||||
- webhookidp
|
||||
- webhookidps
|
||||
singular: webhookidentityprovider
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.endpoint
|
||||
name: Endpoint
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: WebhookIdentityProvider describes the configuration of a Pinniped
|
||||
webhook identity provider.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: Spec for configuring the identity provider.
|
||||
properties:
|
||||
endpoint:
|
||||
description: Webhook server endpoint URL.
|
||||
minLength: 1
|
||||
pattern: ^https://
|
||||
type: string
|
||||
tls:
|
||||
description: TLS configuration.
|
||||
properties:
|
||||
certificateAuthorityData:
|
||||
description: X.509 Certificate Authority (base64-encoded PEM bundle).
|
||||
If omitted, a default set of system roots will be trusted.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- endpoint
|
||||
type: object
|
||||
status:
|
||||
description: Status of the identity provider.
|
||||
properties:
|
||||
conditions:
|
||||
description: Represents the observations of an identity provider's
|
||||
current state.
|
||||
items:
|
||||
description: Condition status of a resource (mirrored from the metav1.Condition
|
||||
type added in Kubernetes 1.19). In a future API version we can
|
||||
switch to using the upstream type. See https://github.com/kubernetes/apimachinery/blob/v0.19.0/pkg/apis/meta/v1/types.go#L1353-L1413.
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: lastTransitionTime is the last time the condition
|
||||
transitioned from one status to another. This should be when
|
||||
the underlying condition changed. If that is not known, then
|
||||
using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: message is a human readable message indicating
|
||||
details about the transition. This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: observedGeneration represents the .metadata.generation
|
||||
that the condition was set based upon. For instance, if .metadata.generation
|
||||
is currently 12, but the .status.conditions[x].observedGeneration
|
||||
is 9, the condition is out of date with respect to the current
|
||||
state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: reason contains a programmatic identifier indicating
|
||||
the reason for the condition's last transition. Producers
|
||||
of specific condition types may define expected values and
|
||||
meanings for this field, and whether the values are considered
|
||||
a guaranteed API. The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
--- Many .condition.type values are consistent across resources
|
||||
like Available, but because arbitrary conditions can be useful
|
||||
(see .node.status.conditions), the ability to deconflict is
|
||||
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- type
|
||||
x-kubernetes-list-type: map
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources: {}
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
deploy/rbac.yaml (new file, 185 lines)
@@ -0,0 +1,185 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@ load("@ytt:data", "data")
|
||||
|
||||
#! Give permission to various cluster-scoped objects
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [namespaces]
|
||||
verbs: [get, list, watch]
|
||||
- apiGroups: [apiregistration.k8s.io]
|
||||
resources: [apiservices]
|
||||
verbs: [create, get, list, patch, update, watch]
|
||||
- apiGroups: [admissionregistration.k8s.io]
|
||||
resources: [validatingwebhookconfigurations, mutatingwebhookconfigurations]
|
||||
verbs: [get, list, watch]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Give permission to various objects within the app's own namespace
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
namespace: #@ data.values.namespace
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [services]
|
||||
verbs: [create, get, list, patch, update, watch]
|
||||
- apiGroups: [""]
|
||||
resources: [secrets]
|
||||
verbs: [create, get, list, patch, update, watch, delete]
|
||||
#! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
|
||||
- apiGroups: [""]
|
||||
resources: [pods]
|
||||
verbs: [create, get, list, patch, update, watch, delete]
|
||||
#! We need to be able to exec into pods in our namespace so we can grab the API server's private key
|
||||
- apiGroups: [""]
|
||||
resources: [pods/exec]
|
||||
verbs: [create]
|
||||
- apiGroups: [config.pinniped.dev, idp.pinniped.dev]
|
||||
resources: ["*"]
|
||||
verbs: [create, get, list, update, watch]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
namespace: #@ data.values.namespace
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: #@ data.values.app_name + "-aggregated-api-server"
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Give permission to read pods in the kube-system namespace so we can find the API server's private key
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-kube-system-pod-read"
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [pods]
|
||||
verbs: [get, list, watch]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-kube-system-pod-read"
|
||||
namespace: kube-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: #@ data.values.app_name + "-kube-system-pod-read"
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Allow both authenticated and unauthenticated TokenCredentialRequests (i.e. allow all requests)
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-create-token-credential-requests"
|
||||
rules:
|
||||
- apiGroups: [login.pinniped.dev]
|
||||
resources: [tokencredentialrequests]
|
||||
verbs: [create]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-create-token-credential-requests"
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:authenticated
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:unauthenticated
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: #@ data.values.app_name + "-create-token-credential-requests"
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Give permissions for subjectaccessreviews, tokenreview that is needed by aggregated api servers
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Give permissions for a special configmap of CA bundles that is needed by aggregated api servers
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-extension-apiserver-authentication-reader"
|
||||
namespace: kube-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: extension-apiserver-authentication-reader
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
#! Give permission to list and watch ConfigMaps in kube-public
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-cluster-info-lister-watcher"
|
||||
namespace: kube-public
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [configmaps]
|
||||
verbs: [list, watch]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: #@ data.values.app_name + "-cluster-info-lister-watcher"
|
||||
namespace: kube-public
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: #@ data.values.app_name
|
||||
namespace: #@ data.values.namespace
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: #@ data.values.app_name + "-cluster-info-lister-watcher"
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
deploy/values.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
#! SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#@data/values
|
||||
---
|
||||
|
||||
app_name: pinniped
|
||||
namespace: pinniped
|
||||
|
||||
#! Specify how many replicas of the Pinniped server to run.
|
||||
replicas: 2
|
||||
|
||||
#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
|
||||
image_repo: docker.io/getpinniped/pinniped-server
|
||||
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
|
||||
image_tag: latest
|
||||
|
||||
#! Specifies a secret to be used when pulling the above container image.
|
||||
#! Can be used when the above image_repo is a private registry.
|
||||
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
|
||||
#! Optional.
|
||||
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
|
||||
|
||||
#! Pinniped will try to guess the right K8s API URL for sharing that information with potential clients.
|
||||
#! This setting allows the guess to be overridden.
|
||||
#! Optional.
|
||||
discovery_url: #! e.g., https://example.com
|
||||
|
||||
#! Specify the duration and renewal interval for the API serving certificate.
|
||||
#! The defaults are set to expire the cert about every 30 days, and to rotate it
|
||||
#! about every 25 days.
|
||||
api_serving_certificate_duration_seconds: 2592000
|
||||
api_serving_certificate_renew_before_seconds: 2160000
|
||||
doc/architecture.md (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
# Architecture
|
||||
|
||||
The principal purpose of Pinniped is to allow users to access Kubernetes
|
||||
clusters. Pinniped hopes to enable this access across a wide range of Kubernetes
|
||||
environments with zero configuration.
|
||||
|
||||
This integration is implemented using a credential exchange API which takes as
|
||||
input a credential from the external IDP and returns a credential which is understood by the host
|
||||
Kubernetes cluster.
|
||||
|
||||
<img src="img/pinniped_architecture.svg" alt="Pinniped Architecture Sketch" width="300px"/>
|
||||
|
||||
Pinniped supports various IDP types and implements different integration strategies
|
||||
for various Kubernetes distributions to make authentication possible.
|
||||
|
||||
## Supported Kubernetes Cluster Types
|
||||
|
||||
Pinniped supports the following types of Kubernetes clusters:
|
||||
|
||||
- Clusters where the Kube Controller Manager pod is accessible from Pinniped's pods.
|
||||
|
||||
Support for other types of Kubernetes distributions is coming soon.
|
||||
|
||||
## External Identity Provider Integrations
|
||||
|
||||
Pinniped will consume identity from one or more external identity providers
|
||||
(IDPs). Administrators will configure external IDPs via Kubernetes custom
|
||||
resources, allowing Pinniped to be managed using GitOps and standard Kubernetes tools.
|
||||
|
||||
Pinniped supports the following external IDP types.
|
||||
|
||||
1. Any webhook which implements the
|
||||
[Kubernetes TokenReview API](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication).
|
||||
|
||||
In addition to allowing the integration of any existing IDP which implements this API, webhooks also
|
||||
serve as an extension point for Pinniped by allowing for integration of arbitrary custom authenticators.
|
||||
While a custom implementation may be in any language or framework, this project provides a
|
||||
sample implementation in Golang. See the `ServeHTTP` method of
|
||||
[cmd/local-user-authenticator/main.go](../cmd/local-user-authenticator/main.go).
|
||||
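To make the webhook contract concrete, here is a minimal sketch of a handler that accepts a `TokenReview` request and returns a `TokenReview` status, with the request/response structs hand-rolled to match the wire format shown in the local-user-authenticator README. The toy token policy and the plain-HTTP listener are assumptions for brevity; this is not the project's actual `ServeHTTP` implementation.

```go
// Minimal sketch of a TokenReview webhook handler. The request/response
// shapes are hand-rolled to match the TokenReview JSON wire format, so this
// is an illustration only, not the project's real implementation.
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"strings"
)

type tokenReview struct {
	APIVersion string            `json:"apiVersion"`
	Kind       string            `json:"kind"`
	Spec       tokenReviewSpec   `json:"spec,omitempty"`
	Status     tokenReviewStatus `json:"status,omitempty"`
}

type tokenReviewSpec struct {
	Token string `json:"token"`
}

type tokenReviewStatus struct {
	Authenticated bool      `json:"authenticated"`
	User          *userInfo `json:"user,omitempty"`
}

type userInfo struct {
	Username string   `json:"username"`
	Groups   []string `json:"groups,omitempty"`
}

func authenticate(w http.ResponseWriter, r *http.Request) {
	var in tokenReview
	if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Toy policy: accept tokens of the form "username:correct-password".
	out := tokenReview{APIVersion: in.APIVersion, Kind: "TokenReview"}
	parts := strings.SplitN(in.Spec.Token, ":", 2)
	if len(parts) == 2 && parts[1] == "correct-password" {
		out.Status = tokenReviewStatus{
			Authenticated: true,
			User:          &userInfo{Username: parts[0], Groups: []string{"group1", "group2"}},
		}
	}

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(out)
}

func main() {
	http.HandleFunc("/authenticate", authenticate)
	// A real deployment would serve TLS; plain HTTP keeps the sketch short.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```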
|
||||
More IDP types are coming soon.
|
||||
|
||||
## Cluster Integration Strategies
|
||||
|
||||
Pinniped will issue a cluster credential by leveraging cluster-specific
|
||||
functionality. In the near term, cluster integrations will happen via different
|
||||
cluster-specific flows depending on the type of cluster. In the longer term,
|
||||
Pinniped hopes to contribute and leverage upstream Kubernetes extension points that
|
||||
cleanly enable this integration.
|
||||
|
||||
Pinniped supports the following cluster integration strategies.
|
||||
|
||||
1. Pinniped hosts a credential exchange API endpoint via a Kubernetes aggregated API server.
|
||||
This API returns a new cluster-specific credential using the cluster's signing keypair to
|
||||
issue short-lived cluster certificates. (In the future, when the Kubernetes CSR API
|
||||
provides a way to issue short-lived certificates, then the Pinniped credential exchange API
|
||||
will use that instead of using the cluster's signing keypair.)
|
||||
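As a rough illustration of issuing short-lived cluster certificates, the sketch below signs a client certificate with a five-minute lifetime using an in-memory stand-in CA keypair. Obtaining the cluster's real signing keypair is out of scope here, so every name and lifetime in this sketch is illustrative rather than Pinniped's actual implementation.

```go
// Illustrative sketch only: sign a short-lived client certificate using an
// in-memory CA keypair. Pinniped's actual strategy uses the cluster's own
// signing keypair, which this sketch does not attempt to fetch.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"os"
	"time"
)

func main() {
	// Stand-in CA keypair (the real cluster signing key lives elsewhere).
	caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	caTemplate := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example-ca"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}

	// Client keypair and a certificate that expires in five minutes, carrying
	// the username as CommonName and group memberships as Organization values.
	clientKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	clientTemplate := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "ryan", Organization: []string{"group1"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(5 * time.Minute),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, clientTemplate, caTemplate, &clientKey.PublicKey, caKey)
	if err != nil {
		log.Fatal(err)
	}

	// Emit the short-lived client certificate as PEM.
	_ = pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
```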
|
||||
More cluster integration strategies are coming soon, which will allow Pinniped to
|
||||
support more Kubernetes cluster types.
|
||||
|
||||
## `kubectl` Integration
|
||||
|
||||
With any of the above IDPs and integration strategies, `kubectl` commands receive the
|
||||
cluster-specific credential via a
|
||||
[Kubernetes client-go credential plugin](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins).
|
||||
Users may use the Pinniped CLI as the credential plugin, or they may use any proprietary CLI
|
||||
built with the [Pinniped Go client library](../generated).
|
||||
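For context on the credential-plugin handoff, the sketch below prints the kind of `ExecCredential` JSON that a plugin writes to stdout for `kubectl` to consume, using the same `client.authentication.k8s.io/v1beta1` apiVersion that appears in the generated kubeconfig. The hard-coded token is a placeholder assumption; it is not how the Pinniped CLI actually obtains credentials.

```go
// Minimal sketch of a client-go exec credential plugin's output. The structs
// mirror the ExecCredential wire format by hand; a real plugin would obtain
// the token from an identity provider rather than hard-coding it.
package main

import (
	"encoding/json"
	"os"
	"time"
)

type execCredential struct {
	APIVersion string               `json:"apiVersion"`
	Kind       string               `json:"kind"`
	Status     execCredentialStatus `json:"status"`
}

type execCredentialStatus struct {
	ExpirationTimestamp string `json:"expirationTimestamp,omitempty"`
	Token               string `json:"token,omitempty"`
}

func main() {
	cred := execCredential{
		// Matches the apiVersion used in the generated kubeconfig's exec section.
		APIVersion: "client.authentication.k8s.io/v1beta1",
		Kind:       "ExecCredential",
		Status: execCredentialStatus{
			ExpirationTimestamp: time.Now().Add(5 * time.Minute).UTC().Format(time.RFC3339),
			Token:               "placeholder-token", // a real plugin exchanges an IDP credential here
		},
	}

	// kubectl reads the ExecCredential from the plugin's stdout.
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(cred)
}
```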
|
||||
## Example Cluster Authentication Sequence Diagram
|
||||
|
||||
This diagram demonstrates using `kubectl get pods` with the Pinniped CLI configured as the credential plugin,
|
||||
and with a webhook IDP configured as the identity provider for the Pinniped server.
|
||||
|
||||

|
||||
doc/code_of_conduct.md (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [oss-coc@vmware.com](mailto:oss-coc@vmware.com). All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0,
available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.

132  doc/contributing.md  Normal file
@@ -0,0 +1,132 @@

# Contributing to Pinniped

Contributions to Pinniped are welcome. Here are some things to help you get started.

1. Please see the [Code of Conduct](code_of_conduct.md).
1. Learn about the [scope](scope.md) of the project.
1. Coming soon: details about how to legally contribute to the project, including CLA/DCO details.
1. See below for how to [file a bug report](#bugs).
1. See below for how to [suggest a feature](#features).
1. See below for how to [build the code](#building).
1. See below for how to [run the tests](#testing).

## Meeting with the Maintainers

The maintainers aspire to hold a video conference every other week with the Pinniped community.
Any community member may request to add topics to the agenda by contacting a [maintainer](../MAINTAINERS.md)
in advance, or by attending and raising the topic during the time remaining after the agenda is covered.
Typical agenda items include topics regarding the roadmap, feature requests, bug reports, pull requests, etc.
A [public document](https://docs.google.com/document/d/1qYA35wZV-6bxcH5375vOnIGkNBo7e4OROgsV4Sj8WjQ)
tracks the agendas and notes for these meetings.

These meetings are currently scheduled for the first and third Thursday mornings of each month
at 8 AM Pacific Time, using this [Zoom meeting](https://VMware.zoom.us/j/94638309756?pwd=V3NvRXJIdDg5QVc0TUdFM2dYRzgrUT09).
If the meeting day falls on a US holiday, please consider that occurrence of the meeting to be canceled.

## Bugs

To file a bug report, please first open an
[issue](https://github.com/vmware-tanzu/pinniped/issues/new?template=bug_report.md). The project team
will work with you on your bug report.

Once the bug has been validated, a [pull request](https://github.com/vmware-tanzu/pinniped/compare)
can be opened to fix the bug.

For specifics on what to include in your bug report, please follow the
guidelines in the issue and pull request templates.

## Features

To suggest a feature, please first open an
[issue](https://github.com/vmware-tanzu/pinniped/issues/new?template=feature-proposal.md)
and tag it with `proposal`. The project team will work with you on your feature request.

Once the feature request has been validated, a [pull request](https://github.com/vmware-tanzu/pinniped/compare)
can be opened to implement the feature.

For specifics on what to include in your feature request, please follow the
guidelines in the issue and pull request templates.

## Issues

GitHub [issues](https://github.com/vmware-tanzu/pinniped/issues) can also be used for general
inquiries and discussion regarding the project.

Need an idea for a project to get started contributing? Take a look at the open
[issues](https://github.com/vmware-tanzu/pinniped/issues).
Also check to see if any open issues are labeled with
["good first issue"](https://github.com/vmware-tanzu/pinniped/labels/good%20first%20issue)
or ["help wanted"](https://github.com/vmware-tanzu/pinniped/labels/help%20wanted).

## CLA

We welcome contributions from everyone, but we can only accept them if you sign
our Contributor License Agreement (CLA). If you would like to contribute and you
have not signed it, our CLA-bot will walk you through the process when you open
a Pull Request. For questions about the CLA process, see the
[FAQ](https://cla.vmware.com/faq) or submit a question through the GitHub issue
tracker.

## Building

The [Dockerfile](../Dockerfile) at the root of the repo can be used to build and
package the code. After making a change to the code, rebuild the docker image with the following command.

```bash
# From the root directory of the repo...
docker build .
```
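
If you would like to run the freshly built image on a local cluster, one possible workflow is to tag the image
and side-load it into [`kind`](https://kind.sigs.k8s.io/). This is only a sketch; the image tag and cluster name
below are arbitrary examples, and the steps above do not require them.

```bash
# A sketch of one possible local workflow; the tag and cluster name are arbitrary examples.
docker build -t pinniped-dev:latest .
kind load docker-image pinniped-dev:latest --name pinniped
```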

## Testing

### Running Lint

```bash
./hack/module.sh lint
```

### Running Unit Tests

```bash
./hack/module.sh units
```

### Running Integration Tests

```bash
./hack/prepare-for-integration-tests.sh && source /tmp/integration-test-env && go test -v -count 1 ./test/...
```

The `./hack/prepare-for-integration-tests.sh` script will create a local
[`kind`](https://kind.sigs.k8s.io/) cluster on which the integration tests will run.

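Once that cluster exists, the tests can be re-run without repeating the preparation step. This is a sketch which
assumes that `/tmp/integration-test-env` still matches the running cluster and that the integration tests live
under `./test/integration`.

```bash
# Re-run only the integration tests against an already-prepared kind cluster.
source /tmp/integration-test-env && go test -v -count 1 ./test/integration/...
```
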
### Observing Tests on the Continuous Integration Environment

CI will not be triggered on a pull request until the pull request is reviewed and
approved for CI by a project [maintainer](../MAINTAINERS.md). Once CI is triggered,
the progress and results will appear on the GitHub page for that
[pull request](https://github.com/vmware-tanzu/pinniped/pulls) as checks. Links
will appear to view the details of each check.

## Documentation

Any pull request which adds a new feature or changes the behavior of any feature which was previously documented
should include updates to the documentation. All documentation lives in this repository. This project aspires to
follow the Kubernetes [documentation style guide](https://kubernetes.io/docs/contribute/style/style-guide).

## Pre-commit Hooks

This project uses [pre-commit](https://pre-commit.com/) to agree on some conventions about whitespace and file encoding.

```bash
$ brew install pre-commit
[...]
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
```
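
The hooks then run automatically on `git commit`. To check every file in the tree at once, pre-commit can also be
invoked directly, as in this sketch.

```bash
# Run all configured hooks against all files, not just the staged changes.
pre-commit run --all-files
```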

## Becoming a Pinniped Maintainer

Regular contributors who are active in the Pinniped community and who have contributed at least several
significant pull requests may be considered for promotion to become a maintainer upon request. Please
contact an existing [maintainer](../MAINTAINERS.md) if you would like to be considered.

183  doc/demo.md  Normal file
@@ -0,0 +1,183 @@

# Trying Pinniped

## Prerequisites

1. A Kubernetes cluster of a type supported by Pinniped as described in [doc/architecture.md](../doc/architecture.md).

   Don't have a cluster handy? Consider using [kind](https://kind.sigs.k8s.io/) on your local machine.
   See below for an example of using kind.

1. An identity provider of a type supported by Pinniped as described in [doc/architecture.md](../doc/architecture.md).

   Don't have an identity provider of a type supported by Pinniped handy?
   Start by installing `local-user-authenticator` on the same cluster where you would like to try Pinniped
   by following the directions in [deploy-local-user-authenticator/README.md](../deploy-local-user-authenticator/README.md).
   See below for an example of deploying this on kind.

1. A kubeconfig where the current context points to the cluster and has admin-like
   privileges on that cluster.

## Steps

### Overview

Installing and trying Pinniped on any cluster will consist of the following general steps. See the next section below
for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case.

1. Install Pinniped. See [deploy/README.md](../deploy/README.md).
1. Download the Pinniped CLI from [Pinniped's GitHub Releases page](https://github.com/vmware-tanzu/pinniped/releases/latest).
1. Generate a kubeconfig using the Pinniped CLI. Run `pinniped get-kubeconfig --help` for more information.
1. Run `kubectl` commands using the generated kubeconfig. Pinniped will automatically be used for authentication during those commands.

### Steps to Deploy the Latest Release on kind Using local-user-authenticator as the Identity Provider

1. Install the tools required for the following steps.

   - [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/), if not already installed, e.g. `brew install kind` on macOS.

   - kind depends on Docker. If not already installed, [install Docker](https://docs.docker.com/get-docker/), e.g. `brew cask install docker` on macOS.

   - This demo requires `kubectl`, which comes with Docker, or can be [installed separately](https://kubernetes.io/docs/tasks/tools/install-kubectl/).

   - This demo requires a tool capable of generating a `bcrypt` hash in order to interact with
     the webhook. The example below uses `htpasswd`, which is installed on most macOS systems, and can be
     installed on some Linux systems via the `apache2-utils` package (e.g., `apt-get install
     apache2-utils`).

   - One of the steps below optionally uses `jq` to help find the latest release version number. It is not required.
     Install `jq` if you would like, e.g. `brew install jq` on macOS.

1. Create a new Kubernetes cluster using `kind create cluster`. Optionally provide a cluster name using the `--name` flag.
   kind will automatically update your kubeconfig to point to the new cluster as a user with admin-like permissions.

1. Query GitHub's API for the git tag of the latest Pinniped
   [release](https://github.com/vmware-tanzu/pinniped/releases/latest).

   ```bash
   pinniped_version=$(curl https://api.github.com/repos/vmware-tanzu/pinniped/releases/latest -s | jq .name -r)
   ```

   Alternatively, [any release version](https://github.com/vmware-tanzu/pinniped/releases)
   number can be manually selected.

   ```bash
   # Example of manually choosing a release version...
   pinniped_version=v0.2.0
   ```

1. Deploy the `local-user-authenticator` app.

   ```bash
   kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/${pinniped_version}/install-local-user-authenticator.yaml
   ```

   The `install-local-user-authenticator.yaml` file includes the default deployment options.
   If you would prefer to customize the available options, please
   see [deploy-local-user-authenticator/README.md](../deploy-local-user-authenticator/README.md)
   for instructions on how to deploy using `ytt`.

1. Create a test user.

   ```bash
   kubectl create secret generic pinny-the-seal \
     --namespace local-user-authenticator \
     --from-literal=groups=group1,group2 \
     --from-literal=passwordHash=$(htpasswd -nbBC 10 x password123 | sed -e "s/^x://")
   ```

1. Fetch the auto-generated CA bundle for the `local-user-authenticator`'s HTTP TLS endpoint.

   ```bash
   kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator \
     -o jsonpath={.data.caCertificate} \
     | tee /tmp/local-user-authenticator-ca-base64-encoded
   ```

1. Deploy Pinniped.

   ```bash
   kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/${pinniped_version}/install-pinniped.yaml
   ```

   The `install-pinniped.yaml` file includes the default deployment options.
   If you would prefer to customize the available options, please see [deploy/README.md](../deploy/README.md)
   for instructions on how to deploy using `ytt`.

1. Create a `WebhookIdentityProvider` object to configure Pinniped to authenticate using `local-user-authenticator`.

   ```bash
   cat <<EOF | kubectl create --namespace pinniped -f -
   apiVersion: idp.pinniped.dev/v1alpha1
   kind: WebhookIdentityProvider
   metadata:
     name: local-user-authenticator
   spec:
     endpoint: https://local-user-authenticator.local-user-authenticator.svc/authenticate
     tls:
       certificateAuthorityData: $(cat /tmp/local-user-authenticator-ca-base64-encoded)
   EOF
   ```

1. Download the latest version of the Pinniped CLI binary for your platform
   from Pinniped's [latest release](https://github.com/vmware-tanzu/pinniped/releases/latest).

1. Move the Pinniped CLI binary to your preferred filename and directory. Add the executable bit,
   e.g. `chmod +x /usr/local/bin/pinniped`.

1. Generate a kubeconfig for the current cluster. Use `--token` to include a token which should
   allow you to authenticate as the user that you created above.

   ```bash
   pinniped get-kubeconfig --token "pinny-the-seal:password123" --idp-type webhook --idp-name local-user-authenticator > /tmp/pinniped-kubeconfig
   ```

   If you are using macOS, you may get an error dialog that says
   `“pinniped” cannot be opened because the developer cannot be verified`. Cancel this dialog, open System Preferences,
   click on Security & Privacy, and click the Allow Anyway button next to the Pinniped message.
   Run the above command again and another dialog will appear saying
   `macOS cannot verify the developer of “pinniped”. Are you sure you want to open it?`.
   Click Open to allow the command to proceed.

   Note that the above command will print a warning to the screen. You can ignore this warning.
   Pinniped tries to auto-discover the URL for the Kubernetes API server, but it is not able
   to do so on kind clusters. The warning is just letting you know that the Pinniped CLI decided
   to ignore the auto-discovery URL and instead use the URL from your existing kubeconfig.

1. Try using the generated kubeconfig to issue arbitrary `kubectl` commands as
   the `pinny-the-seal` user.

   ```bash
   kubectl --kubeconfig /tmp/pinniped-kubeconfig get pods -n pinniped
   ```

   Because this user has no RBAC permissions on this cluster, the previous command
   results in the error `Error from server (Forbidden): pods is forbidden: User "pinny-the-seal" cannot list resource "pods" in API group "" in the namespace "pinniped"`.
   However, this does prove that you are authenticated and acting as the `pinny-the-seal` user.

1. As the admin user, create RBAC rules for the test user to give them permissions to perform actions on the cluster.
   For example, grant the test user permission to view all cluster resources.

   ```bash
   kubectl create clusterrolebinding pinny-can-read --clusterrole view --user pinny-the-seal
   ```

1. Use the generated kubeconfig to issue arbitrary `kubectl` commands as the `pinny-the-seal` user.

   ```bash
   kubectl --kubeconfig /tmp/pinniped-kubeconfig get pods -n pinniped
   ```

   The user has permission to list pods, so the command succeeds this time.
   Pinniped has provided authentication into the cluster for your `kubectl` command! 🎉

1. Carry on issuing as many `kubectl` commands as you'd like as the `pinny-the-seal` user.
   Each invocation will use Pinniped for authentication.
   You may find it convenient to set the `KUBECONFIG` environment variable rather than passing `--kubeconfig` to each invocation.

   ```bash
   export KUBECONFIG=/tmp/pinniped-kubeconfig
   kubectl get namespaces
   kubectl get pods -A
   ```

1. Profit! 💰
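
When you are finished experimenting, the demo environment can be removed. This is a minimal cleanup sketch which
assumes the kind cluster was created with the default name (i.e. `kind create cluster` without `--name`).

```bash
# Tear down the demo environment created by the steps above.
kind delete cluster
rm -f /tmp/pinniped-kubeconfig /tmp/local-user-authenticator-ca-base64-encoded
```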

12  doc/img/README.md  Normal file
@@ -0,0 +1,12 @@

# `doc/img` README

## How to Update these Images

- [pinniped.svg](pinniped.svg) was generated using [`plantuml`](https://plantuml.com/).
  To regenerate the image, run `plantuml -tsvg pinniped.txt` from this directory.

- [pinniped_architecture.svg](pinniped_architecture.svg) was created on [draw.io](https://draw.io).
  It can be opened again for editing on that site by choosing "File" -> "Open from" -> "Device".
  Because it includes embedded icons, it should be exported using "File" -> "Export as" -> "SVG",
  with the "Transparent Background", "Embed Images", and "Include a copy of my diagram" options
  checked. The icons in this diagram are from their "CAE" shapes set.

381
doc/img/pinniped.svg
Normal file
381
doc/img/pinniped.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 43 KiB |
61
doc/img/pinniped.txt
Normal file
61
doc/img/pinniped.txt
Normal file
@@ -0,0 +1,61 @@
|
||||
@startuml "pinniped"
|
||||
|
||||
!define K8S_BLUE #326CE5
|
||||
!define K8S_SPRITES_URL https://raw.githubusercontent.com/michiel/plantuml-kubernetes-sprites/master/resource
|
||||
!include K8S_SPRITES_URL/k8s-sprites-unlabeled-25pct.iuml
|
||||
|
||||
participant "User" as USER << ($pod{scale=0.30},K8S_BLUE) >> #LightGreen
|
||||
participant "Kubectl" as KUBECTL << ($ing{scale=0.30},K8S_BLUE) >> #LightSteelBlue
|
||||
participant "Proprietary CLI" as CLI << ($svc{scale=0.30},K8S_BLUE) >> #LightPink
|
||||
participant "Pinniped" as PINNIPED << ($node{scale=0.30},K8S_BLUE) >> #LightGray
|
||||
participant "TokenReview Webhook" as WEBHOOK << ($pod{scale=0.30},K8S_BLUE) >> #LightPink
|
||||
participant "Kubernetes API" as API << ($node{scale=0.30},K8S_BLUE) >> #LightSteelBlue
|
||||
|
||||
legend
|
||||
# <back:lightsalmon>Message contains upstream IDP credentials</back>
|
||||
# <back:lightgreen>Message contains cluster-specific credentials</back>
|
||||
end legend
|
||||
|
||||
USER -> KUBECTL : ""kubectl get pods""
|
||||
activate KUBECTL
|
||||
|
||||
group Acquire cluster-specific credential
|
||||
|
||||
KUBECTL -> CLI : Get cluster-specific credential
|
||||
activate CLI
|
||||
|
||||
CLI -> CLI : Retrieve upstream IDP credential in\norganization-specific way
|
||||
|
||||
CLI -> PINNIPED : <back:lightsalmon>""POST /apis/pinniped.dev/...""</back>
|
||||
activate PINNIPED
|
||||
|
||||
PINNIPED -> WEBHOOK : <back:lightsalmon>""POST /authenticate""</back>
|
||||
activate WEBHOOK
|
||||
|
||||
WEBHOOK -> PINNIPED : ""200 OK"" with user and group information
|
||||
deactivate WEBHOOK
|
||||
|
||||
PINNIPED -> PINNIPED : Issue short-lived cluster-specific credential\nwith user and group information
|
||||
|
||||
PINNIPED -> CLI : <back:lightgreen>""200 OK""</back>
|
||||
deactivate PINNIPED
|
||||
|
||||
CLI -> KUBECTL : Here is a cluster-specific credential
|
||||
|
||||
end
|
||||
|
||||
group Authenticate to cluster with cluster-specific credential
|
||||
|
||||
KUBECTL -> API : <back:lightgreen>""GET /api/v1/pods""</back>
|
||||
activate API
|
||||
|
||||
API -> API : Glean user and group information from\ncluster-specific credential
|
||||
|
||||
API -> KUBECTL : ""200 OK"" with pods
|
||||
deactivate API
|
||||
|
||||
deactivate KUBECTL
|
||||
|
||||
end
|
||||
|
||||
@enduml
|
||||
3
doc/img/pinniped_architecture.svg
Normal file
3
doc/img/pinniped_architecture.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 79 KiB |
68
doc/img/pinniped_logo_with_text_on_right.svg
Normal file
68
doc/img/pinniped_logo_with_text_on_right.svg
Normal file
@@ -0,0 +1,68 @@
|
||||
<svg id="artwork" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 486 158"><metadata><?xpacket begin="" id="W5M0MpCehiHzreSzNTczkc9d"?>
|
||||
<x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="Adobe XMP Core 6.0-c002 79.164352, 2020/01/30-15:50:38 ">
|
||||
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
|
||||
<rdf:Description rdf:about=""
|
||||
xmlns:lr="http://ns.adobe.com/lightroom/1.0/"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:xmp="http://ns.adobe.com/xap/1.0/"
|
||||
xmlns:xmpMM="http://ns.adobe.com/xap/1.0/mm/"
|
||||
xmlns:stEvt="http://ns.adobe.com/xap/1.0/sType/ResourceEvent#">
|
||||
<lr:hierarchicalSubject>
|
||||
<rdf:Bag>
|
||||
<rdf:li>open source identity</rdf:li>
|
||||
<rdf:li>open source identity|636062</rdf:li>
|
||||
<rdf:li>open source identity|Pinniped</rdf:li>
|
||||
</rdf:Bag>
|
||||
</lr:hierarchicalSubject>
|
||||
<dc:subject>
|
||||
<rdf:Bag>
|
||||
<rdf:li>open source identity</rdf:li>
|
||||
<rdf:li>636062</rdf:li>
|
||||
<rdf:li>Pinniped</rdf:li>
|
||||
</rdf:Bag>
|
||||
</dc:subject>
|
||||
<xmp:MetadataDate>2020-09-17T16:06:40-07:00</xmp:MetadataDate>
|
||||
<xmpMM:InstanceID>xmp.iid:932334bf-97ee-471a-96c9-c4e5ff526fe4</xmpMM:InstanceID>
|
||||
<xmpMM:DocumentID>xmp.did:38396587-b56b-42c3-8f3e-f8e9c91f532b</xmpMM:DocumentID>
|
||||
<xmpMM:OriginalDocumentID>xmp.did:38396587-b56b-42c3-8f3e-f8e9c91f532b</xmpMM:OriginalDocumentID>
|
||||
<xmpMM:History>
|
||||
<rdf:Seq>
|
||||
<rdf:li>
|
||||
<rdf:Description>
|
||||
<stEvt:action>saved</stEvt:action>
|
||||
<stEvt:instanceID>xmp.iid:38396587-b56b-42c3-8f3e-f8e9c91f532b</stEvt:instanceID>
|
||||
<stEvt:when>2020-09-17T16:06:35-07:00</stEvt:when>
|
||||
<stEvt:softwareAgent>Adobe Bridge 2020 (Macintosh)</stEvt:softwareAgent>
|
||||
<stEvt:changed>/metadata</stEvt:changed>
|
||||
</rdf:Description>
|
||||
</rdf:li>
|
||||
<rdf:li>
|
||||
<rdf:Description>
|
||||
<stEvt:action>saved</stEvt:action>
|
||||
<stEvt:instanceID>xmp.iid:932334bf-97ee-471a-96c9-c4e5ff526fe4</stEvt:instanceID>
|
||||
<stEvt:when>2020-09-17T16:06:40-07:00</stEvt:when>
|
||||
<stEvt:softwareAgent>Adobe Bridge 2020 (Macintosh)</stEvt:softwareAgent>
|
||||
<stEvt:changed>/metadata</stEvt:changed>
|
||||
</rdf:Description>
|
||||
</rdf:li>
|
||||
</rdf:Seq>
|
||||
</xmpMM:History>
|
||||
</rdf:Description>
|
||||
</rdf:RDF>
|
||||
</x:xmpmeta>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<?xpacket end="w"?></metadata>
|
||||
<defs><style>.cls-1{fill:#717073;}.cls-2{fill:#727174;}.cls-3{fill:#fff;}.cls-4{fill:#78be43;}.cls-5{fill:#1abfd3;}.cls-6{fill:#79459b;}.cls-7{fill:#1e4488;}.cls-8{fill:#218fcf;}</style></defs><path class="cls-1" d="M170.52,58.21h16.74a17.43,17.43,0,0,1,7.68,1.68,13.45,13.45,0,0,1,5.49,4.71,12.54,12.54,0,0,1,0,13.62,13.45,13.45,0,0,1-5.49,4.71,17.43,17.43,0,0,1-7.68,1.68H175.2V99.43h-4.68Zm15.9,22a13.23,13.23,0,0,0,8.19-2.34,8.21,8.21,0,0,0,0-12.84,13.23,13.23,0,0,0-8.19-2.34H175.2V80.17Z"/><path class="cls-1" d="M209.82,58.21h4.68V99.43h-4.68Z"/><path class="cls-1" d="M225.78,58.21h4.68L256,91.75V58.21h4.68V99.43H256L230.46,65.89V99.43h-4.68Z"/><path class="cls-1" d="M272,58.21h4.68l25.56,33.54V58.21h4.68V99.43h-4.68L276.66,65.89V99.43H272Z"/><path class="cls-1" d="M318.18,58.21h4.68V99.43h-4.68Z"/><path class="cls-1" d="M334.14,58.21h16.74a17.46,17.46,0,0,1,7.68,1.68,13.51,13.51,0,0,1,5.49,4.71,12.54,12.54,0,0,1,0,13.62,13.51,13.51,0,0,1-5.49,4.71,17.46,17.46,0,0,1-7.68,1.68H338.82V99.43h-4.68Zm15.9,22a13.23,13.23,0,0,0,8.19-2.34,8.21,8.21,0,0,0,0-12.84A13.23,13.23,0,0,0,350,62.65H338.82V80.17Z"/><path class="cls-1" d="M378.18,62.65V76.21h22.38v4.44H378.18V95H403v4.44H373.44V58.21H403v4.44Z"/><path class="cls-1" d="M411.12,58.21H425a24.6,24.6,0,0,1,11.54,2.64,19.73,19.73,0,0,1,7.92,7.32,21.2,21.2,0,0,1,0,21.27,19.66,19.66,0,0,1-7.92,7.35A24.6,24.6,0,0,1,425,99.43H411.12Zm13.92,37a19.21,19.21,0,0,0,9.08-2.1,15.61,15.61,0,0,0,6.25-5.82,17,17,0,0,0,0-16.89,15.68,15.68,0,0,0-6.25-5.79,19.21,19.21,0,0,0-9.08-2.1h-9.25v32.7Z"/><path class="cls-2" d="M91.14,25.5A52.5,52.5,0,1,0,143.64,78,52.51,52.51,0,0,0,91.14,25.5Zm0,95.33A42.83,42.83,0,1,1,134,78,42.83,42.83,0,0,1,91.14,120.83Z"/><circle class="cls-3" cx="91.33" cy="77.84" r="45.75"/><circle class="cls-4" cx="91.16" cy="76.71" r="8"/><path class="cls-5" d="M118.92,58.45l1.66,6.89,5.12-.66-3-12.42-.15-.5v0l-11.73-5.08-1.53,5,6.48,2.8L101.26,66.65a14.14,14.14,0,0,1,2.9,4.24Z"/><path class="cls-6" d="M66.46,54.41,73,51.61l-1.53-5L59.68,51.73v0l-.15.5-3,12.42,5.13.66,1.65-6.89L78.13,70.94A14.23,14.23,0,0,1,81,66.68Z"/><path class="cls-7" d="M57.49,82.82,59.21,76l-4.87-1.8L51.23,86.56l0,0,.31.42,8,9.94,3.66-3.66-4.47-5.51,19.82-4.35A14.23,14.23,0,0,1,77,78.54Z"/><path class="cls-7" d="M128,74.17,123.11,76l1.72,6.85-19.54-4.28a14.23,14.23,0,0,1-1.56,4.89l19.81,4.35-4.46,5.51L122.73,97l8-9.94.31-.42,0,0Z"/><path class="cls-6" d="M103.35,109l-7.08-.33-.79,5.11,12.76.58h.56l8.14-9.86-4.34-2.85-4.5,5.45-8.43-19a14.36,14.36,0,0,1-4.58,2.28Z"/><path class="cls-5" d="M74.24,107.08l-4.5-5.45-4.34,2.85,8.14,9.86h.56l12.76-.58-.78-5.11L79,109l8.26-18.57a14.28,14.28,0,0,1-4.59-2.27Z"/><path class="cls-8" d="M93.78,62.7V43.84L100.12,47l2.79-4.35L91.49,37,91,36.74h0l-11.44,5.7,2.8,4.37,6.33-3.15v19a14.59,14.59,0,0,1,2.49-.23A15,15,0,0,1,93.78,62.7Z"/></svg>
|
||||
|
After Width: | Height: | Size: 5.4 KiB |

32  doc/scope.md  Normal file
@@ -0,0 +1,32 @@

# Project Scope

The Pinniped project is guided by the following principles.
* Pinniped lets you plug any external identity provider into
  Kubernetes. These integrations follow enterprise-grade security principles.
* Pinniped is easy to install and use on any Kubernetes cluster via
  distribution-specific integration mechanisms.
* Pinniped uses a declarative configuration via Kubernetes APIs.
* Pinniped provides an optimal user experience when authenticating to many
  clusters at one time.
* Pinniped provides an enterprise-grade security posture via secure defaults and
  revocable or very short-lived credentials.
* Where possible, Pinniped will contribute ideas and code to upstream
  Kubernetes.

When contributing to Pinniped, please consider whether your contribution follows
these guiding principles.

## Out Of Scope

The following items are out of scope for the Pinniped project.
* Authorization.
* Standalone identity provider for general use.
* Machine-to-machine (service) identity.
* Running outside of Kubernetes.

## Roadmap

More details coming soon!

For more details on proposing features and bugs, check out our
[contributing](contributing.md) doc.

@@ -1,14 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running Go linters
|
||||
FROM debian:13.2-slim AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl -sfLo /tmp/codecov https://uploader.codecov.io/latest/linux/codecov
|
||||
RUN chmod +x /tmp/codecov
|
||||
|
||||
FROM golang:1.25.5
|
||||
RUN apt-get update -y && apt-get dist-upgrade -y
|
||||
COPY --from=builder /tmp/codecov /usr/local/bin/codecov
|
||||
@@ -1,10 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM gcr.io/go-containerregistry/crane as crane
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
FROM golang:1.25.5
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin
|
||||
COPY --from=crane /ko-app/crane /usr/local/bin
|
||||
ENTRYPOINT ["bash"]
|
||||
@@ -1,16 +0,0 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
FROM debian:13.2-slim
|
||||
|
||||
# Note: libdigest-sha-perl is to get shasum, which is used when installing Carvel tools below.
|
||||
RUN apt-get update && apt-get install -y ca-certificates jq curl libdigest-sha-perl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
@@ -1,29 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For deploying an EKS cluster and setting it up to run our tests.
|
||||
|
||||
FROM weaveworks/eksctl:v0.221.0 AS eksctl
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
FROM amazon/aws-cli:2.32.30
|
||||
RUN yum update -y && yum install -y jq perl-Digest-SHA openssl && yum clean all
|
||||
COPY --from=eksctl eksctl /usr/local/bin/eksctl
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install latest kubectl.
|
||||
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod u+x /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator.
|
||||
# This gets installed automatically via eksctl, but currently it downloads v0.5.2,
|
||||
# which will give us a v1alpha1 execcredential rather than a v1beta1 which we want.
|
||||
# When this has changed, we can delete this:
|
||||
# https://github.com/weaveworks/eksctl/blob/main/build/docker/Dockerfile#L49
|
||||
RUN curl -sfL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /usr/local/bin/aws-iam-authenticator \
|
||||
&& chmod u+x /usr/local/bin/aws-iam-authenticator
|
||||
@@ -1,15 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the GitHub CLI.
|
||||
FROM debian:13.2-slim AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl \
|
||||
-sfLo /tmp/gh.tar.gz \
|
||||
https://github.com/cli/cli/releases/download/v2.40.0/gh_2.40.0_linux_amd64.tar.gz \
|
||||
&& tar -C /tmp --strip-components=1 -xzvf /tmp/gh.tar.gz
|
||||
|
||||
FROM golang:1.25.5
|
||||
COPY --from=builder /tmp/bin/gh /usr/local/bin/gh
|
||||
@@ -1,80 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the integration tests as a client to a k8s cluster
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
# We need gcloud for running integration tests against GKE
|
||||
# because the kubeconfig uses gcloud as an `auth-provider`.
|
||||
# Use FROM gcloud-sdk instead of FROM golang because it's
|
||||
# a lot easier to install Go than to install gcloud in the
|
||||
# subsequent commands below.
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Create a non-root user account that can be used to run the tests.
|
||||
RUN useradd --create-home testrunner
|
||||
|
||||
# Install latest beta chrome.
|
||||
RUN \
|
||||
chown root:root /tmp && \
|
||||
chmod 1777 /tmp && \
|
||||
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
|
||||
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
|
||||
apt-get -y update && \
|
||||
apt-get -y install google-chrome-beta
|
||||
|
||||
# Output Chrome version used
|
||||
RUN google-chrome --version
|
||||
|
||||
# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
RUN curl -fsSL https://go.dev/dl/go1.25.5.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
tar -C /usr/local -xzf /tmp/go.tar.gz && \
|
||||
rm /tmp/go.tar.gz && \
|
||||
go version
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
|
||||
# Install go tools gotestsum and test2json to record the test output in a nice format.
|
||||
RUN go install gotest.tools/gotestsum@latest
|
||||
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
|
||||
mv test2json /usr/local/bin/test2json
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
|
||||
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod 0755 /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -fsSL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod 0755 /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
|
||||
# tmc version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# install latest nmap
|
||||
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
|
||||
alien nmap-7.92-1.x86_64.rpm &&\
|
||||
dpkg -i nmap_7.92-2_amd64.deb
|
||||
@@ -1,80 +0,0 @@
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For running the integration tests as a client to a k8s cluster
|
||||
|
||||
FROM mikefarah/yq:4.50.1 AS yq
|
||||
|
||||
# We need gcloud for running integration tests against GKE
|
||||
# because the kubeconfig uses gcloud as an `auth-provider`.
|
||||
# Use FROM gcloud-sdk instead of FROM golang because it's
|
||||
# a lot easier to install Go than to install gcloud in the
|
||||
# subsequent commands below.
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps alien google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Create a non-root user account that can be used to run the tests.
|
||||
RUN useradd --create-home testrunner
|
||||
|
||||
# Install latest stable chrome.
|
||||
RUN \
|
||||
chown root:root /tmp && \
|
||||
chmod 1777 /tmp && \
|
||||
curl -fsSL -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add && \
|
||||
echo "deb https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
|
||||
apt-get -y update && \
|
||||
apt-get -y install google-chrome-stable
|
||||
|
||||
# Output Chrome version used
|
||||
RUN google-chrome --version
|
||||
|
||||
# Install Go. The download URL that can be used below for any version of Go can be found on https://go.dev/dl/
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
RUN curl -fsSL https://go.dev/dl/go1.25.5.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
tar -C /usr/local -xzf /tmp/go.tar.gz && \
|
||||
rm /tmp/go.tar.gz && \
|
||||
go version
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
|
||||
# Install go tools gotestsum and test2json to record the test output in a nice format.
|
||||
RUN go install gotest.tools/gotestsum@latest
|
||||
RUN env GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o test2json -ldflags="-s -w" cmd/test2json && \
|
||||
mv test2json /usr/local/bin/test2json
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsSL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install the latest kubectl as documented here: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
|
||||
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL "https://dl.k8s.io/release/stable.txt")/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod 0755 /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -fsSL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod 0755 /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -fsSL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -fsSL -o /bin/tmc && chmod 0755 /bin/tmc && \
|
||||
# tmc version
|
||||
|
||||
# Install yq.
|
||||
COPY --from=yq /usr/bin/yq /usr/local/bin/yq
|
||||
|
||||
# install latest nmap
|
||||
RUN wget https://nmap.org/dist/nmap-7.92-1.x86_64.rpm &&\
|
||||
alien nmap-7.92-1.x86_64.rpm &&\
|
||||
dpkg -i nmap_7.92-2_amd64.deb
|
||||
@@ -1,34 +0,0 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# For deploying apps onto Kubernetes clusters (including GKE)
|
||||
|
||||
FROM google/cloud-sdk:551.0.0-slim
|
||||
|
||||
# Install apache2-utils (for htpasswd to bcrypt passwords for the
|
||||
# local-user-authenticator) and jq.
|
||||
RUN apt-get update && apt-get install -y apache2-utils jq wget zip procps dnsutils google-cloud-sdk-gke-gcloud-auth-plugin && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Print version of gke-gcloud-auth-plugin
|
||||
RUN gke-gcloud-auth-plugin --version
|
||||
|
||||
# Install Carvel tools.
|
||||
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
|
||||
ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version
|
||||
|
||||
# Install latest kubectl.
|
||||
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
|
||||
-o /bin/kubectl && chmod u+x /bin/kubectl
|
||||
|
||||
# Install aws-iam-authenticator
|
||||
RUN curl -sfL \
|
||||
https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
|
||||
-o /bin/aws-iam-authenticator \
|
||||
&& chmod u+x /bin/aws-iam-authenticator
|
||||
|
||||
# Install TMC CLI.
|
||||
# Update: The TMC CLI has been deprecated and replaced by the tanzu CLI. Commenting this out for now.
|
||||
#RUN curl -sfL https://tanzuuserauthentication.stable.tmc-dev.cloud.vmware.com/v1alpha/system/binaries \
|
||||
# | jq -r .versions[].linuxX64 \
|
||||
# | xargs curl -sfL -o /bin/tmc && chmod +x /bin/tmc && \
|
||||
# tmc version
|
||||
@@ -1,20 +0,0 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
ARG GO_VERSION
|
||||
|
||||
FROM golang:${GO_VERSION}
|
||||
|
||||
ARG GO_VERSION
|
||||
ARG K8S_PKG_VERSION
|
||||
ARG CONTROLLER_GEN_VERSION
|
||||
ARG CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
ENV GO_VERSION=$GO_VERSION
|
||||
ENV K8S_PKG_VERSION=$K8S_PKG_VERSION
|
||||
ENV CONTROLLER_GEN_VERSION=$CONTROLLER_GEN_VERSION
|
||||
ENV CRD_REF_DOCS_COMMIT_SHA=$CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
COPY setup.sh /codegen/
|
||||
|
||||
RUN /codegen/setup.sh
|
||||
@@ -1,116 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [ -z "$GO_VERSION" ]; then
|
||||
echo "missing GO_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$K8S_PKG_VERSION" ]; then
|
||||
echo "missing K8S_PKG_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$CONTROLLER_GEN_VERSION" ]; then
|
||||
echo "missing CONTROLLER_GEN_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Debugging output for CI...
|
||||
echo "GO_VERSION: $GO_VERSION"
|
||||
echo "K8S_PKG_VERSION: $K8S_PKG_VERSION"
|
||||
echo "CONTROLLER_GEN_VERSION: $CONTROLLER_GEN_VERSION"
|
||||
echo "CRD_REF_DOCS_COMMIT_SHA: $CRD_REF_DOCS_COMMIT_SHA"
|
||||
|
||||
apt-get update -y && apt-get dist-upgrade -y
|
||||
|
||||
cd /codegen/
|
||||
|
||||
cat <<EOF >tools.go
|
||||
package tools
|
||||
|
||||
import (
|
||||
_ "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
_ "k8s.io/api/core/v1"
|
||||
_ "k8s.io/code-generator"
|
||||
)
|
||||
EOF
|
||||
|
||||
cat <<EOF >go.mod
|
||||
module codegen
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
k8s.io/apimachinery v$K8S_PKG_VERSION
|
||||
k8s.io/code-generator v$K8S_PKG_VERSION
|
||||
k8s.io/api v$K8S_PKG_VERSION
|
||||
)
|
||||
EOF
|
||||
|
||||
# Resolve dependencies and download the modules.
|
||||
echo "Running go mod tidy ..."
|
||||
go mod tidy
|
||||
echo "Running go mod download ..."
|
||||
go mod download
|
||||
|
||||
# Copy the downloaded source code of k8s.io/code-generator so we can "go install" all its commands.
|
||||
rm -rf "$(go env GOPATH)/src"
|
||||
mkdir -p "$(go env GOPATH)/src/k8s.io"
|
||||
cp -pr "$(go env GOMODCACHE)/k8s.io/code-generator@v$K8S_PKG_VERSION" "$(go env GOPATH)/src/k8s.io/code-generator"
|
||||
|
||||
# Install the commands to $GOPATH/bin. Also sed the related shell scripts, but leave those in the src dir.
|
||||
# Note that update-codegen.sh invokes these shell scripts at this src path.
|
||||
# The sed is a dirty hack to avoid having the code-generator shell scripts run go install again.
|
||||
# In version 0.23.0 the line inside the shell script that previously said "go install ..." started
|
||||
# to instead say "GO111MODULE=on go install ..." so this sed is a little wrong, but still seems to work.
|
||||
echo "Running go install for all k8s.io/code-generator commands ..."
|
||||
# Using sed to edit the go.mod file (and then running go mod tidy) is a dirty hack to work around
|
||||
# an issue introduced in Go v1.25. See https://github.com/golang/go/issues/74462.
|
||||
# The version of code-generator used by Kube 1.30 depends on x/tools v0.18.0.
|
||||
# The version of code-generator used by Kube 1.31 depends on x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d.
|
||||
# Other versions of Kube use code-generator versions which do not have this problem.
|
||||
(cd "$(go env GOPATH)/src/k8s.io/code-generator" &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.18\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.21\.1-.*#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
go mod tidy &&
|
||||
go install -v ./cmd/... &&
|
||||
sed -i -E -e 's/(go install.*)/# \1/g' ./*.sh)
|
||||
|
||||
if [[ ! -f "$(go env GOPATH)/bin/openapi-gen" ]]; then
|
||||
# Starting in Kube 1.30, openapi-gen moved from k8s.io/code-generator to k8s.io/kube-openapi.
|
||||
# Assuming that we are still in the /codegen directory, get the specific version of kube-openapi
|
||||
# that is selected as an indirect dependency by the go.mod.
|
||||
kube_openapi_version=$(go list -m k8s.io/kube-openapi | cut -f2 -d' ')
|
||||
# Install that version of its openapi-gen command.
|
||||
echo "Running go install for openapi-gen $kube_openapi_version ..."
|
||||
# Using sed to edit the go.mod file (and then running go mod tidy) is a dirty hack to work around
|
||||
# an issue introduced in Go v1.25. See https://github.com/golang/go/issues/74462.
|
||||
# If this were not needed, then we could just use "go install" directly without
|
||||
# copying the source code or editing the go.mod file (which is what this script used to do),
|
||||
# like this: go install -v "k8s.io/kube-openapi/cmd/openapi-gen@$kube_openapi_version"
|
||||
# The version of kube-openapi used by Kube 1.30 (and maybe 1.31) depends on x/tools v0.18.0.
|
||||
# The version of kube-openapi used by Kube 1.32 depends on x/tools v0.24.0.
|
||||
# Other versions of Kube use kube-openapi versions which do not have this problem.
|
||||
cp -pr "$(go env GOMODCACHE)/k8s.io/kube-openapi@$kube_openapi_version" "$(go env GOPATH)/src/k8s.io/kube-openapi"
|
||||
(cd "$(go env GOPATH)/src/k8s.io/kube-openapi" &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.18\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
sed -i -E -e 's#golang\.org/x/tools v0\.24\.0#golang\.org/x/tools v0\.24\.1#g' ./go.mod &&
|
||||
go mod tidy &&
|
||||
go install -v ./cmd/openapi-gen)
|
||||
fi
|
||||
|
||||
echo "Running go install for controller-gen ..."
|
||||
go install -v sigs.k8s.io/controller-tools/cmd/controller-gen@v$CONTROLLER_GEN_VERSION
|
||||
|
||||
# We use a commit sha instead of a release semver because this project does not create
|
||||
# releases very often. They seem to only release 1-2 times per year, but commit to
|
||||
# main more often.
|
||||
echo "Running go install for crd-ref-docs ..."
|
||||
go install -v github.com/elastic/crd-ref-docs@$CRD_REF_DOCS_COMMIT_SHA
|
||||
|
||||
# List all the commands that we just installed.
|
||||
echo "Installed the following commands to $(go env GOPATH)/bin:"
|
||||
ls "$(go env GOPATH)/bin"
|
||||
@@ -1,17 +0,0 @@
|
||||
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# We would like to use https://github.com/cfmobile/pool-trigger-resource for our pool recycle jobs.
# Unfortunately, the pool-trigger-resource repo seems like it is not maintained by anyone. The most recent
# commit was six years ago. On the other hand, its implementation is a shell script which basically
# just calls some git commands, so it shouldn't need much maintenance if it works.
|
||||
# This is an updated version of https://github.com/cfmobile/pool-trigger-resource/blob/master/Dockerfile
|
||||
# to use newer versions of linux, jq, and git. The "assets" directory's source code is copied from
|
||||
# https://github.com/cfmobile/pool-trigger-resource/tree/master/assets as of commit efefe018c88e937.
|
||||
|
||||
FROM debian:13.2-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates jq git && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD assets/ /opt/resource/
|
||||
RUN chmod +rx /opt/resource/*
|
||||
@@ -1,219 +0,0 @@
|
||||
#!/bin/sh
|
||||
# vim: set ft=sh
|
||||
|
||||
set -e
|
||||
|
||||
exec 3>&1 # make stdout available as fd 3 for the result
|
||||
exec 1>&2 # redirect all output to stderr for logging
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
. "$(dirname "$0")"/common.sh
|
||||
|
||||
# for jq
|
||||
PATH=/usr/local/bin:$PATH
|
||||
|
||||
payload=$TMPDIR/git-resource-request
|
||||
|
||||
cat > "$payload" <&0
|
||||
|
||||
|
||||
uri=$(jq -r '.source.uri // ""' < "$payload")
|
||||
branch=$(jq -r '.source.branch // ""' < "$payload")
|
||||
pool_name=$(jq -r '.source.pool // ""' < "$payload")
|
||||
ref=$(jq -r '.version.ref // ""' < "$payload")
|
||||
|
||||
if [ -z "$uri" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing uri)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -z "$branch" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing branch)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -z "$pool_name" ]; then
|
||||
config_errors="${config_errors}invalid payload (missing pool)
|
||||
"
|
||||
fi
|
||||
|
||||
if [ -n "$config_errors" ]; then
|
||||
echo "$config_errors"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
###########
|
||||
#
|
||||
# end processing inputs
|
||||
#
|
||||
###########
|
||||
|
||||
###########
|
||||
#
|
||||
# start git setup
|
||||
#
|
||||
###########
|
||||
|
||||
load_pubkey "$payload"
|
||||
|
||||
destination=$TMPDIR/git-resource-repo-cache
|
||||
|
||||
if [ -d "$destination" ]; then
|
||||
cd "$destination"
|
||||
git fetch
|
||||
git reset --hard FETCH_HEAD
|
||||
else
|
||||
branchflag=""
|
||||
if [ -n "$branch" ]; then
|
||||
branchflag="--branch $branch"
|
||||
fi
|
||||
|
||||
git clone "$uri" $branchflag "$destination"
|
||||
cd "$destination"
|
||||
fi
|
||||
|
||||
|
||||
git config user.name "CI Pool Trigger Resource"
|
||||
git config user.email "ci-pool-trigger@localhost"
|
||||
|
||||
###########
|
||||
#
|
||||
# end git setup
|
||||
#
|
||||
###########
|
||||
|
||||
|
||||
###########
|
||||
#
|
||||
# start calculating pending triggers
|
||||
#
|
||||
###########
|
||||
|
||||
if [ -n "$ref" ] && git cat-file -e "$ref"; then
|
||||
ref_exists_and_is_valid=yes
|
||||
fi
|
||||
|
||||
if [ -e "$pool_name/.pending-triggers" ] && [ -e "$pool_name/.pending-removals" ]; then
|
||||
tally_files_exist=yes
|
||||
|
||||
#check validity of tally files
|
||||
fi
|
||||
|
||||
if [ -n "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
|
||||
files_changed=$(git show --pretty="format:" --name-status -r "$ref"..HEAD -- "$pool_name"/unclaimed/)
|
||||
|
||||
set +e
|
||||
added_items=$(echo "$files_changed" | grep "^A")
|
||||
removed_items=$(echo "$files_changed" | grep "^D")
|
||||
set -e
|
||||
|
||||
if [ -n "$added_items" ]; then
|
||||
num_added_items=$(echo "$added_items" | wc -l)
|
||||
else
|
||||
num_added_items=0
|
||||
fi
|
||||
|
||||
if [ -n "$removed_items" ]; then
|
||||
num_removed_items=$(echo "$removed_items" | wc -l)
|
||||
else
|
||||
num_removed_items=0
|
||||
fi
|
||||
|
||||
old_pending_triggers=$(cat "$pool_name"/.pending-triggers)
|
||||
old_pending_removals=$(cat "$pool_name"/.pending-removals)
|
||||
|
||||
pending_triggers=$(( old_pending_triggers + num_added_items ))
|
||||
|
||||
if [ "$num_removed_items" -gt "$old_pending_removals" ]; then
|
||||
extra_removals=$(( num_removed_items - old_pending_removals ))
|
||||
pending_removals=0
|
||||
pending_triggers=$(( pending_triggers - extra_removals ))
|
||||
else
|
||||
pending_removals=$(( old_pending_removals - num_removed_items ))
|
||||
fi
|
||||
else
|
||||
pending_triggers=$(find "$pool_name"/unclaimed -not -path "*/\.*" -path "$pool_name/unclaimed/*"| wc -l)
|
||||
pending_removals=0
|
||||
fi
|
||||
###########
|
||||
#
|
||||
# end calculating pending triggers
|
||||
#
|
||||
###########
|

###########
#
# start handling results
#
###########

if [ "$pending_triggers" -gt 0 ]; then
  last_commit=$(git log -1 --pretty='format:%H')
  result=$(echo "$last_commit" | jq -R '.' | jq -s "map({ref: .})")
else
  result="[]"
fi

###########
#
# end handling results
#
###########

###########
#
# start updating triggers
#
###########

if [ "$pending_triggers" -gt 0 ]; then
  new_pending_triggers=$(( pending_triggers - 1 ))
  new_pending_removals=$(( pending_removals + 1 ))
  echo "$new_pending_triggers" > "$pool_name"/.pending-triggers
  echo "$new_pending_removals" > "$pool_name"/.pending-removals
  git add "$pool_name"/.pending*

  commit_message="triggering build with pending triggers: $new_pending_triggers; pending removals: $new_pending_removals"

  if [ -n "$ref_exists_and_is_valid" ] && [ -z "$tally_files_exist" ]; then
    commit_message="$commit_message

.pending-triggers and/or .pending-removals are missing - re-initializing resource"
  elif [ -z "$ref_exists_and_is_valid" ] && [ -n "$tally_files_exist" ]; then
    commit_message="$commit_message

resource initialized with pre-existing .pending-triggers and .pending-removals - ignoring"
  elif [ -z "$ref_exists_and_is_valid" ]; then
    commit_message="$commit_message

initializing tally files"
  fi

  if [ -n "$added_items" ]; then
    commit_message="$commit_message

additions:
$added_items"
  fi

  if [ -n "$removed_items" ]; then
    commit_message="$commit_message

removals:
$removed_items"
  fi

  git commit --allow-empty -m "$commit_message"
  git push
fi

###########
#
# end updating triggers
#
###########

echo "$result" >&3
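The final `echo "$result" >&3` appears to follow the usual CI resource "check" convention of writing a JSON list of versions to file descriptor 3; the jq pipeline above emits either an empty list or a one-element list holding the latest commit SHA. A hedged Go illustration of that JSON shape follows; the variable names and placeholder SHA are invented for this example.

```go
// Illustrates the JSON shape produced by the jq pipeline above:
// either [] or a one-element array like [{"ref":"<commit sha>"}].
// Sketch only; not code from this repository.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	lastCommit := "0123abcd..." // placeholder SHA for illustration
	versions := []map[string]string{{"ref": lastCommit}}
	out, err := json.Marshal(versions)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [{"ref":"0123abcd..."}]
}
```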
@@ -1,28 +0,0 @@
#!/bin/sh

# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

export TMPDIR=${TMPDIR:-/tmp}

load_pubkey() {
  local private_key_path=$TMPDIR/git-resource-private-key

  (jq -r '.source.private_key // empty' < "$1") > "$private_key_path"

  if [ -s "$private_key_path" ]; then
    chmod 0600 "$private_key_path"

    eval "$(ssh-agent)" >/dev/null 2>&1
    trap 'kill $SSH_AGENT_PID' 0

    ssh-add "$private_key_path" >/dev/null 2>&1

    mkdir -p ~/.ssh
    cat > ~/.ssh/config <<EOF
StrictHostKeyChecking no
LogLevel quiet
EOF
    chmod 0600 ~/.ssh/config
  fi
}
@@ -1,2 +0,0 @@
#!/bin/sh
cat
@@ -1,2 +0,0 @@
#!/bin/sh
cat
@@ -1,11 +0,0 @@
# Copyright 2024-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# It seems that Bitnami no longer supports openldap.
# See https://github.com/bitnami/containers/issues/83267
# All existing container images have been migrated from the public catalog (docker.io/bitnami) to
# the “Bitnami Legacy” repository (docker.io/bitnamilegacy), where they will no longer receive updates.
#
# FROM bitnami/openldap:2.6.10

FROM bitnamilegacy/openldap:2.6.10
@@ -1,28 +0,0 @@
# Copyright 2021-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# The cfssl/cfssl container image on dockerhub is built poorly.
# For every arch, the image contains /bin/* binaries for amd64.
# Therefore, we cannot use bash on arm64 inside this container image.
# This was observed in cfssl/cfssl:v1.6.4.
# However, they do compile their own binaries for both arm64 and amd64,
# so we can just copy their binaries into a vanilla linux base image.
FROM cfssl/cfssl:v1.6.5 as cfssl

# We just need any basic unix with bash, but we can pick the same
# base image that they use, just in case they did any dynamic linking.
FROM golang:1.25.5

# Their Dockerfile https://github.com/cloudflare/cfssl/blob/master/Dockerfile
# calls their Makefile https://github.com/cloudflare/cfssl/blob/master/Makefile
# which builds several binaries. Copy them all.
COPY --from=cfssl /usr/bin/cf* /usr/local/bin
COPY --from=cfssl /usr/bin/mkbundle /usr/local/bin
COPY --from=cfssl /usr/bin/multirootca /usr/local/bin
# Their Dockerfile also populates this directory, so copy that too.
COPY --from=cfssl /etc/cfssl /etc/cfssl

# These lines are copied from the cfssl Dockerfile.
EXPOSE 8888
ENTRYPOINT ["cfssl"]
CMD ["--help"]
@@ -1,4 +0,0 @@
# Copyright 2021-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM ghcr.io/dexidp/dex:v2.44.0
@@ -1,13 +0,0 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:13.2-slim

# Install Squid and drop in a very basic, open proxy configuration.
RUN apt-get update && apt-get install -y squid
COPY squid.conf /etc/squid/squid.conf
EXPOSE 3128

# Launch Squid as a foreground process.
CMD squid -N -C -n proxy -d1 2>&1
@@ -1,56 +0,0 @@
## listen on TCP 3128
http_port 3128

## Prevent caching anything (pass through only)
cache deny all

## Allow all connections.
http_access allow all

## Where does Squid log to?
cache_store_log none
cache_log /dev/null
access_log daemon:/var/log/squid/access.log squid
access_log syslog:user.info squid

## When logging, web auditors want to see the full uri, even with the query terms
strip_query_terms off

## Keep 7 days of logs
logfile_rotate 7

## How much RAM, in MB, to use for cache? Default since squid 3.1 is 256 MB
cache_mem 8 MB

## Maximum size of individual objects to store in cache
maximum_object_size 1 MB

## Amount of data to buffer from server to client
read_ahead_gap 64 KB

## Number of file descriptors to support (default is 2**20 which takes up ~408 MB of memory)
max_filedescriptors 65536

## Drop X-Forwarded-For headers
forwarded_for delete

## Suppress sending squid version information
httpd_suppress_version_string on

## How long to wait when shutting down squid
shutdown_lifetime 10 seconds

## What hostname to display? (defaults to system hostname)
visible_hostname proxy

## Drop some response headers that Squid normally adds (just being paranoid here)
reply_header_access Server deny all
reply_header_access Via deny all
reply_header_access X-Cache deny all
reply_header_access X-Cache-Lookup deny all
reply_header_access X-Squid-Error deny all

## Drop denied connections with just a TCP reset (no error page that might leak info)
deny_info TCP_RESET all

dns_v4_first off
@@ -1,4 +0,0 @@
# Copyright 2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM bitnami/kubectl:latest
287 generated/1.17/README.adoc generated Normal file
@@ -0,0 +1,287 @@
// Generated documentation. Please do not edit.
:anchor_prefix: k8s-api

[id="{p}-api-reference"]
== API Reference

.Packages
- xref:{anchor_prefix}-config-pinniped-dev-v1alpha1[$$config.pinniped.dev/v1alpha1$$]
- xref:{anchor_prefix}-idp-pinniped-dev-v1alpha1[$$idp.pinniped.dev/v1alpha1$$]
- xref:{anchor_prefix}-login-pinniped-dev-v1alpha1[$$login.pinniped.dev/v1alpha1$$]


[id="{anchor_prefix}-config-pinniped-dev-v1alpha1"]
=== config.pinniped.dev/v1alpha1

Package v1alpha1 is the v1alpha1 version of the Pinniped configuration API.


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfig"]
==== CredentialIssuerConfig


.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfiglist[$$CredentialIssuerConfigList$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.

| *`status`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstatus[$$CredentialIssuerConfigStatus$$]__ | Status of the credential issuer.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigkubeconfiginfo"]
==== CredentialIssuerConfigKubeConfigInfo


.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstatus[$$CredentialIssuerConfigStatus$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`server`* __string__ | The K8s API server URL.
| *`certificateAuthorityData`* __string__ | The K8s API server CA bundle.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstatus"]
==== CredentialIssuerConfigStatus

Status of a credential issuer.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfig[$$CredentialIssuerConfig$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`strategies`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstrategy[$$CredentialIssuerConfigStrategy$$] array__ | List of integration strategies that were attempted by Pinniped.
| *`kubeConfigInfo`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigkubeconfiginfo[$$CredentialIssuerConfigKubeConfigInfo$$]__ | Information needed to form a valid Pinniped-based kubeconfig using this credential issuer.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstrategy"]
==== CredentialIssuerConfigStrategy


.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-config-v1alpha1-credentialissuerconfigstatus[$$CredentialIssuerConfigStatus$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`type`* __StrategyType__ | Type of integration attempted.
| *`status`* __StrategyStatus__ | Status of the attempted integration strategy.
| *`reason`* __StrategyReason__ | Reason for the current status.
| *`message`* __string__ | Human-readable description of the current status.
| *`lastUpdateTime`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#time-v1-meta[$$Time$$]__ | When the status was last checked.
|===


[id="{anchor_prefix}-idp-pinniped-dev-v1alpha1"]
=== idp.pinniped.dev/v1alpha1

Package v1alpha1 is the v1alpha1 version of the Pinniped identity provider API.


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-condition"]
==== Condition

Condition status of a resource (mirrored from the metav1.Condition type added in Kubernetes 1.19). In a future API version we can switch to using the upstream type. See https://github.com/kubernetes/apimachinery/blob/v0.19.0/pkg/apis/meta/v1/types.go#L1353-L1413.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderstatus[$$WebhookIdentityProviderStatus$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`type`* __string__ | type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
| *`status`* __ConditionStatus__ | status of the condition, one of True, False, Unknown.
| *`observedGeneration`* __integer__ | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
| *`lastTransitionTime`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#time-v1-meta[$$Time$$]__ | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
| *`reason`* __string__ | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
| *`message`* __string__ | message is a human readable message indicating details about the transition. This may be an empty string.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-tlsspec"]
==== TLSSpec

Configuration for configuring TLS on various identity providers.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderspec[$$WebhookIdentityProviderSpec$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`certificateAuthorityData`* __string__ | X.509 Certificate Authority (base64-encoded PEM bundle). If omitted, a default set of system roots will be trusted.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityprovider"]
==== WebhookIdentityProvider

WebhookIdentityProvider describes the configuration of a Pinniped webhook identity provider.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderlist[$$WebhookIdentityProviderList$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.

| *`spec`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderspec[$$WebhookIdentityProviderSpec$$]__ | Spec for configuring the identity provider.
| *`status`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderstatus[$$WebhookIdentityProviderStatus$$]__ | Status of the identity provider.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderspec"]
==== WebhookIdentityProviderSpec

Spec for configuring a webhook identity provider.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityprovider[$$WebhookIdentityProvider$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`endpoint`* __string__ | Webhook server endpoint URL.
| *`tls`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-tlsspec[$$TLSSpec$$]__ | TLS configuration.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityproviderstatus"]
==== WebhookIdentityProviderStatus

Status of a webhook identity provider.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-webhookidentityprovider[$$WebhookIdentityProvider$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`conditions`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-idp-v1alpha1-condition[$$Condition$$]__ | Represents the observations of an identity provider's current state.
|===


[id="{anchor_prefix}-login-pinniped-dev-v1alpha1"]
=== login.pinniped.dev/v1alpha1

Package v1alpha1 is the v1alpha1 version of the Pinniped login API.


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-clustercredential"]
==== ClusterCredential

ClusterCredential is the cluster-specific credential returned on a successful credential request. It contains either a valid bearer token or a valid TLS certificate and corresponding private key for the cluster.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequeststatus[$$TokenCredentialRequestStatus$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`expirationTimestamp`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#time-v1-meta[$$Time$$]__ | ExpirationTimestamp indicates a time when the provided credentials expire.
| *`token`* __string__ | Token is a bearer token used by the client for request authentication.
| *`clientCertificateData`* __string__ | PEM-encoded client TLS certificates (including intermediates, if any).
| *`clientKeyData`* __string__ | PEM-encoded private key for the above certificate.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequest"]
==== TokenCredentialRequest

TokenCredentialRequest submits an IDP-specific credential to Pinniped in exchange for a cluster-specific credential.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequestlist[$$TokenCredentialRequestList$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.

| *`spec`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequestspec[$$TokenCredentialRequestSpec$$]__ |
| *`status`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequeststatus[$$TokenCredentialRequestStatus$$]__ |
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequestspec"]
==== TokenCredentialRequestSpec

TokenCredentialRequestSpec is the specification of a TokenCredentialRequest, expected on requests to the Pinniped API.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequest[$$TokenCredentialRequest$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`token`* __string__ | Bearer token supplied with the credential request.
| *`identityProvider`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#typedlocalobjectreference-v1-core[$$TypedLocalObjectReference$$]__ | Reference to an identity provider which can fulfill this credential request.
|===


[id="{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequeststatus"]
==== TokenCredentialRequestStatus

TokenCredentialRequestStatus is the status of a TokenCredentialRequest, returned on responses to the Pinniped API.

.Appears In:
****
- xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-tokencredentialrequest[$$TokenCredentialRequest$$]
****

[cols="25a,75a", options="header"]
|===
| Field | Description
| *`credential`* __xref:{anchor_prefix}-go-pinniped-dev-generated-1-17-apis-login-v1alpha1-clustercredential[$$ClusterCredential$$]__ | A Credential will be returned for a successful credential request.
| *`message`* __string__ | An error message will be returned for an unsuccessful credential request.
|===

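To show how the documented login API shapes fit together, here is a brief hedged Go sketch that fills in a TokenCredentialRequest according to the field tables above. The Go struct and field names (`TokenCredentialRequest`, `Spec.Token`, `Spec.IdentityProvider`) and the import path are inferred from this generated reference rather than quoted from the generated source, so treat them as assumptions.

```go
// Sketch only: constructs a login.pinniped.dev/v1alpha1 TokenCredentialRequest
// in memory, following the field tables in the generated API reference above.
// The exact Go type and field names are assumptions inferred from the docs.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	loginv1alpha1 "go.pinniped.dev/generated/1.17/apis/login/v1alpha1"
)

func main() {
	idpAPIGroup := "idp.pinniped.dev"

	req := loginv1alpha1.TokenCredentialRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example-request"},
		Spec: loginv1alpha1.TokenCredentialRequestSpec{
			// Bearer token supplied with the credential request (see spec table above).
			Token: "example-opaque-token",
			// Reference to an identity provider which can fulfill this request.
			IdentityProvider: corev1.TypedLocalObjectReference{
				APIGroup: &idpAPIGroup,
				Kind:     "WebhookIdentityProvider",
				Name:     "my-webhook-idp",
			},
		},
	}

	fmt.Printf("would submit TokenCredentialRequest %q for IDP %q\n",
		req.Name, req.Spec.IdentityProvider.Name)
}
```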
8 generated/1.17/apis/config/doc.go generated Normal file
@@ -0,0 +1,8 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:deepcopy-gen=package
// +groupName=config.pinniped.dev

// Package config is the internal version of the Pinniped configuration API.
package config

4 generated/1.17/apis/config/types.go generated Normal file
@@ -0,0 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package config

4 generated/1.17/apis/config/v1alpha1/conversion.go generated Normal file
@@ -0,0 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

12 generated/1.17/apis/config/v1alpha1/defaults.go generated Normal file
@@ -0,0 +1,12 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}

11 generated/1.17/apis/config/v1alpha1/doc.go generated Normal file
@@ -0,0 +1,11 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=go.pinniped.dev/generated/1.17/apis/config
// +k8s:defaulter-gen=TypeMeta
// +groupName=config.pinniped.dev

// Package v1alpha1 is the v1alpha1 version of the Pinniped configuration API.
package v1alpha1

43 generated/1.17/apis/config/v1alpha1/register.go generated Normal file
@@ -0,0 +1,43 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "config.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&CredentialIssuerConfig{},
		&CredentialIssuerConfigList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
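To show how this generated registration code is typically consumed, here is a short hedged Go sketch that builds a runtime.Scheme and registers the config.pinniped.dev types using the `AddToScheme`, `SchemeGroupVersion`, and `Resource` helpers defined in register.go above; the surrounding client scaffolding is illustrative and not part of this diff.

```go
// Sketch only: demonstrates typical use of the generated register.go above.
// AddToScheme, SchemeGroupVersion, and Resource come from that file; the rest
// of this example is illustrative scaffolding.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	configv1alpha1 "go.pinniped.dev/generated/1.17/apis/config/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()

	// Registers CredentialIssuerConfig and CredentialIssuerConfigList (plus the
	// defaulting funcs) into the scheme, as wired up in register.go's init().
	if err := configv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The Resource helper qualifies a plain resource name with the API group.
	gr := configv1alpha1.Resource("credentialissuerconfigs")
	fmt.Println(configv1alpha1.SchemeGroupVersion.String()) // config.pinniped.dev/v1alpha1
	fmt.Println(gr.String())                                // credentialissuerconfigs.config.pinniped.dev
}
```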
Some files were not shown because too many files have changed in this diff.