Mirror of https://github.com/vmware-tanzu/pinniped.git, synced 2026-01-17 03:03:09 +00:00
Compare commits
397 Commits
.gitattributes (vendored, 2 changed lines)
@@ -1,2 +0,0 @@
*.go.tmpl linguist-language=Go
generated/** linguist-generated
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 42 changed lines)
@@ -1,42 +0,0 @@
---
name: Bug report
about: Explain a problem you are experiencing
title: ''
labels: ''
assignees: ''

---

<!--

Hey! Thanks for opening an issue!

IMPORTANT: If you believe this bug is a security issue, please don't use this template and follow our [security guidelines](/doc/security.md).

It is recommended that you include screenshots and logs to help everyone achieve a shared understanding of the bug.

-->

**What happened?**

> Please be specific and include screenshots and logs!

**What did you expect to happen?**

> Please be specific and include proposed behavior!

**What is the simplest way to reproduce this behavior?**

**In what environment did you see this bug?**
- Pinniped server version:
- Pinniped client version:
- Pinniped container image (if using a public container image):
- Pinniped configuration (what IDP(s) are you using? what downstream credential minting mechanisms are you using?):
- Kubernetes version (use `kubectl version`):
- Kubernetes installer & version (e.g., `kubeadm version`):
- Cloud provider or hardware configuration:
- OS (e.g: `cat /etc/os-release`):
- Kernel (e.g. `uname -a`):
- Others:

**What else is there to know about this bug?**
.github/ISSUE_TEMPLATE/feature-proposal.md (vendored, 35 changed lines)
@@ -1,35 +0,0 @@
---
name: Feature proposal
about: Suggest a way to improve this project
title: ''
labels: ''
assignees: ''

---

<!--

Hey! Thanks for opening an issue!

It is recommended that you include screenshots and logs to help everyone achieve a shared understanding of the improvement.

-->

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Are you considering submitting a PR for this feature?**

- **How will this project improvement be tested?**
- **How does this change the current architecture?**
- **How will this change be backwards compatible?**
- **How will this feature be documented?**

**Additional context**
Add any other context or screenshots about the feature request here.
.github/dependabot.yml (vendored, 13 changed lines)
@@ -1,13 +0,0 @@
# See https://docs.github.com/en/github/administering-a-repository/enabling-and-disabling-version-updates

version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "daily"
.github/pull_request_template.md (vendored, 41 changed lines)
@@ -1,41 +0,0 @@
<!--
Thank you for submitting a pull request for Pinniped!

Before submitting, please see the guidelines in CONTRIBUTING.md in this repo.

Please note that a project maintainer will need to review and provide an
initial approval on the PR to cause CI tests to automatically start.
Also note that if you push additional commits to the PR, those commits
will need another initial approval before CI will pick them up.

Reminder: Did you remember to run all the linters, unit tests, and integration tests
described in CONTRIBUTING.md on your branch before submitting this PR?

Below is a template to help you describe your PR.
-->

<!--
Provide a summary of your change. Feel free to use paragraphs or a bulleted list, for example:

- Improves performance by 10,000%.
- Fixes all bugs.
- Boils the oceans.

-->

<!--
Does this PR fix one or more reported issues?
If yes, use `Fixes #<issue number>` to automatically close the fixed issue(s) when the PR is merged.
-->

**Release note**:

<!--
Does this PR introduce a user-facing change?

If no, just write "NONE" in the release-note block below.
If yes, a release note is required. Enter your extended release note in the block below.
-->
```release-note

```
.gitignore (vendored, 26 changed lines)
@@ -1,21 +1,7 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/

# goland
.idea

# Intermediate files used by Tilt
/hack/lib/tilt/build
.terraform
*.tfstate.*
*.tfstate
kubeconfig.yaml
.DS_Store
site/
.golangci.yaml
@@ -1,70 +0,0 @@
# https://github.com/golangci/golangci-lint#config-file
run:
  deadline: 1m

linters:
  disable-all: true
  enable:
    # default linters
    - deadcode
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck

    # additional linters for this project (we should disable these if they get annoying).
    - asciicheck
    - bodyclose
    - depguard
    - dogsled
    - exhaustive
    - exportloopref
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - gocritic
    - gocyclo
    - godot
    - goerr113
    - goheader
    - goimports
    - golint
    - goprintffuncname
    - gosec
    - misspell
    - nakedret
    - nestif
    - noctx
    - nolintlint
    - prealloc
    - rowserrcheck
    - scopelint
    - sqlclosecheck
    - unconvert
    - unparam
    - whitespace

issues:
  exclude-rules:
    # exclude tests from some rules for things that are useful in a testing context.
    - path: _test\.go
      linters:
        - funlen
        - gochecknoglobals
        - goerr113

linters-settings:
  funlen:
    lines: 125
    statements: 50
  goheader:
    template: |-
      Copyright 2020 the Pinniped contributors. All Rights Reserved.
      SPDX-License-Identifier: Apache-2.0
  goimports:
    local-prefixes: go.pinniped.dev
.pre-commit-config.yaml
@@ -1,17 +1,23 @@
exclude: '^(generated|hack/lib/tilt/tilt_modules)/'
# This is a configuration for https://pre-commit.com/.
# On macOS, try `brew install pre-commit` and then run `pre-commit install`.
repos:
  - repo: git://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      # TODO: find a version of this to validate ytt templates?
      # - id: check-yaml
      #   args: ['--allow-multiple-documents']
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-merge-conflict
      - id: check-added-large-files
      - id: check-byte-order-marker
      - id: detect-private-key
        exclude: testdata
      - id: mixed-line-ending
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      # TODO: find a version of this to validate ytt templates?
      # - id: check-yaml
      #   args: ['--allow-multiple-documents']
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-merge-conflict
      - id: check-added-large-files
      - id: check-byte-order-marker
      - id: detect-private-key
      - id: mixed-line-ending
  - repo: local
    hooks:
      - id: validate-copyright-year
        name: Validate copyright year
        entry: hack/check-copyright-year.sh
        language: script
AD-SETUP.md (new file, 603 lines)
@@ -0,0 +1,603 @@
# Creating an Active Directory server on Google Cloud for Pinniped integration tests

This documents the steps that were taken to create our test AD server used by the integration tests.
The integration tests use LDAPS and StartTLS to connect to the AD server.

## Create a Windows Server VM and configure it as an AD Domain Controller

The steps in this section were mostly inspired by
https://cloud.google.com/architecture/deploy-an-active-directory-forest-on-compute-engine.

From your Mac, create a VPC, subnet, firewall rules, admin password, reserved static IP, and the VM itself.

On your Mac:

```shell
# Login as yourself.
gcloud auth login

# Set some variables.
project="REDACTED" # Change this to be the actual project name before running these commands.
region="us-central1"
zone="us-central1-b"
vpc_name="ad"

# Create VPC.
gcloud compute networks create ${vpc_name} \
  --project ${project} \
  --description "VPC network to deploy Active Directory" \
  --subnet-mode custom

# Create subnet.
# The google tutorial says to "enable Private Google Access so that Windows can activate without internet access."
gcloud compute networks subnets create domain-controllers \
  --project ${project} --region ${region} \
  --network ${vpc_name} \
  --range "10.0.0.0/28" \
  --enable-private-ip-google-access

# Create a firewall rule to allow RDP. Find out what your public IP address is by going to https://whatismyipaddress.com.
# Replace the X.X.X.X placeholder address shown here with your real IPv4 address.
my_ip=X.X.X.X
gcloud compute firewall-rules create allow-rdp-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:3389 \
  --source-ranges "${my_ip}/32" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Allow LDAPS (port 636) from the whole internet.
gcloud compute firewall-rules create allow-ldaps-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:636 \
  --source-ranges "0.0.0.0/0" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Allow LDAP (port 389) from the whole internet, to allow the integration tests to use StartTLS.
gcloud compute firewall-rules create allow-ldap-ingress-to-addc \
  --project ${project} \
  --direction INGRESS \
  --action allow \
  --rules tcp:389 \
  --source-ranges "0.0.0.0/0" \
  --target-tags ad-domaincontroller \
  --network ${vpc_name} \
  --priority 10000

# Reserve a static public IP address for the domain controller VM.
addressOfDc1=$(gcloud compute addresses create ad-domain-controller \
  --project ${project} --region ${region} \
  --format="value(address)")

# Create an admin password for the Administrator user on Windows, and save it to secrets manager.
password="$(openssl rand -hex 8)-$(openssl rand -hex 8)"
echo -n "$password" > password.tmp
gcloud secrets create active-directory-dc1-password \
  --project ${project} \
  --data-file password.tmp
rm password.tmp

# This creates a service account called ad-domaincontroller@PROJECT_NAME.iam.gserviceaccount.com
# (where PROJECT_NAME is the actual GCP project name) and sets the account name to the
# variable $dcServiceAccount.
dcServiceAccount=$(gcloud iam service-accounts create ad-domaincontroller \
  --project ${project} \
  --display-name "AD Domain Controller VM Service Account" \
  --format "value(email)")

# Allow the new service account to temporarily read the Windows admin password from secret manager.
# The following `date` command might only work on MacOS. It prints the time like this: 2024-10-23T19:20:36Z
one_hour_from_now=$(TZ=UTC date -v "+1H" +"%Y-%m-%dT%H:%M:%SZ")
gcloud secrets add-iam-policy-binding active-directory-dc1-password \
  --project ${project} \
  "--member=serviceAccount:$dcServiceAccount" \
  --role=roles/secretmanager.secretAccessor \
  --condition="title=Expires after 1h,expression=request.time < timestamp('$one_hour_from_now')"

# Optional: list all bindings to see the binding that you just created.
gcloud secrets get-iam-policy active-directory-dc1-password \
  --project ${project}

# Create a powershell startup script in a local file.
cat <<"EOF" > dc-startup.ps1
$ErrorActionPreference = "Stop"

#
# Only run the script if the VM is not a domain controller already.
#
if ((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 2) {
  exit
}

#
# Read configuration from metadata.
#
Import-Module "${Env:ProgramFiles}\Google\Compute Engine\sysprep\gce_base.psm1"

Write-Host "Reading metadata..."
$ActiveDirectoryDnsDomain = Get-MetaData -Property "attributes/ActiveDirectoryDnsDomain" -instance_only
$ActiveDirectoryNetbiosDomain = Get-MetaData -Property "attributes/ActiveDirectoryNetbiosDomain" -instance_only
$ProjectId = Get-MetaData -Property "project-id" -project_only
$AccessToken = (Get-MetaData -Property "service-accounts/default/token" | ConvertFrom-Json).access_token

#
# Read the DSRM password from secret manager.
#
Write-Host "Reading secret from secret manager..."
$Secret = (Invoke-RestMethod `
  -Headers @{
    "Metadata-Flavor" = "Google";
    "x-goog-user-project" = $ProjectId;
    "Authorization" = "Bearer $AccessToken"} `
  -Uri "https://secretmanager.googleapis.com/v1/projects/$ProjectId/secrets/active-directory-dc1-password/versions/latest:access")
$DsrmPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Secret.payload.data))
$DsrmPassword = ConvertTo-SecureString -AsPlainText $DsrmPassword -force

#
# Promote.
#
Write-Host "Setting administrator password..."
Set-LocalUser -Name Administrator -Password $DsrmPassword

Write-Host "Creating a new forest $ActiveDirectoryDnsDomain ($ActiveDirectoryNetbiosDomain)..."
Install-ADDSForest `
  -DomainName $ActiveDirectoryDnsDomain `
  -DomainNetbiosName $ActiveDirectoryNetbiosDomain `
  -SafeModeAdministratorPassword $DsrmPassword `
  -DomainMode Win2008R2 `
  -ForestMode Win2008R2 `
  -InstallDns `
  -CreateDnsDelegation:$False `
  -NoRebootOnCompletion:$True `
  -Confirm:$false

#
# Configure DNS.
#
Write-Host "Configuring DNS settings..."
Get-NetAdapter | Disable-NetAdapterBinding -ComponentID ms_tcpip6
Set-DnsClientServerAddress `
  -InterfaceIndex (Get-NetAdapter -Name Ethernet).InterfaceIndex `
  -ServerAddresses 127.0.0.1

#
# Enable LSA protection.
#
New-ItemProperty `
  -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa" `
  -Name "RunAsPPL" `
  -Value 1 `
  -PropertyType DWord

Write-Host "Restarting to apply all settings..."
Restart-Computer
EOF

# Create a domain controller VM.
# E2 are the cheapest VMs. e2-medium has 2 vCPUs (shared with other customers) and 4 GB of memory.
# See https://cloud.google.com/compute/docs/general-purpose-machines#e2-shared-core.
# When we originally set up this VM, we actually started it as n2-standard-2 and after we
# finished setting up everything as shown in this guide, then we stopped the VM and changed its
# type to e2-medium and started the VM again. Maybe it would work fine to create it as
# e2-medium from the beginning, but note that we didn't actually test that.
gcloud compute instances create active-directory-dc1 \
  --project ${project} \
  --zone ${zone} \
  --image-family windows-2022 \
  --image-project windows-cloud \
  --machine-type e2-medium \
  --tags ad-domaincontroller \
  --metadata "ActiveDirectoryDnsDomain=activedirectory.test.pinniped.dev,ActiveDirectoryNetbiosDomain=pinniped-ad,sysprep-specialize-script-ps1=Install-WindowsFeature AD-Domain-Services -IncludeManagementTools; Install-WindowsFeature DNS,disable-account-manager=true" \
  --metadata-from-file windows-startup-script-ps1=dc-startup.ps1 \
  --address ${addressOfDc1} \
  --subnet=domain-controllers \
  --service-account "$dcServiceAccount" \
  --scopes cloud-platform \
  --shielded-integrity-monitoring \
  --shielded-secure-boot \
  --shielded-vtpm

# Monitor the initialization process of the first domain controller by viewing its serial port output.
# It should install the sysprep stuff, reboot, run our startup script, and then reboot again.
gcloud compute instances tail-serial-port-output active-directory-dc1 \
  --project ${project} \
  --zone ${zone}
# Use CTRL-C to cancel tailing the output.
```
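
Before moving on, it can be worth sanity-checking what was just created. A minimal sketch, assuming the `$project`, `$zone`, and `$vpc_name` variables from above are still set in your shell:

```shell
# List the firewall rules attached to the AD VPC; expect the RDP, LDAPS, and LDAP rules from above.
gcloud compute firewall-rules list \
  --project ${project} \
  --filter="network:${vpc_name}"

# Confirm the domain controller VM is RUNNING and note its external IP.
gcloud compute instances describe active-directory-dc1 \
  --project ${project} --zone ${zone} \
  --format="value(status,networkInterfaces[0].accessConfigs[0].natIP)"
```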

## Update DNS

Update the Cloud DNS entry for `activedirectory.test.pinniped.dev.` to be an "A" record pointing to the
public static IP of the VM. This is easier to do in the Cloud DNS UI in your browser.
It would take several gcloud CLI commands to accomplish the same task; a rough sketch of the CLI approach follows.
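
A minimal sketch of that CLI approach, where the hypothetical `ZONE_NAME` is a placeholder for your actual Cloud DNS managed zone name:

```shell
# Point the A record at the VM's reserved static IP, reusing ${addressOfDc1} from earlier.
# Use `gcloud dns record-sets create` instead if the record does not exist yet.
gcloud dns record-sets update activedirectory.test.pinniped.dev. \
  --project ${project} \
  --zone ZONE_NAME \
  --type A \
  --ttl 300 \
  --rrdatas "${addressOfDc1}"
```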

## Configure test users and groups

Make sure you have an RDP client installed. On a Mac, you can install RDP from the App Store.
It was recently renamed "Windows App".

Note: To copy/paste in the RDP client, you may need to use CTRL-C/CTRL-V if CMD-C/CMD-V don't work.

RDP into the Windows VM. To connect, use `activedirectory.test.pinniped.dev` as the name of the server,
the username `Administrator`, and the password from the `active-directory-dc1-password` entry in Secrets Manager.
You can ignore the RDP certificate error.

In your RDP session, open Powershell. Then run the following commands to add some users and groups,
change the password policy, and grant some permissions.

Before running the commands, replace the redacted passwords as follows:
- The value for `REDACTED_BIND_USER_PASSWORD` can be found at `aws-ad-bind-account-password` in the `concourse-secrets` secret
- The value for `REDACTED_PINNY_USER_PASSWORD` can be found at `aws-ad-user-password` in the `concourse-secrets` secret
- The value for `REDACTED_DEACTIVATED_USER_PASSWORD` can be found at `aws-ad-deactivated-user-password` in the `concourse-secrets` secret

```powershell
New-ADOrganizationalUnit -Name "pinniped-ad" `
  -ProtectedFromAccidentalDeletion $false

New-ADOrganizationalUnit -Name "Users" `
  -Path "OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -ProtectedFromAccidentalDeletion $false

New-ADOrganizationalUnit -Name "test-users" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -Description "integration tests will create and delete ephemeral users here" `
  -ProtectedFromAccidentalDeletion $false

# Print all OUs to validate that they were created.
Get-ADOrganizationalUnit -Filter *

New-ADUser -Name "Bind User" -SamAccountName "bind-user" -GivenName "Bind" -Surname "User" -DisplayName "Bind User" `
  -UserPrincipalName "bind-user@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_BIND_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $true -PasswordNeverExpires $true

# Note that the value of EmailAddress is not a real email address, but that's okay.
New-ADUser -Name "Pinny Seal" -SamAccountName "pinny" -GivenName "Pinny" -Surname "Seal" -DisplayName "Pinny Seal" `
  -UserPrincipalName "pinny@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -EmailAddress "tanzu-user-authentication@groups.vmware.com" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_PINNY_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $true -PasswordNeverExpires $true

New-ADUser -Name "Deactivated User" -SamAccountName "deactivated-user" -GivenName "Deactivated" -Surname "User" -DisplayName "Deactivated User" `
  -UserPrincipalName "deactivated-user@activedirectory.test.pinniped.dev" `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" `
  -AccountPassword (ConvertTo-SecureString "REDACTED_DEACTIVATED_USER_PASSWORD" -AsPlainText -Force) `
  -Enabled $false -PasswordNeverExpires $true

# Take note of the pinny account's ObjectGUID. You will need to edit the concourse-secrets secret later to update this GUID value.
# This value should look something like "288188dd-ab76-4f61-b6e4-c72e081502c5".
Get-ADUser pinny -Properties * | Select SamaccountName,ObjectGUID

# Print all users to validate that they were created.
Get-ADUser -Filter *

New-ADGroup -Name "Marine Mammals" -SamAccountName "Marine Mammals" -DisplayName "Marine Mammals" `
  -GroupCategory Security -GroupScope Global `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"

Add-ADGroupMember -Identity "Marine Mammals" -Members "pinny"

New-ADGroup -Name "Mammals" -SamAccountName "Mammals" -DisplayName "Mammals" `
  -GroupCategory Security -GroupScope Global `
  -Path "OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"

Add-ADGroupMember -Identity "Mammals" -Members "Marine Mammals"

# Change the default password policy. There are some integration tests that rely on this.
# This is the equivalent of doing this in the Windows "Active Directory Administrative Center" UI:
# check "enforce account lockout policy", give it 20 failed attempts and a 15-minute reset, then
# uncheck "enforce minimum password age" so we can change the password immediately upon creating a user.
Set-ADDefaultDomainPasswordPolicy -Identity "activedirectory.test.pinniped.dev" `
  -LockoutThreshold 20 -LockoutDuration "00:15:00" -LockoutObservationWindow "00:15:00" `
  -MinPasswordAge 0

# Print the policy to validate that it was updated.
Get-ADDefaultDomainPasswordPolicy

# We need to allow the bind-user to create/delete/edit users and groups within the test-users OU, because several
# integration tests want to create/delete/edit ephemeral test users and groups.
# These access control steps were inspired by https://the-itguy.de/delegate-access-in-active-directory-with-powershell/.
# This is intended to be the equivalent of using the UI to assign permissions like this: right click on "test-users",
# select Delegate Control, select "bind-user" as the user, select "create, delete and manage user accounts" and
# "reset user passwords" as the tasks to delegate.
function New-ADDGuidMap
{
    $rootdse = Get-ADRootDSE
    $guidmap = @{ }
    $GuidMapParams = @{
        SearchBase = ($rootdse.SchemaNamingContext)
        LDAPFilter = "(schemaidguid=*)"
        Properties = ("lDAPDisplayName", "schemaIDGUID")
    }
    Get-ADObject @GuidMapParams | ForEach-Object { $guidmap[$_.lDAPDisplayName] = [System.GUID]$_.schemaIDGUID }
    return $guidmap
}
$GuidMap = New-ADDGuidMap
$BindUserSID = New-Object System.Security.Principal.SecurityIdentifier (Get-ADUser "bind-user").SID
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$ace1 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["user"]
$ace2 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["user"], "All"
$ace3 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "GenericAll", "Allow", "Descendents", $GuidMap["group"]
$ace4 = New-Object System.DirectoryServices.ActiveDirectoryAccessRule $BindUserSID, "CreateChild, DeleteChild", "Allow", $GuidMap["group"], "All"
$acl.AddAccessRule($ace1)
$acl.AddAccessRule($ace2)
$acl.AddAccessRule($ace3)
$acl.AddAccessRule($ace4)
Set-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev" -AclObject $acl

# Print the access control rules that were just applied.
$acl = Get-Acl -Path "AD:OU=test-users,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev"
$acl.Access | Where-Object { $_.IdentityReference -eq "pinniped-ad\bind-user" }
```

If you would like to see these OUs, users, and groups in the UI, you can open the "Active Directory Users and Computers"
app in your RDP session.

## Configure a CA and a serving certificate for LDAPS

Now we need to create and configure a TLS serving certificate for LDAPS.

The certificate needs to include two hostnames. One of the hostnames is the name that the AD server
thinks is its own hostname (`active-directory-dc1.activedirectory.test.pinniped.dev`).
This is how the AD server will decide to use this cert for the LDAPS port.
The other hostname is the one that clients will use when making connections from the outside
(`activedirectory.test.pinniped.dev`) so they can validate the server certificate.

The steps here were inspired by https://gist.github.com/magnetikonline/0ccdabfec58eb1929c997d22e7341e45.

On your Mac:

```shell
# On your Mac: Create a self-signed CA public/private keypair.
openssl req -x509 -newkey rsa:4096 \
  -keyout ad-ca.key -out ad-ca.crt \
  -sha256 -days 36500 -nodes \
  -subj "/C=US/ST=California/L=San Francisco/O=Pinniped/OU=Pinniped CI/CN=Pinniped AD CA"

# Copy the public key to your clipboard.
cat ad-ca.crt | pbcopy
```
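
Optionally, a quick sanity check of the new CA cert before copying it (a sketch using standard openssl flags on the `ad-ca.crt` file created above):

```shell
# Confirm the subject and the roughly-100-year expiration date of the CA cert.
openssl x509 -in ad-ca.crt -noout -subject -enddate
```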

In Powershell terminal:

```powershell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the public
# key again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\ca.crt"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\ca.crt"

# Import root certificate into trusted store of domain controller in your Powershell terminal:
Import-Certificate -FilePath "C:\users\administrator\desktop\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root
```

If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
and look in the folder called "Trusted Root Certification Authorities\Certificates".
If the UI was already open, click the refresh button.

Copy the following file contents to your clipboard:

```
[Version]
Signature="$Windows NT$"

[NewRequest]
Subject = "CN=activedirectory.test.pinniped.dev"
KeySpec = 1
KeyLength = 2048
Exportable = TRUE
MachineKeySet = TRUE
SMIME = FALSE
PrivateKeyArchive = FALSE
UserProtected = FALSE
UseExistingKeySet = FALSE
ProviderName = "Microsoft RSA SChannel Cryptographic Provider"
ProviderType = 12
RequestType = PKCS10
KeyUsage = 0xa0

[EnhancedKeyUsageExtension]
OID = 1.3.6.1.5.5.7.3.1 ; Server Authentication

[Extensions]
2.5.29.17 = "{text}"
_continue_ = "DNS=activedirectory.test.pinniped.dev"
_continue_ = "DNS=active-directory-dc1.activedirectory.test.pinniped.dev"
```

In Powershell terminal:

```powershell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to copy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\request.inf"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\request.inf"

# Create a CSR. This command will also generate a private key for the AD server and save it.
certreq -new "C:\users\administrator\desktop\request.inf" "C:\users\administrator\desktop\client.csr"

# Show the CSR.
type "C:\users\administrator\desktop\client.csr"

# Copy the content of this file to your clipboard.
Get-Content "C:\users\administrator\desktop\client.csr" | Set-Clipboard
```

On your Mac:

```shell
# On your Mac, use the CA to issue a serving cert based on the CSR.
pbpaste > client.csr

cat <<EOF > v3ext.txt
keyUsage=digitalSignature,keyEncipherment
extendedKeyUsage=serverAuth
subjectKeyIdentifier=hash
subjectAltName = @alt_names
[alt_names]
DNS.1 = activedirectory.test.pinniped.dev
DNS.2 = active-directory-dc1.activedirectory.test.pinniped.dev
EOF

# Create a cert from the CSR signed by the CA.
openssl x509 \
  -req -days 36500 \
  -in client.csr -CA ad-ca.crt -CAkey ad-ca.key -extfile v3ext.txt \
  -set_serial 01 -out client.crt

# Inspect the generated certificate.
# Ensure the following X509v3 extensions are all present:
#   Key Usage: Digital Signature, Key Encipherment
#   Extended Key Usage: TLS Web Server Authentication
#   Subject Key Identifier
#   Subject Alternative Name with 2 DNS hostnames
#   Authority Key Identifier
openssl x509 -in client.crt -text

# Copy the generated cert.
cat client.crt | pbcopy
```

In Powershell terminal:

```powershell
# In your Windows RDP session's Powershell terminal, put the content of the clipboard into a file.
# Note that if you copy/paste this command to your RDP session, then you need to pbcopy the file contents
# from above again before you hit return for this command.
Get-Clipboard | Out-File -FilePath "C:\users\administrator\desktop\client.crt"

# In Powershell terminal, check that the file exists and looks correct.
type "C:\users\administrator\desktop\client.crt"

# Add the serving certificate to Windows. This will also automatically associate it to the private key that you
# generated with the previous usage of certreq.
certreq -accept "C:\users\administrator\desktop\client.crt"

# If you want to validate that this was imported, open the UI tool called "Manage computer certificates"
# and look in the folder called "Personal\Certificates". If the UI was already open, click the refresh button.
# Double click on the cert. Ensure that it says, "you have a private key that corresponds to this certificate".
# Next, we need to reboot the VM for the cert to get picked up and used for serving incoming LDAPS connections.
# After showing you a warning dialog box, this should terminate your RDP session and stop the VM.
shutdown /s
```

Wait for the VM to stop, then start the VM again from your Mac:

```shell
gcloud compute instances start active-directory-dc1 --project ${project} --zone ${zone}
```

Wait for the VM to finish booting. Then we can confirm that LDAPS is working. On your Mac:

```shell
# Check that serving cert is being returned on the LDAPS port. This command should show the cert chain.
# It should also verify the server cert using our CA. The output should include "Verify return code: 0 (ok)".
openssl s_client -connect activedirectory.test.pinniped.dev:636 -showcerts -CAfile ad-ca.crt < /dev/null

# Unfortunately, the ldapsearch command that comes pre-installed on MacOS does not seem to respect
# the LDAPTLS_CACERT env variable. So it will not be able to validate the server certificates.
# As a workaround, we can use docker to run ldapsearch commands in a linux container.

# Test the regular LDAP port by issuing a query on your Mac. The -ZZ option asks it to use StartTLS.
# This should list all users. Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
  ldapsearch -d8 -v -x -ZZ -H 'ldap://activedirectory.test.pinniped.dev' \
  -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -w 'REDACTED_BIND_USER_PASSWORD' \
  -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -s sub \
  '(objectClass=user)' '*'

# Test the LDAPS port by issuing a query on your Mac. This should list all users.
# Replace REDACTED_BIND_USER_PASSWORD with the real password.
docker run -v "$(pwd):/certs" -e LDAPTLS_CACERT="/certs/ad-ca.crt" --rm -it bitnami/openldap \
  ldapsearch -d8 -v -x -H 'ldaps://activedirectory.test.pinniped.dev' \
  -D 'CN=Bind User,OU=Users,OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -w 'REDACTED_BIND_USER_PASSWORD' \
  -b 'OU=pinniped-ad,DC=activedirectory,DC=test,DC=pinniped,DC=dev' \
  -s sub \
  '(objectClass=user)' '*'
```
## Update the `concourse-secrets` secret in GCP Secrets Manager

On your Mac:

```shell
# Copy the CA's public cert.
cat ad-ca.crt | base64 | pbcopy

# cd to your local clone of the `ci` branch of the pinniped repo
cd pinniped-ci-branch

# Edit the secret.
./hack/edit-gcloud-secret.sh concourse-secrets
# This opens vim to edit the secret.
# Paste the cert as the value for `aws-ad-ca-data`.
# Also edit the value of `aws-ad-user-unique-id-attribute-value`. The value should be the ObjectGUID of the pinny
# user that you created in the steps above.
# Save your changes, exit vim, and when prompted say that you want to save this as the new version of concourse-secrets.
```
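
To double-check that the edit took effect, you can read the latest version of the secret back. A minimal sketch, assuming your account has secretmanager accessor permission in the project:

```shell
# Fetch the newest version of the secret and confirm that the updated keys are present.
gcloud secrets versions access latest \
  --project ${project} \
  --secret concourse-secrets | grep -E "aws-ad-ca-data|aws-ad-user-unique-id-attribute-value"
```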

## Confirm that Active Directory integration tests can pass

Use these commands to run all the Active Directory integration tests on your Mac.
The `-run` filter is based on the tests as they existed at the time of writing this doc.
You can find AD tests by searching for `SkipTestWhenActiveDirectoryIsUnavailable`.

On your Mac:

```shell
# Login so we can read the secrets from GCP Secret Manager.
gcloud auth login

# cd to your local git clone
cd pinniped

# Compile and install onto a local kind cluster.
./hack/prepare-for-integration-tests.sh -c --get-active-directory-vars "../pinniped-ci-branch/hack/get-aws-ad-env-vars.sh"

# Run all the tests that depend on AD.
source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration \
  -run "/TestSupervisorLogin_Browser/active_directory|/TestE2EFullIntegration_Browser/with_Supervisor_ActiveDirectory|/TestActiveDirectoryIDPPhaseAndConditions_Parallel|/TestSupervisorWarnings_Browser/Active_Directory"
```

## Cleanup

On your Mac:

```shell
# Remove all bindings for the service account from the secret.
# The binding was only needed during the first boot of the VM.
gcloud secrets remove-iam-policy-binding active-directory-dc1-password \
  --project ${project} \
  --member "serviceAccount:${dcServiceAccount}" --role roles/secretmanager.secretAccessor \
  --all

# Remove the firewall rule which allows incoming RDP connections.
# If you need to RDP to this AD VM in the future, then you will need to create
# a new firewall rule to allow it.
gcloud compute firewall-rules delete allow-rdp-ingress-to-addc \
  --project ${project} \
  --quiet

# Remove all temp files. It's okay to remove the private key for our CA because we
# created certs that are good for 100 years, as long as you have already added the
# public cert to the concourse-secrets secret. If we need to create a new AD VM, we
# can also create a new CA.
rm ad-ca.crt ad-ca.key client.crt client.csr v3ext.txt
```
ADOPTERS.md
@@ -1,9 +0,0 @@
# Pinniped Adopters

These organizations are using Pinniped.

* [VMware Tanzu](https://tanzu.vmware.com/) ([Tanzu Mission Control](https://tanzu.vmware.com/mission-control))

If you are using Pinniped and are not on this list, you can open a [pull
request](https://github.com/vmware-tanzu/pinniped/issues/new?template=feature-proposal.md)
to add yourself.
CODE_OF_CONDUCT.md
@@ -1,84 +1 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [oss-coc@vmware.com](mailto:oss-coc@vmware.com). All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0,
available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
Please see https://github.com/vmware-tanzu/pinniped/blob/main/CODE_OF_CONDUCT.md
166
CONTRIBUTING.md
166
CONTRIBUTING.md
@@ -1,165 +1 @@
|
||||
# Contributing to Pinniped
|
||||
|
||||
Contributions to Pinniped are welcome. Here are some things to help you get started.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Please see the [Code of Conduct](./CODE_OF_CONDUCT.md).
|
||||
|
||||
## Project Scope
|
||||
|
||||
Learn about the [scope](doc/scope.md) of the project.
|
||||
|
||||
## Meeting with the Maintainers
|
||||
|
||||
The maintainers aspire to hold a video conference every other week with the Pinniped community.
|
||||
Any community member may request to add topics to the agenda by contacting a [maintainer](MAINTAINERS.md)
|
||||
in advance, or by attending and raising the topic during time remaining after the agenda is covered.
|
||||
Typical agenda items include topics regarding the roadmap, feature requests, bug reports, pull requests, etc.
|
||||
A [public document](https://docs.google.com/document/d/1qYA35wZV-6bxcH5375vOnIGkNBo7e4OROgsV4Sj8WjQ)
|
||||
tracks the agendas and notes for these meetings.
|
||||
|
||||
These meetings are currently scheduled for the first and third Thursday mornings of each month
|
||||
at 9 AM Pacific Time, using this [Zoom meeting](https://VMware.zoom.us/j/94638309756?pwd=V3NvRXJIdDg5QVc0TUdFM2dYRzgrUT09).
|
||||
If the meeting day falls on a US holiday, please consider that occurrence of the meeting to be canceled.
|
||||
|
||||
## Discussion
|
||||
|
||||
Got a question, comment, or idea? Please don't hesitate to reach out via the GitHub [Discussions](https://github.com/vmware-tanzu/pinniped/discussions) tab at the top of this page.
|
||||
|
||||
## Issues
|
||||
|
||||
Need an idea for a project to get started contributing? Take a look at the open
|
||||
[issues](https://github.com/vmware-tanzu/pinniped/issues).
|
||||
Also check to see if any open issues are labeled with
|
||||
["good first issue"](https://github.com/vmware-tanzu/pinniped/labels/good%20first%20issue)
|
||||
or ["help wanted"](https://github.com/vmware-tanzu/pinniped/labels/help%20wanted).
|
||||
|
||||
### Bugs
|
||||
|
||||
To file a bug report, please first open an
|
||||
[issue](https://github.com/vmware-tanzu/pinniped/issues/new?template=bug_report.md). The project team
|
||||
will work with you on your bug report.
|
||||
|
||||
Once the bug has been validated, a [pull request](https://github.com/vmware-tanzu/pinniped/compare)
|
||||
can be opened to fix the bug.
|
||||
|
||||
For specifics on what to include in your bug report, please follow the
|
||||
guidelines in the issue and pull request templates.
|
||||
|
||||
### Features
|
||||
|
||||
To suggest a feature, please first open an
|
||||
[issue](https://github.com/vmware-tanzu/pinniped/issues/new?template=feature-proposal.md)
|
||||
and tag it with `proposal`, or create a new [Discussion](https://github.com/vmware-tanzu/pinniped/discussions).
|
||||
The project team will work with you on your feature request.
|
||||
|
||||
Once the feature request has been validated, a [pull request](https://github.com/vmware-tanzu/pinniped/compare)
|
||||
can be opened to implement the feature.
|
||||
|
||||
For specifics on what to include in your feature request, please follow the
|
||||
guidelines in the issue and pull request templates.

## CLA

We welcome contributions from everyone, but we can only accept them if you sign
our Contributor License Agreement (CLA). If you would like to contribute and you
have not signed it, our CLA-bot will walk you through the process when you open
a Pull Request. For questions about the CLA process, see the
[FAQ](https://cla.vmware.com/faq) or submit a question through the GitHub issue
tracker.

## Building

The [Dockerfile](Dockerfile) at the root of the repo can be used to build and
package the code. After making a change to the code, rebuild the Docker image with the following command.

```bash
# From the root directory of the repo...
docker build .
```
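
If you would like to reference the image later, for example to load it into a local cluster, you can also give the build a tag. The tag name below is just an illustration:

```bash
# Build and tag the image in one step (the tag name is arbitrary):
docker build -t pinniped-dev:latest .
```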

## Testing

### Running Lint

```bash
./hack/module.sh lint
```

### Running Unit Tests

```bash
./hack/module.sh units
```

### Running Integration Tests

1. Install dependencies:

   - [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start)
   - [`tilt`](https://docs.tilt.dev/install.html)
   - [`ytt`](https://carvel.dev/#getting-started)
   - [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
   - [`chromedriver`](https://chromedriver.chromium.org/) (and [Chrome](https://www.google.com/chrome/))

   On macOS, these tools can be installed with [Homebrew](https://brew.sh/) (assuming you have Chrome installed already):

   ```bash
   brew install kind tilt-dev/tap/tilt k14s/tap/ytt kubectl chromedriver
   ```

1. Create a local Kubernetes cluster using `kind`:

   ```bash
   ./hack/kind-up.sh
   ```

1. Install Pinniped and supporting dependencies using `tilt`:

   ```bash
   ./hack/tilt-up.sh
   ```

   Tilt will continue running and live-updating the Pinniped deployment whenever the code changes.

1. Run the Pinniped integration tests:

   ```bash
   source /tmp/integration-test-env && go test -v -count 1 ./test/integration
   ```
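
   For example, to run a single integration test by name instead of the whole suite (the test name below is illustrative):

   ```bash
   source /tmp/integration-test-env && go test -v -count 1 -run TestExampleName ./test/integration
   ```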

To uninstall the test environment, run `./hack/tilt-down.sh`.
To destroy the local Kubernetes cluster, run `./hack/kind-down.sh`.

### Observing Tests on the Continuous Integration Environment

[CI](https://hush-house.pivotal.io/teams/tanzu-user-auth/pipelines/pinniped-pull-requests)
will not be triggered on a pull request until the pull request is reviewed and
approved for CI by a project [maintainer](MAINTAINERS.md). Once CI is triggered,
the progress and results will appear on the GitHub page for that
[pull request](https://github.com/vmware-tanzu/pinniped/pulls) as checks. Links
will appear to view the details of each check.

## Documentation

Any pull request which adds a new feature or changes the behavior of any feature which was previously documented
should include updates to the documentation. All documentation lives in this repository. This project aspires to
follow the Kubernetes [documentation style guide](https://kubernetes.io/docs/contribute/style/style-guide).

## Pre-commit Hooks

This project uses [pre-commit](https://pre-commit.com/) to agree on some conventions about whitespace and file encoding.

```bash
$ brew install pre-commit
[...]
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
```
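
Once installed, the hooks run automatically on each `git commit`. You can also run them against every file in the repo, which is useful right after installing the hooks for the first time:

```bash
pre-commit run --all-files
```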

## Becoming a Pinniped Maintainer

Regular contributors who are active in the Pinniped community and who have contributed at least several
significant pull requests may be considered for promotion to become a maintainer upon request. Please
contact an existing [maintainer](MAINTAINERS.md) if you would like to be considered.
Please see https://github.com/vmware-tanzu/pinniped/blob/main/CONTRIBUTING.md

Dockerfile (41 lines)
@@ -1,41 +0,0 @@
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM golang:1.15.3 as build-env

WORKDIR /work
# Get dependencies first so they can be cached as a layer
COPY go.* ./
COPY generated/1.19/apis/go.* ./generated/1.19/apis/
COPY generated/1.19/client/go.* ./generated/1.19/client/
RUN go mod download

# Copy only the production source code to avoid cache misses when editing other files
COPY generated ./generated
COPY cmd ./cmd
COPY internal ./internal
COPY tools ./tools
COPY hack ./hack

# Build the executable binary (CGO_ENABLED=0 means static linking)
RUN mkdir out \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(hack/get-ldflags.sh)" -o out ./cmd/pinniped-concierge/... \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(hack/get-ldflags.sh)" -o out ./cmd/pinniped-supervisor/... \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out ./cmd/local-user-authenticator/...

# Use a runtime image based on Debian slim
FROM debian:10.6-slim

# Copy the binaries from the build-env stage
COPY --from=build-env /work/out/pinniped-concierge /usr/local/bin/pinniped-concierge
COPY --from=build-env /work/out/pinniped-supervisor /usr/local/bin/pinniped-supervisor
COPY --from=build-env /work/out/local-user-authenticator /usr/local/bin/local-user-authenticator

# Document the ports
EXPOSE 8080 8443

# Run as non-root for security posture
USER 1001:1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]

MAINTAINERS.md
@@ -1,17 +1 @@
# Pinniped Maintainers

This is the current list of maintainers for the Pinniped project.

| Maintainer | GitHub ID | Affiliation |
| --------------- | --------- | ----------- |
| Andrew Keesler | [ankeesler](https://github.com/ankeesler) | [VMware](https://www.github.com/vmware/) |
| Matt Moyer | [mattmoyer](https://github.com/mattmoyer) | [VMware](https://www.github.com/vmware/) |
| Pablo Schuhmacher | [pabloschuhmacher](https://github.com/pabloschuhmacher) | [VMware](https://www.github.com/vmware/) |
| Ryan Richard | [cfryanr](https://github.com/cfryanr) | [VMware](https://www.github.com/vmware/) |

## Pinniped Contributors & Stakeholders

| Feature Area | Lead |
| ----------------------------- | :---------------------: |
| Technical Lead | Matt Moyer (mattmoyer) |
| Product Management | Pablo Schuhmacher (pabloschuhmacher) |
Please see https://github.com/vmware-tanzu/pinniped/blob/main/MAINTAINERS.md
README.md (230 lines)
@@ -1,55 +1,219 @@
<img src="doc/img/pinniped_logo.svg" alt="Pinniped Logo" width="100%"/>
# Pinniped's `ci` branch

## Overview
This `ci` branch contains the CI/CD tooling for [Pinniped](https://github.com/vmware-tanzu/pinniped).

Pinniped provides identity services to Kubernetes.
The documentation and code in this branch is mainly intended for the maintainers of Pinniped.

Pinniped allows cluster administrators to easily plug in external identity
providers (IDPs) into Kubernetes clusters. This is achieved via a uniform
install procedure across all types and origins of Kubernetes clusters,
declarative configuration via Kubernetes APIs, enterprise-grade integrations
with IDPs, and distribution-specific integration strategies.
This branch is not intended to be merged to the `main` branch.

### Example Use Cases
The code in the branch previously lived in a private repository. It was made public by moving
the code into the `ci` branch of the Pinniped repository in late 2024. The previous git history
for these files was not copied from the private repository at the time of this migration.

* Your team uses a large enterprise IDP, and has many clusters that they
manage. Pinniped provides:
  * Seamless and robust integration with the IDP
  * Easy installation across clusters of any type and origin
  * A simplified login flow across all clusters
* Your team shares a single cluster. Pinniped provides:
  * Simple configuration to integrate an IDP
  * Individual, revocable identities
## Reporting an issue in this branch

### Architecture
Found a bug or would like to make an enhancement request?
Please report issues in [this repo](https://github.com/vmware-tanzu/pinniped).

Pinniped offers credential exchange to enable a user to exchange an external IDP
credential for a short-lived, cluster-specific credential. Pinniped supports various
IDP types and implements different integration strategies for various Kubernetes
distributions to make authentication possible.
## Reporting security vulnerabilities

To learn more, see [doc/architecture.md](doc/architecture.md).
Please follow the procedure described in [SECURITY.md](https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md).

<img src="doc/img/pinniped_architecture.svg" alt="Pinniped Architecture Sketch" width="300px"/>
## Creating a release

## Trying Pinniped
When the team is preparing to ship a release, a maintainer will create a new
GitHub [Issue](https://github.com/vmware-tanzu/pinniped/issues/new/choose) in this repo to
collaboratively track progress on the release checklist. As tasks are completed,
the team will check them off. When all the tasks are completed, the issue is closed.

Care to kick the tires? It's easy to [install and try Pinniped](doc/demo.md).
The release checklist is committed to this repo as an [issue template](https://github.com/vmware-tanzu/pinniped/tree/main/.github/ISSUE_TEMPLATE/release_checklist.md).

## Discussion
## Pipelines

Got a question, comment, or idea? Please don't hesitate to reach out via the GitHub [Discussions](https://github.com/vmware-tanzu/pinniped/discussions) tab at the top of this page.
Pinniped uses [Concourse](https://concourse-ci.org) for CI/CD.
We are currently running our Concourse on a network that can only be reached from inside the corporate network, at [ci.pinniped.broadcom.net](https://ci.pinniped.broadcom.net).

## Contributions
The following pipelines are implemented in this branch. Not all pipelines are necessarily publicly visible, although our goal is to make them all visible.

Contributions are welcome. Before contributing, please see the [contributing guide](CONTRIBUTING.md).
- `main`

## Reporting Security Vulnerabilities
  This is the main pipeline that runs on merges to `main`. It builds, tests, and (when manually triggered) releases from main.

Please follow the procedure described in [SECURITY.md](SECURITY.md).
- `pull-requests`

  This is a pipeline that triggers for each open pull request. It runs a smaller subset of the integration tests and validations compared to the `main` pipeline.

- `dockerfile-builders`

  This pipeline builds a number of custom utility container images that are used in our CI and testing.

  - `build-gi-cli` (a container image that includes the GitHub CLI)
  - `build-github-pr-resource` (a [fork](https://github.com/pinniped-ci-bot/github-pr-resource) of the `github-pr-resource` with support for gating PRs for untrusted users)
  - `build-code-coverage-uploader` (uploading code coverage during unit tests)
  - `build-eks-deployer-dockerfile` (deploying our app to EKS clusters)
  - `build-k8s-app-deployer-dockerfile` (deploying our app to clusters)
  - `build-pool-trigger-resource-dockerfile` (an updated implementation of the [pool-trigger-resource](https://github.com/cfmobile/pool-trigger-resource) for use in our CI)
  - `build-integration-test-runner-dockerfile` (running our integration tests)
  - `build-integration-test-runner-beta-dockerfile` (running our integration tests with the latest Chrome beta version)
  - `build-deployment-yaml-formatter-dockerfile` (templating our deployment YAML during a release)
  - `build-crane` (copying and tagging container images during a release)
  - `build-k8s-code-generator-*` (running our Kubernetes code generation under different Kubernetes dependency versions)
  - `build-test-dex` (a Dex instance used during tests)
  - `build-test-cfssl` (a cfssl image used during tests)
  - `build-test-kubectl` (a kubectl image used during tests)
  - `build-test-forward-proxy` (a Squid forward proxy used during tests)
  - `build-test-bitnami-ldap` (an OpenLDAP server used during tests)

- `cleanup-aws`

  This runs a script that runs [aws-nuke](https://github.com/rebuy-de/aws-nuke) against our test AWS account.
  This was occasionally needed because [eksctl](https://eksctl.io/) sometimes fails and leaks AWS resources. These resources cost money and use up our AWS quota.
  However, we seem to have worked around these issues and this pipeline has not been used for some time.

  These jobs are only triggered manually. Because aws-nuke deletes resources indiscriminately, it is dangerous and should be used with care.

- `concourse-workers`

  Deploys worker replicas on a long-lived GKE cluster that runs the Concourse workers, and can scale them up or down.

- `go-compatibility`

  This pipeline runs nightly jobs that validate the compatibility of our code as a Go module in various contexts. We have jobs that test that our code compiles under older Go versions and that our CLI can be installed using `go install`.

- `security-scan`

  This pipeline has nightly jobs that run security scans on our current main branch and most recently released artifacts.

  The tools we use are:
  - [sonatype-nexus-community/nancy](https://github.com/sonatype-nexus-community/nancy), which scans Go module versions.
  - [aquasecurity/trivy](https://github.com/aquasecurity/trivy), which scans container images and Go binaries.
  - [govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck), which scans Go code to find calls to known-vulnerable dependencies.

  This pipeline also has a job called `all-golang-deps-updated` which automatically submits PRs to update all
  direct dependencies in Pinniped's go.mod file, and to update the Golang and distroless container images used in
  Pinniped's Dockerfiles.

- `kind-node-builder`

  A nightly build job which uses the latest version of kind to build the HEAD of the Kubernetes master branch as a container
  image that can be used to deploy kind clusters. Other pipelines use this container image to install Pinniped and run
  integration tests. This gives us insight into any compatibility problems with the upcoming release of Kubernetes.

## Deploying pipeline changes

After any shared tasks (`./pipelines/shared-tasks`) or helpers (`./pipelines/shared-helpers`) are edited,
the commits must be pushed to the `ci` branch of this repository to take effect.

After editing any CI secrets or pipeline definitions, a maintainer must run the corresponding
`./pipelines/$PIPELINE_NAME/update-pipeline.sh` script to apply the changes to Concourse.
To deploy _all_ pipelines, a maintainer can run `./pipelines/update-all-pipelines.sh`.
Don't forget to commit and push your changes after applying them!
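
For example, to redeploy only the `main` pipeline after editing its definition, following the script naming convention described above:

```bash
./pipelines/main/update-pipeline.sh
```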

## GitHub webhooks for pipelines

Some pipelines use GitHub [webhooks to trigger resource checks](https://concourse-ci.org/resources.html#schema.resource.webhook_token),
rather than the default of polling every minute, to make these pipelines more responsive and use fewer compute resources
for running checks. Refer to places where `webhook_token` is configured in various `pipeline.yml` files.
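
To find those places quickly:

```bash
grep -r "webhook_token" --include="pipeline.yml" pipelines/
```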

To make these webhooks work, they must be defined in the [GitHub repo's settings](https://github.com/vmware-tanzu/pinniped/settings/hooks).

## Installing and operating Concourse

See [infra/README.md](./infra/README.md) for details about how Concourse was installed and how it can be operated.

## Acceptance environments

In addition to the many ephemeral Kubernetes clusters we use for testing, we also deploy a long-running acceptance environment.
It runs on Google Kubernetes Engine (GKE) as the `gke-acceptance-cluster` cluster in our GCP project, in the `us-central1-c` availability zone.

To access this cluster, download the kubeconfig to `gke-acceptance.yaml` by running:

```bash
KUBECONFIG=gke-acceptance.yaml gcloud container clusters get-credentials gke-acceptance-cluster --project "$PINNIPED_GCP_PROJECT" --zone us-central1-c
```

The above command assumes that you have already set `PINNIPED_GCP_PROJECT` to be the name of the GCP project.
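
For example (the project name below is a placeholder):

```bash
export PINNIPED_GCP_PROJECT=my-gcp-project

# Then verify access to the cluster using the downloaded kubeconfig:
KUBECONFIG=gke-acceptance.yaml kubectl get nodes
```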

## CI secrets

We use [Google Secret Manager](https://cloud.google.com/secret-manager) on GCP to store build/test/release secrets.
These secrets are only available to the maintainers.

Using the `gcloud secrets list` command or the [web console](https://console.cloud.google.com/security/secret-manager),
you can list the available secrets. The content of each secret is a YAML file with secret key/value pairs.
You can also use the `./hack/edit-gcloud-secret.sh <secretName>` script to edit or inspect each secret.
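
For example (the secret name below is a placeholder):

```bash
# List all secrets available in the project:
gcloud secrets list

# Open one secret's YAML content for inspection or editing:
./hack/edit-gcloud-secret.sh some-secret-name
```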

## Configure Azure for CI to test on AKS

There are several CI jobs which test that Pinniped works when installed on Azure's AKS.
For these jobs to run, they need to be able to create and delete ephemeral AKS clusters.
This requires the following:

1. An active Azure Subscription. (A "subscription" in Azure is the equivalent of an "account" in AWS or a "project" in GCP.)
2. An Azure App Registration (basically, a service account) active in the same Directory (aka tenant) as the Subscription.
   Create the app in "My Organization Only". It does not need a redirect URI or any other optional settings.
   Create a client secret for this app. If you want the client secret to have a long lifetime, you can use the `az` CLI to create it, as shown in the sketch after this list.
   In the Subscription's IAM settings, assign this app the role "Azure Kubernetes Service Contributor Role" to allow
   the app to manage AKS clusters. Also assign this app the role "Reader" to allow it to read all resources
   (used by the `remove-orphaned-aks-clusters` CI task).
   Do not grant this app permissions in any other Subscription or use it for any other purpose.
3. Configure the pipelines with the app's Application (client) ID, Client Secret, and Directory (tenant) ID
   as the appropriate secret values.

The CI jobs will create and delete AKS clusters in a Resource Group called `pinniped-ci` within the provided Subscription.
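
A minimal sketch of creating a longer-lived client secret with the `az` CLI (the app ID and lifetime below are placeholders):

```bash
# Reset/create a client secret for the app registration with a two-year lifetime:
az ad app credential reset --id 00000000-0000-0000-0000-000000000000 --years 2
```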

## Configure AWS for CI to test on EKS

There are several CI jobs which test that Pinniped works when installed on Amazon's EKS.
For these jobs to run, they need to be able to create and delete ephemeral EKS clusters.
There are also some jobs to clean up any orphaned resources (e.g. IP addresses) in the AWS account.
These jobs require the following:

1. An active AWS account, which will only be used for this purpose.
2. Two IAM users in that account, each with a role that can be assumed.
   These IAM users should only be used for Pinniped CI and no other purpose.
   They should only have permissions to perform AWS actions in the relevant AWS account, and no other account.
3. The first user and role should have permission to create and delete EKS clusters using `eksctl`.
   The permissions required can be found in the [eksctl docs](https://eksctl.io/usage/minimum-iam-policies).
   The user also needs permission to run `aws logs put-retention-policy`, `aws ec2 describe-nat-gateways`,
   and `aws ec2 delete-nat-gateway`.
4. The second user and role should have broad permissions to get and delete everything in the account.
   It will be used to run `aws-nuke` to list and/or clean resources from the AWS account.
   To use `aws-nuke`, the user also needs to have an AWS account alias, which can be set as shown below
   (see the [cleanup-aws task](pipelines/shared-tasks/cleanup-aws/task.sh) for details).
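
A minimal sketch of setting the account alias (the alias value below is a placeholder):

```bash
aws iam create-account-alias --account-alias pinniped-ci-test-account
```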

## Setting Up Active Directory Test Environment

To test the `ActiveDirectoryIdentityProvider` functionality, we have a long-running Active Directory Domain Controller
server instance in our GCP account. See [AD-SETUP.md](AD-SETUP.md) for details.

## Running integration tests on your laptop using AD

The relevant environment variables can be pulled from the secret manager via the `hack/get-active-directory-env-vars.sh` script.
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:

```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login

# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-active-directory-vars "$HOME/path/to/pinniped-ci-branch/hack/get-active-directory-env-vars.sh"
```

## Running integration tests on your laptop using GitHub

The relevant environment variables can be pulled from the secret manager via the `hack/get-github-env-vars.sh` script.
This can be used by maintainers with Pinniped's `/hack/prepare-for-integration-tests.sh` script in the following way:

```bash
# Must authenticate to gcloud to access the secret manager.
gcloud auth login

# In the pinniped repo's main branch or in your PR branch:
hack/prepare-for-integration-tests.sh --get-github-vars "$HOME/path/to/pinniped-ci-branch/hack/get-github-env-vars.sh"
```

## License

Pinniped is open source and licensed under Apache License Version 2.0. See [LICENSE](LICENSE).

Copyright 2020 the Pinniped contributors. All Rights Reserved.
Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.

SECURITY.md (13 lines)
@@ -1,12 +1 @@
# Reporting a Vulnerability

Pinniped development is sponsored by VMware, and the Pinniped team encourages users
who become aware of a security vulnerability in Pinniped to report any potential
vulnerabilities found to security@vmware.com. If possible, please include a description
of the effects of the vulnerability, reproduction steps, and the version of Pinniped
or its dependencies in which the vulnerability was discovered.
The use of encrypted email is encouraged. The public PGP key can be found at https://kb.vmware.com/kb/1055.

The Pinniped team hopes that users encountering a new vulnerability will contact
us privately, as it is in the best interests of our users that the Pinniped team has
an opportunity to investigate and confirm a suspected vulnerability before it becomes public knowledge.
Please see https://github.com/vmware-tanzu/pinniped/blob/main/SECURITY.md
@@ -1,5 +0,0 @@
# API Generation Templates

This directory contains a template for generating our Kubernetes API code across several Kubernetes versions.

See the [`./generated`](../generated) directory for the rendered output.
@@ -1,8 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:deepcopy-gen=package
// +groupName=authentication.concierge.pinniped.dev

// Package authentication is the internal version of the Pinniped concierge authentication API.
package authentication
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1
@@ -1,12 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
    return RegisterDefaults(scheme)
}
@@ -1,11 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/concierge/authentication
// +k8s:defaulter-gen=TypeMeta
// +groupName=authentication.concierge.pinniped.dev

// Package v1alpha1 is the v1alpha1 version of the Pinniped concierge authentication API.
package v1alpha1
@@ -1,43 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "authentication.concierge.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
    SchemeBuilder      runtime.SchemeBuilder
    localSchemeBuilder = &SchemeBuilder
    AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
    // We only register manually written functions here. The registration of the
    // generated functions takes place in the generated files. The separation
    // makes the code compile even when the generated files are missing.
    localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &WebhookAuthenticator{},
        &WebhookAuthenticatorList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}
@@ -1,75 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// ConditionStatus is effectively an enum type for Condition.Status.
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
    ConditionTrue    ConditionStatus = "True"
    ConditionFalse   ConditionStatus = "False"
    ConditionUnknown ConditionStatus = "Unknown"
)

// Condition status of a resource (mirrored from the metav1.Condition type added in Kubernetes 1.19). In a future API
// version we can switch to using the upstream type.
// See https://github.com/kubernetes/apimachinery/blob/v0.19.0/pkg/apis/meta/v1/types.go#L1353-L1413.
type Condition struct {
    // type of condition in CamelCase or in foo.example.com/CamelCase.
    // ---
    // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
    // useful (see .node.status.conditions), the ability to deconflict is important.
    // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
    // +required
    // +kubebuilder:validation:Required
    // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
    // +kubebuilder:validation:MaxLength=316
    Type string `json:"type"`

    // status of the condition, one of True, False, Unknown.
    // +required
    // +kubebuilder:validation:Required
    // +kubebuilder:validation:Enum=True;False;Unknown
    Status ConditionStatus `json:"status"`

    // observedGeneration represents the .metadata.generation that the condition was set based upon.
    // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
    // with respect to the current state of the instance.
    // +optional
    // +kubebuilder:validation:Minimum=0
    ObservedGeneration int64 `json:"observedGeneration,omitempty"`

    // lastTransitionTime is the last time the condition transitioned from one status to another.
    // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
    // +required
    // +kubebuilder:validation:Required
    // +kubebuilder:validation:Type=string
    // +kubebuilder:validation:Format=date-time
    LastTransitionTime metav1.Time `json:"lastTransitionTime"`

    // reason contains a programmatic identifier indicating the reason for the condition's last transition.
    // Producers of specific condition types may define expected values and meanings for this field,
    // and whether the values are considered a guaranteed API.
    // The value should be a CamelCase string.
    // This field may not be empty.
    // +required
    // +kubebuilder:validation:Required
    // +kubebuilder:validation:MaxLength=1024
    // +kubebuilder:validation:MinLength=1
    // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
    Reason string `json:"reason"`

    // message is a human readable message indicating details about the transition.
    // This may be an empty string.
    // +required
    // +kubebuilder:validation:Required
    // +kubebuilder:validation:MaxLength=32768
    Message string `json:"message"`
}
@@ -1,11 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

// TLSSpec contains the TLS configuration for various authenticators.
type TLSSpec struct {
    // X.509 Certificate Authority (base64-encoded PEM bundle). If omitted, a default set of system roots will be trusted.
    // +optional
    CertificateAuthorityData string `json:"certificateAuthorityData,omitempty"`
}
@@ -1,53 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Status of a webhook authenticator.
type WebhookAuthenticatorStatus struct {
    // Represents the observations of the authenticator's current state.
    // +patchMergeKey=type
    // +patchStrategy=merge
    // +listType=map
    // +listMapKey=type
    Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}

// Spec for configuring a webhook authenticator.
type WebhookAuthenticatorSpec struct {
    // Webhook server endpoint URL.
    // +kubebuilder:validation:MinLength=1
    // +kubebuilder:validation:Pattern=`^https://`
    Endpoint string `json:"endpoint"`

    // TLS configuration.
    // +optional
    TLS *TLSSpec `json:"tls,omitempty"`
}

// WebhookAuthenticator describes the configuration of a webhook authenticator.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories=all;authenticator;authenticators
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
type WebhookAuthenticator struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    // Spec for configuring the authenticator.
    Spec WebhookAuthenticatorSpec `json:"spec"`

    // Status of the authenticator.
    Status WebhookAuthenticatorStatus `json:"status,omitempty"`
}

// List of WebhookAuthenticator objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type WebhookAuthenticatorList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`

    Items []WebhookAuthenticator `json:"items"`
}
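
Based on the field definitions above, a minimal sketch of a WebhookAuthenticator object as it might be applied to a cluster (the resource name, endpoint, and CA bundle value are placeholders):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
kind: WebhookAuthenticator
metadata:
  name: example-webhook-authenticator
spec:
  # Must be an https:// URL, per the kubebuilder validation pattern on Endpoint.
  endpoint: https://webhook.example.com/authenticate
  tls:
    # Optional base64-encoded PEM CA bundle; omit to trust system roots.
    certificateAuthorityData: LS0tLS1CRUdJTi4uLg==
EOF
```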
@@ -1,8 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:deepcopy-gen=package
// +groupName=config.concierge.pinniped.dev

// Package config is the internal version of the Pinniped concierge configuration API.
package config
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package config
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1
@@ -1,12 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
    return RegisterDefaults(scheme)
}
@@ -1,11 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/concierge/config
// +k8s:defaulter-gen=TypeMeta
// +groupName=config.concierge.pinniped.dev

// Package v1alpha1 is the v1alpha1 version of the Pinniped concierge configuration API.
package v1alpha1
@@ -1,43 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "config.concierge.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
    SchemeBuilder      runtime.SchemeBuilder
    localSchemeBuilder = &SchemeBuilder
    AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
    // We only register manually written functions here. The registration of the
    // generated functions takes place in the generated files. The separation
    // makes the code compile even when the generated files are missing.
    localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &CredentialIssuer{},
        &CredentialIssuerList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}
@@ -1,88 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:validation:Enum=KubeClusterSigningCertificate
type StrategyType string

// +kubebuilder:validation:Enum=Success;Error
type StrategyStatus string

// +kubebuilder:validation:Enum=FetchedKey;CouldNotFetchKey
type StrategyReason string

const (
    KubeClusterSigningCertificateStrategyType = StrategyType("KubeClusterSigningCertificate")

    SuccessStrategyStatus = StrategyStatus("Success")
    ErrorStrategyStatus   = StrategyStatus("Error")

    CouldNotFetchKeyStrategyReason = StrategyReason("CouldNotFetchKey")
    FetchedKeyStrategyReason       = StrategyReason("FetchedKey")
)

// Status of a credential issuer.
type CredentialIssuerStatus struct {
    // List of integration strategies that were attempted by Pinniped.
    Strategies []CredentialIssuerStrategy `json:"strategies"`

    // Information needed to form a valid Pinniped-based kubeconfig using this credential issuer.
    // +optional
    KubeConfigInfo *CredentialIssuerKubeConfigInfo `json:"kubeConfigInfo,omitempty"`
}

// Information needed to form a valid Pinniped-based kubeconfig using this credential issuer.
type CredentialIssuerKubeConfigInfo struct {
    // The K8s API server URL.
    // +kubebuilder:validation:MinLength=1
    // +kubebuilder:validation:Pattern=`^https://|^http://`
    Server string `json:"server"`

    // The K8s API server CA bundle.
    // +kubebuilder:validation:MinLength=1
    CertificateAuthorityData string `json:"certificateAuthorityData"`
}

// Status of an integration strategy that was attempted by Pinniped.
type CredentialIssuerStrategy struct {
    // Type of integration attempted.
    Type StrategyType `json:"type"`

    // Status of the attempted integration strategy.
    Status StrategyStatus `json:"status"`

    // Reason for the current status.
    Reason StrategyReason `json:"reason"`

    // Human-readable description of the current status.
    // +kubebuilder:validation:MinLength=1
    Message string `json:"message"`

    // When the status was last checked.
    LastUpdateTime metav1.Time `json:"lastUpdateTime"`
}

// Describes the configuration status of a Pinniped credential issuer.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CredentialIssuer struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    // Status of the credential issuer.
    Status CredentialIssuerStatus `json:"status"`
}

// List of CredentialIssuer objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CredentialIssuerList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`

    Items []CredentialIssuer `json:"items"`
}
@@ -1,8 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:deepcopy-gen=package
// +groupName=login.concierge.pinniped.dev

// Package login is the internal version of the Pinniped login API.
package login
@@ -1,38 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package login

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "login.concierge.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
    return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
    SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
    AddToScheme   = SchemeBuilder.AddToScheme
)

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &TokenCredentialRequest{},
        &TokenCredentialRequestList{},
    )
    return nil
}
@@ -1,21 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package login

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// ClusterCredential is a credential (token or certificate) which is valid on the Kubernetes cluster.
type ClusterCredential struct {
    // ExpirationTimestamp indicates a time when the provided credentials expire.
    ExpirationTimestamp metav1.Time

    // Token is a bearer token used by the client for request authentication.
    Token string

    // PEM-encoded client TLS certificates (including intermediates, if any).
    ClientCertificateData string

    // PEM-encoded private key for the above certificate.
    ClientKeyData string
}
@@ -1,48 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package login

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type TokenCredentialRequestSpec struct {
    // Bearer token supplied with the credential request.
    Token string

    // Reference to an authenticator which can validate this credential request.
    Authenticator corev1.TypedLocalObjectReference
}

type TokenCredentialRequestStatus struct {
    // A ClusterCredential will be returned for a successful credential request.
    // +optional
    Credential *ClusterCredential

    // An error message will be returned for an unsuccessful credential request.
    // +optional
    Message *string
}

// TokenCredentialRequest submits an IDP-specific credential to Pinniped in exchange for a cluster-specific credential.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TokenCredentialRequest struct {
    metav1.TypeMeta
    metav1.ObjectMeta

    Spec   TokenCredentialRequestSpec
    Status TokenCredentialRequestStatus
}

// TokenCredentialRequestList is a list of TokenCredentialRequest objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TokenCredentialRequestList struct {
    metav1.TypeMeta
    metav1.ListMeta

    // Items is a list of TokenCredentialRequest
    Items []TokenCredentialRequest
}
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1
@@ -1,12 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
    return RegisterDefaults(scheme)
}
@@ -1,11 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/concierge/login
// +k8s:defaulter-gen=TypeMeta
// +groupName=login.concierge.pinniped.dev

// Package v1alpha1 is the v1alpha1 version of the Pinniped login API.
package v1alpha1
@@ -1,43 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "login.concierge.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
    SchemeBuilder      runtime.SchemeBuilder
    localSchemeBuilder = &SchemeBuilder
    AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
    // We only register manually written functions here. The registration of the
    // generated functions takes place in the generated files. The separation
    // makes the code compile even when the generated files are missing.
    localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &TokenCredentialRequest{},
        &TokenCredentialRequestList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}
@@ -1,22 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// ClusterCredential is the cluster-specific credential returned on a successful credential request. It
// contains either a valid bearer token or a valid TLS certificate and corresponding private key for the cluster.
type ClusterCredential struct {
    // ExpirationTimestamp indicates a time when the provided credentials expire.
    ExpirationTimestamp metav1.Time `json:"expirationTimestamp,omitempty"`

    // Token is a bearer token used by the client for request authentication.
    Token string `json:"token,omitempty"`

    // PEM-encoded client TLS certificates (including intermediates, if any).
    ClientCertificateData string `json:"clientCertificateData,omitempty"`

    // PEM-encoded private key for the above certificate.
    ClientKeyData string `json:"clientKeyData,omitempty"`
}
@@ -1,49 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TokenCredentialRequestSpec is the specification of a TokenCredentialRequest, expected on requests to the Pinniped API.
type TokenCredentialRequestSpec struct {
    // Bearer token supplied with the credential request.
    Token string `json:"token,omitempty"`

    // Reference to an authenticator which can validate this credential request.
    Authenticator corev1.TypedLocalObjectReference `json:"authenticator"`
}

// TokenCredentialRequestStatus is the status of a TokenCredentialRequest, returned on responses to the Pinniped API.
type TokenCredentialRequestStatus struct {
    // A Credential will be returned for a successful credential request.
    // +optional
    Credential *ClusterCredential `json:"credential,omitempty"`

    // An error message will be returned for an unsuccessful credential request.
    // +optional
    Message *string `json:"message,omitempty"`
}

// TokenCredentialRequest submits an IDP-specific credential to Pinniped in exchange for a cluster-specific credential.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TokenCredentialRequest struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   TokenCredentialRequestSpec   `json:"spec,omitempty"`
    Status TokenCredentialRequestStatus `json:"status,omitempty"`
}

// TokenCredentialRequestList is a list of TokenCredentialRequest objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TokenCredentialRequestList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`

    Items []TokenCredentialRequest `json:"items"`
}
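
Based on the field definitions above, a minimal sketch of the object a client might submit to this API (the token value and the authenticator reference are placeholders):

```bash
cat <<EOF | kubectl create -o yaml -f -
apiVersion: login.concierge.pinniped.dev/v1alpha1
kind: TokenCredentialRequest
spec:
  token: example-opaque-token
  authenticator:
    apiGroup: authentication.concierge.pinniped.dev
    kind: WebhookAuthenticator
    name: example-webhook-authenticator
EOF
```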
@@ -1,8 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:deepcopy-gen=package
// +groupName=config.supervisor.pinniped.dev

// Package config is the internal version of the Pinniped configuration API.
package config
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package config
@@ -1,4 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1
@@ -1,12 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
    return RegisterDefaults(scheme)
}
@@ -1,11 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=go.pinniped.dev/GENERATED_PKG/apis/supervisor/config
// +k8s:defaulter-gen=TypeMeta
// +groupName=config.supervisor.pinniped.dev

// Package v1alpha1 is the v1alpha1 version of the Pinniped supervisor configuration API.
package v1alpha1
@@ -1,43 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "config.supervisor.pinniped.dev"

// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
    SchemeBuilder      runtime.SchemeBuilder
    localSchemeBuilder = &SchemeBuilder
    AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
    // We only register manually written functions here. The registration of the
    // generated functions takes place in the generated files. The separation
    // makes the code compile even when the generated files are missing.
    localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &OIDCProvider{},
        &OIDCProviderList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}
@@ -1,107 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:validation:Enum=Success;Duplicate;SameIssuerHostMustUseSameSecret;Invalid
type OIDCProviderStatusCondition string

const (
	SuccessOIDCProviderStatusCondition                         = OIDCProviderStatusCondition("Success")
	DuplicateOIDCProviderStatusCondition                       = OIDCProviderStatusCondition("Duplicate")
	SameIssuerHostMustUseSameSecretOIDCProviderStatusCondition = OIDCProviderStatusCondition("SameIssuerHostMustUseSameSecret")
	InvalidOIDCProviderStatusCondition                         = OIDCProviderStatusCondition("Invalid")
)

// OIDCProviderTLSSpec is a struct that describes the TLS configuration for an OIDC Provider.
type OIDCProviderTLSSpec struct {
	// SecretName is an optional name of a Secret in the same namespace, of type `kubernetes.io/tls`, which contains
	// the TLS serving certificate for the HTTPS endpoints served by this OIDCProvider. When provided, the TLS Secret
	// named here must contain keys named `tls.crt` and `tls.key` that contain the certificate and private key to use
	// for TLS.
	//
	// Server Name Indication (SNI) is an extension to the Transport Layer Security (TLS) protocol that is supported
	// by all major browsers.
	//
	// SecretName is required if you would like to use different TLS certificates for issuers of different hostnames.
	// SNI requests do not include port numbers, so all issuers with the same DNS hostname must use the same
	// SecretName value even if they have different port numbers.
	//
	// SecretName is not required when you would like to use only the HTTP endpoints (e.g. when terminating TLS at an
	// Ingress). It is also not required when you would like all requests to this OIDC Provider's HTTPS endpoints to
	// use the default TLS certificate, which is configured elsewhere.
	//
	// When your Issuer URL's host is an IP address, then this field is ignored. SNI does not work for IP addresses.
	//
	// +optional
	SecretName string `json:"secretName,omitempty"`
}

// OIDCProviderSpec is a struct that describes an OIDC Provider.
type OIDCProviderSpec struct {
	// Issuer is the OIDC Provider's issuer, per the OIDC Discovery Metadata document, as well as the
	// identifier that it will use for the iss claim in issued JWTs. This field will also be used as
	// the base URL for any endpoints used by the OIDC Provider (e.g., if your issuer is
	// https://example.com/foo, then your authorization endpoint will look like
	// https://example.com/foo/some/path/to/auth/endpoint).
	//
	// See
	// https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.3 for more information.
	// +kubebuilder:validation:MinLength=1
	Issuer string `json:"issuer"`

	// TLS configures how this OIDCProvider is served over Transport Layer Security (TLS).
	// +optional
	TLS *OIDCProviderTLSSpec `json:"tls,omitempty"`
}

// OIDCProviderStatus is a struct that describes the actual state of an OIDC Provider.
type OIDCProviderStatus struct {
	// Status holds an enum that describes the state of this OIDC Provider. Note that this Status can
	// represent success or failure.
	// +optional
	Status OIDCProviderStatusCondition `json:"status,omitempty"`

	// Message provides human-readable details about the Status.
	// +optional
	Message string `json:"message,omitempty"`

	// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
	// around some undesirable behavior with respect to the empty metav1.Time value (see
	// https://github.com/kubernetes/kubernetes/issues/86811).
	// +optional
	LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`

	// JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys
	// are stored. If it is empty, then the signing/verification keys are either unknown or they don't
	// exist.
	// +optional
	JWKSSecret corev1.LocalObjectReference `json:"jwksSecret,omitempty"`
}

// OIDCProvider describes the configuration of an OIDC provider.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type OIDCProvider struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec of the OIDC provider.
	Spec OIDCProviderSpec `json:"spec"`

	// Status of the OIDC provider.
	Status OIDCProviderStatus `json:"status,omitempty"`
}

// List of OIDCProvider objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type OIDCProviderList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []OIDCProvider `json:"items"`
}
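// Editor's illustration (not part of the diff above): a minimal sketch of how
// the types defined in this file compose when an OIDCProvider is declared from
// Go. The object name, namespace, Secret name, and the generated import path
// are all assumptions and may differ across Pinniped versions.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Assumed import path for the v1alpha1 types shown above.
	configv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/config/v1alpha1"
)

func main() {
	// Declare an OIDCProvider whose issuer URL carries a path segment and which
	// serves TLS using a named Secret of type kubernetes.io/tls, per the
	// OIDCProviderTLSSpec doc comment above.
	op := configv1alpha1.OIDCProvider{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-provider",         // hypothetical name
			Namespace: "pinniped-supervisor", // hypothetical namespace
		},
		Spec: configv1alpha1.OIDCProviderSpec{
			Issuer: "https://issuer.example.com/oidc",
			TLS:    &configv1alpha1.OIDCProviderTLSSpec{SecretName: "my-tls-cert"},
		},
	}
	fmt.Printf("issuer=%s tlsSecret=%s\n", op.Spec.Issuer, op.Spec.TLS.SecretName)
}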
@@ -1,400 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package main provides an authentication webhook program.
//
// This webhook is meant to be used in demo settings to play around with
// Pinniped. It can also come in handy in integration tests.
//
// This webhook is NOT meant for use in production systems.
package main

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/csv"
	"encoding/json"
	"fmt"
	"mime"
	"net"
	"net/http"
	"os"
	"os/signal"
	"strings"
	"time"

	"golang.org/x/crypto/bcrypt"
	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/constable"
	"go.pinniped.dev/internal/controller/apicerts"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/dynamiccert"
)

const (
	// This string must match the name of the Namespace declared in the deployment yaml.
	namespace = "local-user-authenticator"
	// This string must match the name of the Service declared in the deployment yaml.
	serviceName = "local-user-authenticator"

	singletonWorker       = 1
	defaultResyncInterval = 3 * time.Minute

	invalidRequest = constable.Error("invalid request")
)

type webhook struct {
	certProvider   dynamiccert.Provider
	secretInformer corev1informers.SecretInformer
}

func newWebhook(
	certProvider dynamiccert.Provider,
	secretInformer corev1informers.SecretInformer,
) *webhook {
	return &webhook{
		certProvider:   certProvider,
		secretInformer: secretInformer,
	}
}

// start runs the webhook in a separate goroutine and returns an error if the
// webhook could not be started.
func (w *webhook) start(ctx context.Context, l net.Listener) error {
	server := http.Server{
		Handler: w,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS13,
			GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
				certPEM, keyPEM := w.certProvider.CurrentCertKeyContent()
				cert, err := tls.X509KeyPair(certPEM, keyPEM)
				return &cert, err
			},
		},
	}

	errCh := make(chan error)
	go func() {
		// Per ListenAndServeTLS doc, the {cert,key}File parameters can be empty
		// since we want to use the certs from http.Server.TLSConfig.
		errCh <- server.ServeTLS(l, "", "")
	}()

	go func() {
		select {
		case err := <-errCh:
			klog.InfoS("server exited", "err", err)
		case <-ctx.Done():
			klog.InfoS("server context cancelled", "err", ctx.Err())
			if err := server.Shutdown(context.Background()); err != nil {
				klog.InfoS("server shutdown failed", "err", err)
			}
		}
	}()

	return nil
}

func (w *webhook) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
	username, password, err := getUsernameAndPasswordFromRequest(rsp, req)
	if err != nil {
		return
	}
	defer func() { _ = req.Body.Close() }()

	secret, err := w.secretInformer.Lister().Secrets(namespace).Get(username)
	notFound := k8serrors.IsNotFound(err)
	if err != nil && !notFound {
		klog.InfoS("could not get secret", "err", err)
		rsp.WriteHeader(http.StatusInternalServerError)
		return
	}

	if notFound {
		klog.InfoS("user not found")
		respondWithUnauthenticated(rsp)
		return
	}

	passwordMatches := bcrypt.CompareHashAndPassword(
		secret.Data["passwordHash"],
		[]byte(password),
	) == nil
	if !passwordMatches {
		klog.InfoS("authentication failed: wrong password")
		respondWithUnauthenticated(rsp)
		return
	}

	groups := []string{}
	groupsBuf := bytes.NewBuffer(secret.Data["groups"])
	if groupsBuf.Len() > 0 {
		groupsCSVReader := csv.NewReader(groupsBuf)
		groups, err = groupsCSVReader.Read()
		if err != nil {
			klog.InfoS("could not read groups", "err", err)
			rsp.WriteHeader(http.StatusInternalServerError)
			return
		}
		trimLeadingAndTrailingWhitespace(groups)
	}

	klog.InfoS("successful authentication")
	respondWithAuthenticated(rsp, secret.ObjectMeta.Name, string(secret.UID), groups)
}

func getUsernameAndPasswordFromRequest(rsp http.ResponseWriter, req *http.Request) (string, string, error) {
	if req.URL.Path != "/authenticate" {
		klog.InfoS("received request path other than /authenticate", "path", req.URL.Path)
		rsp.WriteHeader(http.StatusNotFound)
		return "", "", invalidRequest
	}

	if req.Method != http.MethodPost {
		klog.InfoS("received request method other than post", "method", req.Method)
		rsp.WriteHeader(http.StatusMethodNotAllowed)
		return "", "", invalidRequest
	}

	if !headerContains(req, "Content-Type", "application/json") {
		klog.InfoS("content type is not application/json", "Content-Type", req.Header.Values("Content-Type"))
		rsp.WriteHeader(http.StatusUnsupportedMediaType)
		return "", "", invalidRequest
	}

	if !headerContains(req, "Accept", "application/json") &&
		!headerContains(req, "Accept", "application/*") &&
		!headerContains(req, "Accept", "*/*") {
		klog.InfoS("client does not accept application/json", "Accept", req.Header.Values("Accept"))
		rsp.WriteHeader(http.StatusUnsupportedMediaType)
		return "", "", invalidRequest
	}

	if req.Body == nil {
		klog.InfoS("invalid nil body")
		rsp.WriteHeader(http.StatusBadRequest)
		return "", "", invalidRequest
	}

	var body authenticationv1beta1.TokenReview
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		klog.InfoS("failed to decode body", "err", err)
		rsp.WriteHeader(http.StatusBadRequest)
		return "", "", invalidRequest
	}

	if body.APIVersion != authenticationv1beta1.SchemeGroupVersion.String() {
		klog.InfoS("invalid TokenReview apiVersion", "apiVersion", body.APIVersion)
		rsp.WriteHeader(http.StatusBadRequest)
		return "", "", invalidRequest
	}

	if body.Kind != "TokenReview" {
		klog.InfoS("invalid TokenReview kind", "kind", body.Kind)
		rsp.WriteHeader(http.StatusBadRequest)
		return "", "", invalidRequest
	}

	tokenSegments := strings.SplitN(body.Spec.Token, ":", 2)
	if len(tokenSegments) != 2 {
		klog.InfoS("bad token format in request")
		rsp.WriteHeader(http.StatusBadRequest)
		return "", "", invalidRequest
	}

	return tokenSegments[0], tokenSegments[1], nil
}

func headerContains(req *http.Request, headerName, s string) bool {
	headerValues := req.Header.Values(headerName)
	for i := range headerValues {
		mimeTypes := strings.Split(headerValues[i], ",")
		for _, mimeType := range mimeTypes {
			mediaType, _, _ := mime.ParseMediaType(mimeType)
			if mediaType == s {
				return true
			}
		}
	}
	return false
}

func trimLeadingAndTrailingWhitespace(ss []string) {
	for i := range ss {
		ss[i] = strings.TrimSpace(ss[i])
	}
}

func respondWithUnauthenticated(rsp http.ResponseWriter) {
	rsp.Header().Add("Content-Type", "application/json")

	body := authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: authenticationv1beta1.SchemeGroupVersion.String(),
		},
		Status: authenticationv1beta1.TokenReviewStatus{
			Authenticated: false,
		},
	}
	if err := json.NewEncoder(rsp).Encode(body); err != nil {
		klog.InfoS("could not encode response", "err", err)
		rsp.WriteHeader(http.StatusInternalServerError)
	}
}

func respondWithAuthenticated(
	rsp http.ResponseWriter,
	username, uid string,
	groups []string,
) {
	rsp.Header().Add("Content-Type", "application/json")
	body := authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: authenticationv1beta1.SchemeGroupVersion.String(),
		},
		Status: authenticationv1beta1.TokenReviewStatus{
			Authenticated: true,
			User: authenticationv1beta1.UserInfo{
				Username: username,
				Groups:   groups,
				UID:      uid,
			},
		},
	}
	if err := json.NewEncoder(rsp).Encode(body); err != nil {
		klog.InfoS("could not encode response", "err", err)
		rsp.WriteHeader(http.StatusInternalServerError)
	}
}

func newK8sClient() (kubernetes.Interface, error) {
	kubeConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
	}

	// Connect to the core Kubernetes API.
	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("could not create kube client: %w", err)
	}

	return kubeClient, nil
}

func startControllers(
	ctx context.Context,
	dynamicCertProvider dynamiccert.Provider,
	kubeClient kubernetes.Interface,
	kubeInformers kubeinformers.SharedInformerFactory,
) {
	aVeryLongTime := time.Hour * 24 * 365 * 100

	const certsSecretResourceName = "local-user-authenticator-tls-serving-certificate"

	// Create controller manager.
	controllerManager := controllerlib.
		NewManager().
		WithController(
			apicerts.NewCertsManagerController(
				namespace,
				certsSecretResourceName,
				map[string]string{
					"app": "local-user-authenticator",
				},
				kubeClient,
				kubeInformers.Core().V1().Secrets(),
				controllerlib.WithInformer,
				controllerlib.WithInitialEvent,
				aVeryLongTime,
				"local-user-authenticator CA",
				serviceName,
			),
			singletonWorker,
		).
		WithController(
			apicerts.NewCertsObserverController(
				namespace,
				certsSecretResourceName,
				dynamicCertProvider,
				kubeInformers.Core().V1().Secrets(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		)

	kubeInformers.Start(ctx.Done())

	go controllerManager.Start(ctx)
}

func startWebhook(
	ctx context.Context,
	l net.Listener,
	dynamicCertProvider dynamiccert.Provider,
	secretInformer corev1informers.SecretInformer,
) error {
	return newWebhook(dynamicCertProvider, secretInformer).start(ctx, l)
}

func waitForSignal() os.Signal {
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt)
	return <-signalCh
}

func run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	kubeClient, err := newK8sClient()
	if err != nil {
		return fmt.Errorf("cannot create k8s client: %w", err)
	}

	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
		kubeClient,
		defaultResyncInterval,
		kubeinformers.WithNamespace(namespace),
	)

	dynamicCertProvider := dynamiccert.New()

	startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers)
	klog.InfoS("controllers are ready")

	//nolint: gosec // Intentionally binding to all network interfaces.
	l, err := net.Listen("tcp", ":8443")
	if err != nil {
		return fmt.Errorf("cannot create listener: %w", err)
	}
	defer func() { _ = l.Close() }()

	err = startWebhook(ctx, l, dynamicCertProvider, kubeInformers.Core().V1().Secrets())
	if err != nil {
		return fmt.Errorf("cannot start webhook: %w", err)
	}
	klog.InfoS("webhook is ready", "address", l.Addr().String())

	gotSignal := waitForSignal()
	klog.InfoS("webhook exiting", "signal", gotSignal)

	return nil
}

func main() {
	if err := run(); err != nil {
		klog.Fatal(err)
	}
}
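// Editor's illustration (not part of the diff above): a client-side sketch of
// the wire format the webhook expects. The package name and helper function
// are hypothetical; the caller must supply an *http.Client configured to trust
// the webhook's serving CA, e.g. built like newClient in the test file below.
package webhookclient

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// postTokenReview POSTs to /authenticate with JSON Content-Type and Accept
// headers, carrying a v1beta1 TokenReview whose token is "username:password".
// The webhook splits the token on the first colon only, which is why passwords
// containing colons still work.
func postTokenReview(client *http.Client, baseURL, username, password string) (*authenticationv1beta1.TokenReview, error) {
	reqBody, err := json.Marshal(authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: authenticationv1beta1.SchemeGroupVersion.String(),
		},
		Spec: authenticationv1beta1.TokenReviewSpec{Token: username + ":" + password},
	})
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(http.MethodPost, baseURL+"/authenticate", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")

	rsp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rsp.Body.Close() }()
	if rsp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %d", rsp.StatusCode)
	}

	var tr authenticationv1beta1.TokenReview
	return &tr, json.NewDecoder(rsp.Body).Decode(&tr)
}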
@@ -1,578 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"golang.org/x/crypto/bcrypt"
	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"

	"go.pinniped.dev/internal/certauthority"
	"go.pinniped.dev/internal/dynamiccert"
)

func TestWebhook(t *testing.T) {
	t.Parallel()

	const namespace = "local-user-authenticator"

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	user, otherUser, colonUser, noGroupUser, oneGroupUser, passwordUndefinedUser, emptyPasswordUser, invalidPasswordHashUser, undefinedGroupsUser :=
		"some-user", "other-user", "colon-user", "no-group-user", "one-group-user", "password-undefined-user", "empty-password-user", "invalid-password-hash-user", "undefined-groups-user"
	uid, otherUID, colonUID, noGroupUID, oneGroupUID, passwordUndefinedUID, emptyPasswordUID, invalidPasswordHashUID, undefinedGroupsUID :=
		"some-uid", "other-uid", "colon-uid", "no-group-uid", "one-group-uid", "password-undefined-uid", "empty-password-uid", "invalid-password-hash-uid", "undefined-groups-uid"
	password, otherPassword, colonPassword, noGroupPassword, oneGroupPassword, undefinedGroupsPassword :=
		"some-password", "other-password", "some-:-password", "no-group-password", "one-group-password", "undefined-groups-password"

	group0, group1 := "some-group-0", "some-group-1"
	groups := group0 + " , " + group1

	kubeClient := kubernetesfake.NewSimpleClientset()
	addSecretToFakeClientTracker(t, kubeClient, user, uid, password, groups)
	addSecretToFakeClientTracker(t, kubeClient, otherUser, otherUID, otherPassword, groups)
	addSecretToFakeClientTracker(t, kubeClient, colonUser, colonUID, colonPassword, groups)
	addSecretToFakeClientTracker(t, kubeClient, noGroupUser, noGroupUID, noGroupPassword, "")
	addSecretToFakeClientTracker(t, kubeClient, oneGroupUser, oneGroupUID, oneGroupPassword, group0)
	addSecretToFakeClientTracker(t, kubeClient, emptyPasswordUser, emptyPasswordUID, "", groups)

	require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(passwordUndefinedUID),
			Name:      passwordUndefinedUser,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"groups": []byte(groups),
		},
	}))

	undefinedGroupsUserPasswordHash, err := bcrypt.GenerateFromPassword([]byte(undefinedGroupsPassword), bcrypt.MinCost)
	require.NoError(t, err)

	require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(undefinedGroupsUID),
			Name:      undefinedGroupsUser,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"passwordHash": undefinedGroupsUserPasswordHash,
		},
	}))

	require.NoError(t, kubeClient.Tracker().Add(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(invalidPasswordHashUID),
			Name:      invalidPasswordHashUser,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"groups":       []byte(groups),
			"passwordHash": []byte("not a valid password hash"),
		},
	}))

	secretInformer := createSecretInformer(t, kubeClient)

	certProvider, caBundle, serverName := newCertProvider(t)
	w := newWebhook(certProvider, secretInformer)

	l, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	defer func() { _ = l.Close() }()
	require.NoError(t, w.start(ctx, l))

	client := newClient(caBundle, serverName)

	goodURL := fmt.Sprintf("https://%s/authenticate", l.Addr().String())
	goodRequestHeaders := map[string][]string{
		"Content-Type": {"application/json; charset=UTF-8"},
		"Accept":       {"application/json, */*"},
	}

	tests := []struct {
		name    string
		url     string
		method  string
		headers map[string][]string
		body    func() (io.ReadCloser, error)

		wantStatus  int
		wantHeaders map[string][]string
		wantBody    *authenticationv1beta1.TokenReview
	}{
		{
			name:        "success for a user who belongs to multiple groups",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(user, uid, []string{group0, group1}),
		},
		{
			name:        "success for a user who belongs to one group",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(oneGroupUser + ":" + oneGroupPassword) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(oneGroupUser, oneGroupUID, []string{group0}),
		},
		{
			name:        "success for a user who belongs to no groups",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(noGroupUser + ":" + noGroupPassword) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(noGroupUser, noGroupUID, nil),
		},
		{
			name:        "wrong username for password",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(otherUser + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:        "when a user has no password hash in the secret",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(passwordUndefinedUser + ":foo") },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:        "when a user has an invalid password hash in the secret",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(invalidPasswordHashUser + ":foo") },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:    "success for a user who has no groups defined in the secret",
			url:     goodURL,
			method:  http.MethodPost,
			headers: goodRequestHeaders,
			body: func() (io.ReadCloser, error) {
				return newTokenReviewBody(undefinedGroupsUser + ":" + undefinedGroupsPassword)
			},
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(undefinedGroupsUser, undefinedGroupsUID, nil),
		},
		{
			name:        "when a user has empty string as their password",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(emptyPasswordUser + ":foo") },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:        "wrong password for username",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + otherPassword) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:        "non-existent password for username",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + "some-non-existent-password") },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:        "non-existent username",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody("some-non-existent-user" + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    unauthenticatedResponseJSON(),
		},
		{
			name:       "bad token format (missing colon)",
			url:        goodURL,
			method:     http.MethodPost,
			headers:    goodRequestHeaders,
			body:       func() (io.ReadCloser, error) { return newTokenReviewBody(user) },
			wantStatus: http.StatusBadRequest,
		},
		{
			name:        "password contains colon",
			url:         goodURL,
			method:      http.MethodPost,
			headers:     goodRequestHeaders,
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(colonUser + ":" + colonPassword) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(colonUser, colonUID, []string{group0, group1}),
		},
		{
			name:    "bad TokenReview group",
			url:     goodURL,
			method:  http.MethodPost,
			headers: goodRequestHeaders,
			body: func() (io.ReadCloser, error) {
				return newTokenReviewBodyWithGVK(
					user+":"+password,
					&schema.GroupVersionKind{
						Group:   "bad group",
						Version: authenticationv1beta1.SchemeGroupVersion.Version,
						Kind:    "TokenReview",
					},
				)
			},
			wantStatus: http.StatusBadRequest,
		},
		{
			name:    "bad TokenReview version",
			url:     goodURL,
			method:  http.MethodPost,
			headers: goodRequestHeaders,
			body: func() (io.ReadCloser, error) {
				return newTokenReviewBodyWithGVK(
					user+":"+password,
					&schema.GroupVersionKind{
						Group:   authenticationv1beta1.SchemeGroupVersion.Group,
						Version: "bad version",
						Kind:    "TokenReview",
					},
				)
			},
			wantStatus: http.StatusBadRequest,
		},
		{
			name:    "bad TokenReview kind",
			url:     goodURL,
			method:  http.MethodPost,
			headers: goodRequestHeaders,
			body: func() (io.ReadCloser, error) {
				return newTokenReviewBodyWithGVK(
					user+":"+password,
					&schema.GroupVersionKind{
						Group:   authenticationv1beta1.SchemeGroupVersion.Group,
						Version: authenticationv1beta1.SchemeGroupVersion.Version,
						Kind:    "wrong-kind",
					},
				)
			},
			wantStatus: http.StatusBadRequest,
		},
		{
			name:       "bad path",
			url:        fmt.Sprintf("https://%s/tuna", l.Addr().String()),
			method:     http.MethodPost,
			headers:    goodRequestHeaders,
			body:       func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
			wantStatus: http.StatusNotFound,
		},
		{
			name:       "bad method",
			url:        goodURL,
			method:     http.MethodGet,
			headers:    goodRequestHeaders,
			body:       func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
			wantStatus: http.StatusMethodNotAllowed,
		},
		{
			name:   "bad content type",
			url:    goodURL,
			method: http.MethodPost,
			headers: map[string][]string{
				"Content-Type": {"application/xml"},
				"Accept":       {"application/json"},
			},
			body:       func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
			wantStatus: http.StatusUnsupportedMediaType,
		},
		{
			name:   "bad accept",
			url:    goodURL,
			method: http.MethodPost,
			headers: map[string][]string{
				"Content-Type": {"application/json"},
				"Accept":       {"application/xml"},
			},
			body:       func() (io.ReadCloser, error) { return newTokenReviewBody("some-token") },
			wantStatus: http.StatusUnsupportedMediaType,
		},
		{
			name:   "success when there are multiple accepts and one of them is json",
			url:    goodURL,
			method: http.MethodPost,
			headers: map[string][]string{
				"Content-Type": {"application/json"},
				"Accept":       {"something/else, application/xml, application/json"},
			},
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(user, uid, []string{group0, group1}),
		},
		{
			name:   "success when there are multiple accepts and one of them is */*",
			url:    goodURL,
			method: http.MethodPost,
			headers: map[string][]string{
				"Content-Type": {"application/json"},
				"Accept":       {"something/else, */*, application/foo"},
			},
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(user, uid, []string{group0, group1}),
		},
		{
			name:   "success when there are multiple accepts and one of them is application/*",
			url:    goodURL,
			method: http.MethodPost,
			headers: map[string][]string{
				"Content-Type": {"application/json"},
				"Accept":       {"something/else, application/*, application/foo"},
			},
			body:        func() (io.ReadCloser, error) { return newTokenReviewBody(user + ":" + password) },
			wantStatus:  http.StatusOK,
			wantHeaders: map[string][]string{"Content-Type": {"application/json"}},
			wantBody:    authenticatedResponseJSON(user, uid, []string{group0, group1}),
		},
		{
			name:       "bad body",
			url:        goodURL,
			method:     http.MethodPost,
			headers:    goodRequestHeaders,
			body:       func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewBuffer([]byte("invalid body"))), nil },
			wantStatus: http.StatusBadRequest,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			parsedURL, err := url.Parse(test.url)
			require.NoError(t, err)

			body, err := test.body()
			require.NoError(t, err)

			rsp, err := client.Do(&http.Request{
				Method: test.method,
				URL:    parsedURL,
				Header: test.headers,
				Body:   body,
			})
			require.NoError(t, err)
			defer func() { _ = rsp.Body.Close() }()

			require.Equal(t, test.wantStatus, rsp.StatusCode)

			if test.wantHeaders != nil {
				for k, v := range test.wantHeaders {
					require.Equal(t, v, rsp.Header.Values(k))
				}
			}

			responseBody, err := ioutil.ReadAll(rsp.Body)
			require.NoError(t, err)
			if test.wantBody != nil {
				var tr authenticationv1beta1.TokenReview
				require.NoError(t, json.Unmarshal(responseBody, &tr))
				require.Equal(t, test.wantBody, &tr)
			} else {
				require.Empty(t, responseBody)
			}
		})
	}
}

func createSecretInformer(t *testing.T, kubeClient kubernetes.Interface) corev1informers.SecretInformer {
	t.Helper()

	kubeInformers := kubeinformers.NewSharedInformerFactory(kubeClient, 0)

	secretInformer := kubeInformers.Core().V1().Secrets()

	// We need to call Informer() on the secretInformer to lazily instantiate the
	// informer factory before syncing it.
	secretInformer.Informer()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
	defer cancel()

	kubeInformers.Start(ctx.Done())

	informerTypesSynced := kubeInformers.WaitForCacheSync(ctx.Done())
	require.True(t, informerTypesSynced[reflect.TypeOf(&corev1.Secret{})])

	return secretInformer
}

// newCertProvider returns a dynamiccert.Provider configured with a valid
// serving cert, the CA bundle that can be used to verify the serving cert,
// and the server name that can be used to verify the TLS peer.
func newCertProvider(t *testing.T) (dynamiccert.Provider, []byte, string) {
	t.Helper()

	serverName := "local-user-authenticator"

	ca, err := certauthority.New(pkix.Name{CommonName: serverName + " CA"}, time.Hour*24)
	require.NoError(t, err)

	cert, err := ca.Issue(pkix.Name{CommonName: serverName}, []string{serverName}, nil, time.Hour*24)
	require.NoError(t, err)

	certPEM, keyPEM, err := certauthority.ToPEM(cert)
	require.NoError(t, err)

	certProvider := dynamiccert.New()
	certProvider.Set(certPEM, keyPEM)

	return certProvider, ca.Bundle(), serverName
}

// newClient creates an http.Client that can be used to make an HTTPS call to a
// service whose serving certs can be verified by the provided CA bundle.
func newClient(caBundle []byte, serverName string) *http.Client {
	rootCAs := x509.NewCertPool()
	rootCAs.AppendCertsFromPEM(caBundle)
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS13,
				RootCAs:    rootCAs,
				ServerName: serverName,
			},
		},
	}
}

// newTokenReviewBody creates an io.ReadCloser that contains a JSON-encoded
// TokenReview request with expected APIVersion and Kind fields.
func newTokenReviewBody(token string) (io.ReadCloser, error) {
	return newTokenReviewBodyWithGVK(
		token,
		&schema.GroupVersionKind{
			Group:   authenticationv1beta1.SchemeGroupVersion.Group,
			Version: authenticationv1beta1.SchemeGroupVersion.Version,
			Kind:    "TokenReview",
		},
	)
}

// newTokenReviewBodyWithGVK creates an io.ReadCloser that contains a
// JSON-encoded TokenReview request. The TypeMeta fields of the TokenReview are
// filled in with the provided gvk.
func newTokenReviewBodyWithGVK(token string, gvk *schema.GroupVersionKind) (io.ReadCloser, error) {
	buf := bytes.NewBuffer([]byte{})
	tr := authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: gvk.GroupVersion().String(),
			Kind:       gvk.Kind,
		},
		Spec: authenticationv1beta1.TokenReviewSpec{
			Token: token,
		},
	}
	err := json.NewEncoder(buf).Encode(&tr)
	return ioutil.NopCloser(buf), err
}

func unauthenticatedResponseJSON() *authenticationv1beta1.TokenReview {
	return &authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: "authentication.k8s.io/v1beta1",
		},
		Status: authenticationv1beta1.TokenReviewStatus{
			Authenticated: false,
		},
	}
}

func authenticatedResponseJSON(user, uid string, groups []string) *authenticationv1beta1.TokenReview {
	return &authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: "authentication.k8s.io/v1beta1",
		},
		Status: authenticationv1beta1.TokenReviewStatus{
			Authenticated: true,
			User: authenticationv1beta1.UserInfo{
				Username: user,
				Groups:   groups,
				UID:      uid,
			},
		},
	}
}

func addSecretToFakeClientTracker(t *testing.T, kubeClient *kubernetesfake.Clientset, username, uid, password, groups string) {
	passwordHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
	require.NoError(t, err)

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(uid),
			Name:      username,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"passwordHash": passwordHash,
			"groups":       []byte(groups),
		},
	}

	require.NoError(t, kubeClient.Tracker().Add(secret))
}
@@ -1,35 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"os"
	"time"

	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/client-go/pkg/version"
	"k8s.io/client-go/rest"
	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/concierge/server"
)

func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	// Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
	var timeSinceCompile time.Duration
	if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
		timeSinceCompile = time.Since(buildDate).Round(time.Second)
	}
	klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)

	ctx := genericapiserver.SetupSignalContext()

	if err := server.New(ctx, os.Args[1:], os.Stdout, os.Stderr).Run(); err != nil {
		klog.Fatal(err)
	}
}
@@ -1,264 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"os"
	"os/signal"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/version"
	"k8s.io/client-go/rest"
	restclient "k8s.io/client-go/rest"
	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"

	pinnipedclientset "go.pinniped.dev/generated/1.19/client/supervisor/clientset/versioned"
	pinnipedinformers "go.pinniped.dev/generated/1.19/client/supervisor/informers/externalversions"
	"go.pinniped.dev/internal/config/supervisor"
	"go.pinniped.dev/internal/controller/supervisorconfig"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/downward"
	"go.pinniped.dev/internal/oidc/jwks"
	"go.pinniped.dev/internal/oidc/provider"
	"go.pinniped.dev/internal/oidc/provider/manager"
)

const (
	singletonWorker       = 1
	defaultResyncInterval = 3 * time.Minute
)

func start(ctx context.Context, l net.Listener, handler http.Handler) {
	server := http.Server{Handler: handler}

	errCh := make(chan error)
	go func() {
		errCh <- server.Serve(l)
	}()

	go func() {
		select {
		case err := <-errCh:
			klog.InfoS("server exited", "err", err)
		case <-ctx.Done():
			klog.InfoS("server context cancelled", "err", ctx.Err())
			if err := server.Shutdown(context.Background()); err != nil {
				klog.InfoS("server shutdown failed", "err", err)
			}
		}
	}()
}

func waitForSignal() os.Signal {
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt)
	return <-signalCh
}

func startControllers(
	ctx context.Context,
	cfg *supervisor.Config,
	issuerManager *manager.Manager,
	dynamicJWKSProvider jwks.DynamicJWKSProvider,
	dynamicTLSCertProvider provider.DynamicTLSCertProvider,
	kubeClient kubernetes.Interface,
	pinnipedClient pinnipedclientset.Interface,
	kubeInformers kubeinformers.SharedInformerFactory,
	pinnipedInformers pinnipedinformers.SharedInformerFactory,
) {
	// Create controller manager.
	controllerManager := controllerlib.
		NewManager().
		WithController(
			supervisorconfig.NewOIDCProviderWatcherController(
				issuerManager,
				clock.RealClock{},
				pinnipedClient,
				pinnipedInformers.Config().V1alpha1().OIDCProviders(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		).
		WithController(
			supervisorconfig.NewJWKSWriterController(
				cfg.Labels,
				kubeClient,
				pinnipedClient,
				kubeInformers.Core().V1().Secrets(),
				pinnipedInformers.Config().V1alpha1().OIDCProviders(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		).
		WithController(
			supervisorconfig.NewJWKSObserverController(
				dynamicJWKSProvider,
				kubeInformers.Core().V1().Secrets(),
				pinnipedInformers.Config().V1alpha1().OIDCProviders(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		).
		WithController(
			supervisorconfig.NewTLSCertObserverController(
				dynamicTLSCertProvider,
				cfg.NamesConfig.DefaultTLSCertificateSecret,
				kubeInformers.Core().V1().Secrets(),
				pinnipedInformers.Config().V1alpha1().OIDCProviders(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		)

	kubeInformers.Start(ctx.Done())
	pinnipedInformers.Start(ctx.Done())

	// Wait until the caches are synced before returning.
	kubeInformers.WaitForCacheSync(ctx.Done())
	pinnipedInformers.WaitForCacheSync(ctx.Done())

	go controllerManager.Start(ctx)
}

func newClients() (kubernetes.Interface, pinnipedclientset.Interface, error) {
	kubeConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
	}

	// Connect to the core Kubernetes API.
	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("could not create kube client: %w", err)
	}

	// Connect to the Pinniped API.
	pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("could not create pinniped client: %w", err)
	}

	return kubeClient, pinnipedClient, nil
}

func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	kubeClient, pinnipedClient, err := newClients()
	if err != nil {
		return fmt.Errorf("cannot create k8s client: %w", err)
	}

	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
		kubeClient,
		defaultResyncInterval,
		kubeinformers.WithNamespace(serverInstallationNamespace),
	)

	pinnipedInformers := pinnipedinformers.NewSharedInformerFactoryWithOptions(
		pinnipedClient,
		defaultResyncInterval,
		pinnipedinformers.WithNamespace(serverInstallationNamespace),
	)

	// Serve the /healthz endpoint and make all other paths result in 404.
	healthMux := http.NewServeMux()
	healthMux.Handle("/healthz", http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		_, _ = writer.Write([]byte("ok"))
	}))

	dynamicJWKSProvider := jwks.NewDynamicJWKSProvider()
	dynamicTLSCertProvider := provider.NewDynamicTLSCertProvider()

	// OIDC endpoints will be served by the oidProvidersManager, and any non-OIDC paths will fall back to the healthMux.
	oidProvidersManager := manager.NewManager(healthMux, dynamicJWKSProvider)

	startControllers(
		ctx,
		cfg,
		oidProvidersManager,
		dynamicJWKSProvider,
		dynamicTLSCertProvider,
		kubeClient,
		pinnipedClient,
		kubeInformers,
		pinnipedInformers,
	)

	//nolint: gosec // Intentionally binding to all network interfaces.
	httpListener, err := net.Listen("tcp", ":8080")
	if err != nil {
		return fmt.Errorf("cannot create listener: %w", err)
	}
	defer func() { _ = httpListener.Close() }()
	start(ctx, httpListener, oidProvidersManager)

	//nolint: gosec // Intentionally binding to all network interfaces.
	httpsListener, err := tls.Listen("tcp", ":8443", &tls.Config{
		MinVersion: tls.VersionTLS12, // Allow v1.2 because clients like the default `curl` on MacOS don't support 1.3 yet.
		GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
			defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
			klog.InfoS("GetCertificate called for port 8443",
				"info.ServerName", info.ServerName,
				"foundSNICert", cert != nil,
				"foundDefaultCert", defaultCert != nil,
			)
			if cert == nil {
				cert = defaultCert
			}
			return cert, nil
		},
	})
	if err != nil {
		return fmt.Errorf("cannot create listener: %w", err)
	}
	defer func() { _ = httpsListener.Close() }()
	start(ctx, httpsListener, oidProvidersManager)

	klog.InfoS("supervisor is ready",
		"httpAddress", httpListener.Addr().String(),
		"httpsAddress", httpsListener.Addr().String(),
	)

	gotSignal := waitForSignal()
	klog.InfoS("supervisor exiting", "signal", gotSignal)

	return nil
}

func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
	klog.Infof("Command-line arguments were: %s %s %s", os.Args[0], os.Args[1], os.Args[2])

	// Discover in which namespace we are installed.
	podInfo, err := downward.Load(os.Args[1])
	if err != nil {
		klog.Fatal(fmt.Errorf("could not read pod metadata: %w", err))
	}

	// Read the server config file.
	cfg, err := supervisor.FromPath(os.Args[2])
	if err != nil {
		klog.Fatal(fmt.Errorf("could not load config: %w", err))
	}

	if err := run(podInfo.Namespace, cfg); err != nil {
		klog.Fatal(err)
	}
}
@@ -1,22 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"github.com/spf13/cobra"
)

//nolint: gochecknoglobals
var alphaCmd = &cobra.Command{
	Use:          "alpha",
	Short:        "alpha",
	Long:         "alpha subcommands (syntax or flags are still subject to change)",
	SilenceUsage: true, // do not print usage message when commands fail
	Hidden:       true,
}

//nolint: gochecknoinits
func init() {
	rootCmd.AddCommand(alphaCmd)
}
@@ -1,24 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import "github.com/spf13/cobra"

// mustMarkRequired marks the given flags as required on the provided cobra.Command. If any of the names are wrong, it panics.
func mustMarkRequired(cmd *cobra.Command, flags ...string) {
	for _, flag := range flags {
		if err := cmd.MarkFlagRequired(flag); err != nil {
			panic(err)
		}
	}
}

// mustMarkHidden marks the given flags as hidden on the provided cobra.Command. If any of the names are wrong, it panics.
func mustMarkHidden(cmd *cobra.Command, flags ...string) {
	for _, flag := range flags {
		if err := cmd.Flags().MarkHidden(flag); err != nil {
			panic(err)
		}
	}
}
@@ -1,21 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"testing"

	"github.com/spf13/cobra"
	"github.com/stretchr/testify/require"
)

func TestMustMarkRequired(t *testing.T) {
	require.NotPanics(t, func() { mustMarkRequired(&cobra.Command{}) })
	require.NotPanics(t, func() {
		cmd := &cobra.Command{}
		cmd.Flags().String("known-flag", "", "")
		mustMarkRequired(cmd, "known-flag")
	})
	require.Panics(t, func() { mustMarkRequired(&cobra.Command{}, "unknown-flag") })
}
@@ -1,167 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/spf13/cobra"
	corev1 "k8s.io/api/core/v1"
	clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"

	auth1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
	"go.pinniped.dev/internal/client"
	"go.pinniped.dev/internal/constable"
	"go.pinniped.dev/internal/here"
)

//nolint: gochecknoinits
func init() {
	rootCmd.AddCommand(newExchangeCredentialCmd(os.Args, os.Stdout, os.Stderr).cmd)
}

type exchangeCredentialCommand struct {
	// runFunc is called by the cobra.Command.Run hook. It is included here for
	// testability.
	runFunc func(stdout, stderr io.Writer)

	// cmd is the cobra.Command for this CLI command. It is included here for
	// testability.
	cmd *cobra.Command
}

func newExchangeCredentialCmd(args []string, stdout, stderr io.Writer) *exchangeCredentialCommand {
	c := &exchangeCredentialCommand{
		runFunc: runExchangeCredential,
	}

	c.cmd = &cobra.Command{
		Run: func(cmd *cobra.Command, _ []string) {
			c.runFunc(stdout, stderr)
		},
		Args:  cobra.NoArgs, // do not accept positional arguments for this command
		Use:   "exchange-credential",
		Short: "Exchange a credential for a cluster-specific access credential",
		Long: here.Doc(`
			Exchange a credential which proves your identity for a time-limited,
			cluster-specific access credential.

			Designed to be conveniently used as a credential plugin for kubectl.
			See the help message for 'pinniped get-kubeconfig' for more
			information about setting up a kubeconfig file using Pinniped.

			Requires all of the following environment variables, which are
			typically set in the kubeconfig:
			  - PINNIPED_TOKEN: the token to send to Pinniped for exchange
			  - PINNIPED_NAMESPACE: the namespace of the authenticator to authenticate
			    against
			  - PINNIPED_AUTHENTICATOR_TYPE: the type of authenticator to authenticate
			    against (e.g., "webhook")
			  - PINNIPED_AUTHENTICATOR_NAME: the name of the authenticator to authenticate
			    against
			  - PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
			    Pinniped's HTTPS endpoint
			  - PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
			    exchange API

			For more information about credential plugins in general, see
			https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
		`),
	}

	c.cmd.SetArgs(args)
	c.cmd.SetOut(stdout)
	c.cmd.SetErr(stderr)

	return c
}

type envGetter func(string) (string, bool)
type tokenExchanger func(
	ctx context.Context,
	namespace string,
	authenticator corev1.TypedLocalObjectReference,
	token string,
	caBundle string,
	apiEndpoint string,
) (*clientauthenticationv1beta1.ExecCredential, error)

const (
	ErrMissingEnvVar            = constable.Error("failed to get credential: environment variable not set")
	ErrInvalidAuthenticatorType = constable.Error("invalid authenticator type")
)

func runExchangeCredential(stdout, _ io.Writer) {
	err := exchangeCredential(os.LookupEnv, client.ExchangeToken, stdout, 30*time.Second)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		os.Exit(1)
	}
}

func exchangeCredential(envGetter envGetter, tokenExchanger tokenExchanger, outputWriter io.Writer, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	namespace, varExists := envGetter("PINNIPED_NAMESPACE")
	if !varExists {
		return envVarNotSetError("PINNIPED_NAMESPACE")
	}

	authenticatorType, varExists := envGetter("PINNIPED_AUTHENTICATOR_TYPE")
	if !varExists {
		return envVarNotSetError("PINNIPED_AUTHENTICATOR_TYPE")
	}

	authenticatorName, varExists := envGetter("PINNIPED_AUTHENTICATOR_NAME")
	if !varExists {
		return envVarNotSetError("PINNIPED_AUTHENTICATOR_NAME")
	}

	token, varExists := envGetter("PINNIPED_TOKEN")
	if !varExists {
		return envVarNotSetError("PINNIPED_TOKEN")
	}

	caBundle, varExists := envGetter("PINNIPED_CA_BUNDLE")
	if !varExists {
		return envVarNotSetError("PINNIPED_CA_BUNDLE")
	}

	apiEndpoint, varExists := envGetter("PINNIPED_K8S_API_ENDPOINT")
	if !varExists {
		return envVarNotSetError("PINNIPED_K8S_API_ENDPOINT")
	}

	authenticator := corev1.TypedLocalObjectReference{Name: authenticatorName}
	switch strings.ToLower(authenticatorType) {
	case "webhook":
		authenticator.APIGroup = &auth1alpha1.SchemeGroupVersion.Group
		authenticator.Kind = "WebhookAuthenticator"
	default:
		return fmt.Errorf(`%w: %q, supported values are "webhook"`, ErrInvalidAuthenticatorType, authenticatorType)
	}

	cred, err := tokenExchanger(ctx, namespace, authenticator, token, caBundle, apiEndpoint)
	if err != nil {
		return fmt.Errorf("failed to get credential: %w", err)
	}

	err = json.NewEncoder(outputWriter).Encode(cred)
	if err != nil {
		return fmt.Errorf("failed to marshal response to stdout: %w", err)
	}

	return nil
}

func envVarNotSetError(varName string) error {
	return fmt.Errorf("%w: %s", ErrMissingEnvVar, varName)
}
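// Editor's illustration (not part of the diff above): the envGetter and
// tokenExchanger types exist as seams for testing. This hypothetical test
// sketch (it assumes it lives in the same package; the fake exchanger and all
// test values are made up) exercises exchangeCredential without touching real
// environment variables or the network.
package cmd

import (
	"bytes"
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
)

func TestExchangeCredentialSketch(t *testing.T) {
	env := map[string]string{
		"PINNIPED_NAMESPACE":          "test-namespace",
		"PINNIPED_AUTHENTICATOR_TYPE": "webhook",
		"PINNIPED_AUTHENTICATOR_NAME": "test-authenticator",
		"PINNIPED_TOKEN":              "test-token",
		"PINNIPED_CA_BUNDLE":          "test-ca-bundle",
		"PINNIPED_K8S_API_ENDPOINT":   "https://test-endpoint.example.com",
	}

	// Stand-in for os.LookupEnv that reads from the map above.
	getter := func(name string) (string, bool) {
		value, ok := env[name]
		return value, ok
	}

	// Stand-in for client.ExchangeToken that returns a canned credential.
	exchanger := func(
		ctx context.Context,
		namespace string,
		authenticator corev1.TypedLocalObjectReference,
		token, caBundle, apiEndpoint string,
	) (*clientauthenticationv1beta1.ExecCredential, error) {
		return &clientauthenticationv1beta1.ExecCredential{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ExecCredential",
				APIVersion: "client.authentication.k8s.io/v1beta1",
			},
		}, nil
	}

	var out bytes.Buffer
	require.NoError(t, exchangeCredential(getter, exchanger, &out, 30*time.Second))
	// out now holds the JSON-encoded ExecCredential that kubectl would consume.
	require.Contains(t, out.String(), "ExecCredential")
}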
@@ -1,296 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "testing"
    "time"

    "github.com/sclevine/spec"
    "github.com/sclevine/spec/report"
    "github.com/stretchr/testify/require"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"

    "go.pinniped.dev/internal/here"
    "go.pinniped.dev/internal/testutil"
)

var (
    knownGoodUsageForExchangeCredential = here.Doc(`
        Usage:
          exchange-credential [flags]

        Flags:
          -h, --help   help for exchange-credential

    `)

    knownGoodHelpForExchangeCredential = here.Doc(`
        Exchange a credential which proves your identity for a time-limited,
        cluster-specific access credential.

        Designed to be conveniently used as an credential plugin for kubectl.
        See the help message for 'pinniped get-kubeconfig' for more
        information about setting up a kubeconfig file using Pinniped.

        Requires all of the following environment variables, which are
        typically set in the kubeconfig:
          - PINNIPED_TOKEN: the token to send to Pinniped for exchange
          - PINNIPED_NAMESPACE: the namespace of the authenticator to authenticate
            against
          - PINNIPED_AUTHENTICATOR_TYPE: the type of authenticator to authenticate
            against (e.g., "webhook")
          - PINNIPED_AUTHENTICATOR_NAME: the name of the authenticator to authenticate
            against
          - PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
            Pinniped's HTTPS endpoint
          - PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
            exchange API

        For more information about credential plugins in general, see
        https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins

        Usage:
          exchange-credential [flags]

        Flags:
          -h, --help   help for exchange-credential
    `)
)

func TestNewCredentialExchangeCmd(t *testing.T) {
    spec.Run(t, "newCredentialExchangeCmd", func(t *testing.T, when spec.G, it spec.S) {
        var r *require.Assertions
        var stdout, stderr *bytes.Buffer

        it.Before(func() {
            r = require.New(t)

            stdout, stderr = bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
        })

        it("calls runFunc and does not print usage or help when correct arguments and flags are used", func() {
            c := newExchangeCredentialCmd([]string{}, stdout, stderr)

            runFuncCalled := false
            c.runFunc = func(out, err io.Writer) {
                runFuncCalled = true
            }

            r.NoError(c.cmd.Execute())
            r.True(runFuncCalled)
            r.Empty(stdout.String())
            r.Empty(stderr.String())
        })

        it("fails when args are passed", func() {
            c := newExchangeCredentialCmd([]string{"some-arg"}, stdout, stderr)

            runFuncCalled := false
            c.runFunc = func(out, err io.Writer) {
                runFuncCalled = true
            }

            errorMessage := `unknown command "some-arg" for "exchange-credential"`
            r.EqualError(c.cmd.Execute(), errorMessage)
            r.False(runFuncCalled)

            output := "Error: " + errorMessage + "\n" + knownGoodUsageForExchangeCredential
            r.Equal(output, stdout.String())
            r.Empty(stderr.String())
        })

        it("prints a nice help message", func() {
            c := newExchangeCredentialCmd([]string{"--help"}, stdout, stderr)

            runFuncCalled := false
            c.runFunc = func(out, err io.Writer) {
                runFuncCalled = true
            }

            r.NoError(c.cmd.Execute())
            r.False(runFuncCalled)
            r.Equal(knownGoodHelpForExchangeCredential, stdout.String())
            r.Empty(stderr.String())
        })
    }, spec.Parallel(), spec.Report(report.Terminal{}))
}

func TestExchangeCredential(t *testing.T) {
    spec.Run(t, "cmd.exchangeCredential", func(t *testing.T, when spec.G, it spec.S) {
        var r *require.Assertions
        var buffer *bytes.Buffer
        var tokenExchanger tokenExchanger
        var fakeEnv map[string]string

        var envGetter envGetter = func(envVarName string) (string, bool) {
            value, present := fakeEnv[envVarName]
            if !present {
                return "", false
            }
            return value, true
        }

        it.Before(func() {
            r = require.New(t)
            buffer = new(bytes.Buffer)
            fakeEnv = map[string]string{
                "PINNIPED_NAMESPACE":          "namespace from env",
                "PINNIPED_AUTHENTICATOR_TYPE": "Webhook",
                "PINNIPED_AUTHENTICATOR_NAME": "webhook name from env",
                "PINNIPED_TOKEN":              "token from env",
                "PINNIPED_CA_BUNDLE":          "ca bundle from env",
                "PINNIPED_K8S_API_ENDPOINT":   "k8s api from env",
            }
        })

        when("env vars are missing", func() {
            it("returns an error when PINNIPED_NAMESPACE is missing", func() {
                delete(fakeEnv, "PINNIPED_NAMESPACE")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_NAMESPACE")
            })

            it("returns an error when PINNIPED_AUTHENTICATOR_TYPE is missing", func() {
                delete(fakeEnv, "PINNIPED_AUTHENTICATOR_TYPE")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_AUTHENTICATOR_TYPE")
            })

            it("returns an error when PINNIPED_AUTHENTICATOR_NAME is missing", func() {
                delete(fakeEnv, "PINNIPED_AUTHENTICATOR_NAME")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_AUTHENTICATOR_NAME")
            })

            it("returns an error when PINNIPED_TOKEN is missing", func() {
                delete(fakeEnv, "PINNIPED_TOKEN")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_TOKEN")
            })

            it("returns an error when PINNIPED_CA_BUNDLE is missing", func() {
                delete(fakeEnv, "PINNIPED_CA_BUNDLE")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_CA_BUNDLE")
            })

            it("returns an error when PINNIPED_K8S_API_ENDPOINT is missing", func() {
                delete(fakeEnv, "PINNIPED_K8S_API_ENDPOINT")
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_K8S_API_ENDPOINT")
            })
        })

        when("env vars are invalid", func() {
            it("returns an error when PINNIPED_AUTHENTICATOR_TYPE is not a supported value", func() {
                fakeEnv["PINNIPED_AUTHENTICATOR_TYPE"] = "invalid"
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, `invalid authenticator type: "invalid", supported values are "webhook"`)
            })
        })

        when("the token exchange fails", func() {
            it.Before(func() {
                tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
                    return nil, fmt.Errorf("some error")
                }
            })

            it("returns an error", func() {
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.EqualError(err, "failed to get credential: some error")
            })
        })

        when("the JSON encoder fails", func() {
            it.Before(func() {
                tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
                    return &clientauthenticationv1beta1.ExecCredential{
                        Status: &clientauthenticationv1beta1.ExecCredentialStatus{
                            Token: "some token",
                        },
                    }, nil
                }
            })

            it("returns an error", func() {
                err := exchangeCredential(envGetter, tokenExchanger, &testutil.ErrorWriter{ReturnError: fmt.Errorf("some IO error")}, 30*time.Second)
                r.EqualError(err, "failed to marshal response to stdout: some IO error")
            })
        })

        when("the token exchange times out", func() {
            it.Before(func() {
                tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
                    select {
                    case <-time.After(100 * time.Millisecond):
                        return &clientauthenticationv1beta1.ExecCredential{
                            Status: &clientauthenticationv1beta1.ExecCredentialStatus{
                                Token: "some token",
                            },
                        }, nil
                    case <-ctx.Done():
                        return nil, ctx.Err()
                    }
                }
            })

            it("returns an error", func() {
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 1*time.Millisecond)
                r.EqualError(err, "failed to get credential: context deadline exceeded")
            })
        })

        when("the token exchange succeeds", func() {
            var actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint string

            it.Before(func() {
                tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
                    actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint = namespace, token, caBundle, apiEndpoint
                    now := metav1.NewTime(time.Date(2020, 7, 29, 1, 2, 3, 0, time.UTC))
                    return &clientauthenticationv1beta1.ExecCredential{
                        TypeMeta: metav1.TypeMeta{
                            Kind:       "ExecCredential",
                            APIVersion: "client.authentication.k8s.io/v1beta1",
                        },
                        Status: &clientauthenticationv1beta1.ExecCredentialStatus{
                            ExpirationTimestamp:   &now,
                            ClientCertificateData: "some certificate",
                            ClientKeyData:         "some key",
                            Token:                 "some token",
                        },
                    }, nil
                }
            })

            it("writes the execCredential to the given writer", func() {
                err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
                r.NoError(err)
                r.Equal(fakeEnv["PINNIPED_NAMESPACE"], actualNamespace)
                r.Equal(fakeEnv["PINNIPED_TOKEN"], actualToken)
                r.Equal(fakeEnv["PINNIPED_CA_BUNDLE"], actualCaBundle)
                r.Equal(fakeEnv["PINNIPED_K8S_API_ENDPOINT"], actualAPIEndpoint)
                expected := `{
                    "kind": "ExecCredential",
                    "apiVersion": "client.authentication.k8s.io/v1beta1",
                    "spec": {},
                    "status": {
                        "expirationTimestamp": "2020-07-29T01:02:03Z",
                        "clientCertificateData": "some certificate",
                        "clientKeyData": "some key",
                        "token": "some token"
                    }
                }`
                r.JSONEq(expected, buffer.String())
            })
        })
    }, spec.Parallel(), spec.Report(report.Terminal{}))
}
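The `testutil.ErrorWriter` used above lives in `go.pinniped.dev/internal/testutil` and is not shown in this diff. A plausible sketch of such a helper (an assumption, not the real implementation) is simply an `io.Writer` that always fails, which is enough to exercise the encoder error path:

```go
package testutil

// ErrorWriter is a sketch of an io.Writer that fails every Write with a
// configurable error, useful for testing code that writes to a stream.
type ErrorWriter struct {
    ReturnError error
}

func (w *ErrorWriter) Write(_ []byte) (int, error) {
    return 0, w.ReturnError
}
```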
@@ -1,344 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "bytes"
    "context"
    "encoding/base64"
    "fmt"
    "io"
    "os"
    "time"

    "github.com/ghodss/yaml"
    "github.com/spf13/cobra"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    v1 "k8s.io/client-go/tools/clientcmd/api/v1"

    configv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1"
    pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
    "go.pinniped.dev/internal/constable"
    "go.pinniped.dev/internal/here"
)

//nolint: gochecknoinits
func init() {
    rootCmd.AddCommand(newGetKubeConfigCommand().Command())
}

type getKubeConfigFlags struct {
    token             string
    kubeconfig        string
    contextOverride   string
    namespace         string
    authenticatorName string
    authenticatorType string
}

type getKubeConfigCommand struct {
    flags getKubeConfigFlags

    // Test mocking points.
    getPathToSelf     func() (string, error)
    kubeClientCreator func(restConfig *rest.Config) (pinnipedclientset.Interface, error)
}

func newGetKubeConfigCommand() *getKubeConfigCommand {
    return &getKubeConfigCommand{
        flags: getKubeConfigFlags{
            namespace: "pinniped",
        },
        getPathToSelf: os.Executable,
        kubeClientCreator: func(restConfig *rest.Config) (pinnipedclientset.Interface, error) {
            return pinnipedclientset.NewForConfig(restConfig)
        },
    }
}

func (c *getKubeConfigCommand) Command() *cobra.Command {
    cmd := &cobra.Command{
        RunE:  c.run,
        Args:  cobra.NoArgs, // do not accept positional arguments for this command
        Use:   "get-kubeconfig",
        Short: "Print a kubeconfig for authenticating into a cluster via Pinniped",
        Long: here.Doc(`
            Print a kubeconfig for authenticating into a cluster via Pinniped.

            Requires admin-like access to the cluster using the current
            kubeconfig context in order to access Pinniped's metadata.
            The current kubeconfig is found similarly to how kubectl finds it:
            using the value of the --kubeconfig option, or if that is not
            specified then from the value of the KUBECONFIG environment
            variable, or if that is not specified then it defaults to
            .kube/config in your home directory.

            Prints a kubeconfig which is suitable to access the cluster using
            Pinniped as the authentication mechanism. This kubeconfig output
            can be saved to a file and used with future kubectl commands, e.g.:
              pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
              kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
        `),
    }
    cmd.Flags().StringVar(&c.flags.token, "token", "", "Credential to include in the resulting kubeconfig output (Required)")
    cmd.Flags().StringVar(&c.flags.kubeconfig, "kubeconfig", c.flags.kubeconfig, "Path to the kubeconfig file")
    cmd.Flags().StringVar(&c.flags.contextOverride, "kubeconfig-context", c.flags.contextOverride, "Kubeconfig context override")
    cmd.Flags().StringVar(&c.flags.namespace, "pinniped-namespace", c.flags.namespace, "Namespace in which Pinniped was installed")
    cmd.Flags().StringVar(&c.flags.authenticatorType, "authenticator-type", c.flags.authenticatorType, "Authenticator type (e.g., 'webhook')")
    cmd.Flags().StringVar(&c.flags.authenticatorName, "authenticator-name", c.flags.authenticatorName, "Authenticator name")
    mustMarkRequired(cmd, "token")
    return cmd
}

func (c *getKubeConfigCommand) run(cmd *cobra.Command, args []string) error {
    fullPathToSelf, err := c.getPathToSelf()
    if err != nil {
        return fmt.Errorf("could not find path to self: %w", err)
    }

    clientConfig := newClientConfig(c.flags.kubeconfig, c.flags.contextOverride)

    currentKubeConfig, err := clientConfig.RawConfig()
    if err != nil {
        return err
    }

    restConfig, err := clientConfig.ClientConfig()
    if err != nil {
        return err
    }
    clientset, err := c.kubeClientCreator(restConfig)
    if err != nil {
        return err
    }

    authenticatorType, authenticatorName := c.flags.authenticatorType, c.flags.authenticatorName
    if authenticatorType == "" || authenticatorName == "" {
        authenticatorType, authenticatorName, err = getDefaultAuthenticator(clientset, c.flags.namespace)
        if err != nil {
            return err
        }
    }

    credentialIssuer, err := fetchPinnipedCredentialIssuer(clientset, c.flags.namespace)
    if err != nil {
        return err
    }

    if credentialIssuer.Status.KubeConfigInfo == nil {
        return constable.Error(`CredentialIssuer "pinniped-config" was missing KubeConfigInfo`)
    }

    v1Cluster, err := copyCurrentClusterFromExistingKubeConfig(currentKubeConfig, c.flags.contextOverride)
    if err != nil {
        return err
    }

    err = issueWarningForNonMatchingServerOrCA(v1Cluster, credentialIssuer, cmd.ErrOrStderr())
    if err != nil {
        return err
    }

    config := newPinnipedKubeconfig(v1Cluster, fullPathToSelf, c.flags.token, c.flags.namespace, authenticatorType, authenticatorName)

    err = writeConfigAsYAML(cmd.OutOrStdout(), config)
    if err != nil {
        return err
    }

    return nil
}

func issueWarningForNonMatchingServerOrCA(v1Cluster v1.Cluster, credentialIssuer *configv1alpha1.CredentialIssuer, warningsWriter io.Writer) error {
    credentialIssuerCA, err := base64.StdEncoding.DecodeString(credentialIssuer.Status.KubeConfigInfo.CertificateAuthorityData)
    if err != nil {
        return err
    }
    if v1Cluster.Server != credentialIssuer.Status.KubeConfigInfo.Server ||
        !bytes.Equal(v1Cluster.CertificateAuthorityData, credentialIssuerCA) {
        _, err := warningsWriter.Write([]byte("WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuer on the cluster. Using local kubeconfig values.\n"))
        if err != nil {
            return fmt.Errorf("output write error: %w", err)
        }
    }
    return nil
}

type noAuthenticatorError struct{ Namespace string }

func (e noAuthenticatorError) Error() string {
    return fmt.Sprintf(`no authenticators were found in namespace %q`, e.Namespace)
}

type indeterminateAuthenticatorError struct{ Namespace string }

func (e indeterminateAuthenticatorError) Error() string {
    return fmt.Sprintf(
        `multiple authenticators were found in namespace %q, so --authenticator-name/--authenticator-type must be specified`,
        e.Namespace,
    )
}

func getDefaultAuthenticator(clientset pinnipedclientset.Interface, namespace string) (string, string, error) {
    ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
    defer cancelFunc()

    webhooks, err := clientset.AuthenticationV1alpha1().WebhookAuthenticators(namespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return "", "", err
    }

    type ref struct{ authenticatorType, authenticatorName string }
    authenticators := make([]ref, 0, len(webhooks.Items))
    for _, webhook := range webhooks.Items {
        authenticators = append(authenticators, ref{authenticatorType: "webhook", authenticatorName: webhook.Name})
    }

    if len(authenticators) == 0 {
        return "", "", noAuthenticatorError{namespace}
    }
    if len(authenticators) > 1 {
        return "", "", indeterminateAuthenticatorError{namespace}
    }
    return authenticators[0].authenticatorType, authenticators[0].authenticatorName, nil
}

func fetchPinnipedCredentialIssuer(clientset pinnipedclientset.Interface, pinnipedInstallationNamespace string) (*configv1alpha1.CredentialIssuer, error) {
    ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
    defer cancelFunc()

    credentialIssuers, err := clientset.ConfigV1alpha1().CredentialIssuers(pinnipedInstallationNamespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    if len(credentialIssuers.Items) == 0 {
        return nil, constable.Error(fmt.Sprintf(
            `No CredentialIssuer was found in namespace "%s". Is Pinniped installed on this cluster in namespace "%s"?`,
            pinnipedInstallationNamespace,
            pinnipedInstallationNamespace,
        ))
    }

    if len(credentialIssuers.Items) > 1 {
        return nil, constable.Error(fmt.Sprintf(
            `More than one CredentialIssuer was found in namespace "%s"`,
            pinnipedInstallationNamespace,
        ))
    }

    return &credentialIssuers.Items[0], nil
}

func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
    loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
    loadingRules.ExplicitPath = kubeconfigPathOverride
    clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
        CurrentContext: currentContextName,
    })
    return clientConfig
}

func writeConfigAsYAML(outputWriter io.Writer, config v1.Config) error {
    output, err := yaml.Marshal(&config)
    if err != nil {
        return fmt.Errorf("YAML serialization error: %w", err)
    }

    _, err = outputWriter.Write(output)
    if err != nil {
        return fmt.Errorf("output write error: %w", err)
    }

    return nil
}

func copyCurrentClusterFromExistingKubeConfig(currentKubeConfig clientcmdapi.Config, currentContextNameOverride string) (v1.Cluster, error) {
    v1Cluster := v1.Cluster{}

    contextName := currentKubeConfig.CurrentContext
    if currentContextNameOverride != "" {
        contextName = currentContextNameOverride
    }

    err := v1.Convert_api_Cluster_To_v1_Cluster(
        currentKubeConfig.Clusters[currentKubeConfig.Contexts[contextName].Cluster],
        &v1Cluster,
        nil,
    )
    if err != nil {
        return v1.Cluster{}, err
    }

    return v1Cluster, nil
}

func newPinnipedKubeconfig(v1Cluster v1.Cluster, fullPathToSelf string, token string, namespace string, authenticatorType string, authenticatorName string) v1.Config {
    clusterName := "pinniped-cluster"
    userName := "pinniped-user"

    return v1.Config{
        Kind:        "Config",
        APIVersion:  v1.SchemeGroupVersion.Version,
        Preferences: v1.Preferences{},
        Clusters: []v1.NamedCluster{
            {
                Name:    clusterName,
                Cluster: v1Cluster,
            },
        },
        Contexts: []v1.NamedContext{
            {
                Name: clusterName,
                Context: v1.Context{
                    Cluster:  clusterName,
                    AuthInfo: userName,
                },
            },
        },
        AuthInfos: []v1.NamedAuthInfo{
            {
                Name: userName,
                AuthInfo: v1.AuthInfo{
                    Exec: &v1.ExecConfig{
                        Command: fullPathToSelf,
                        Args:    []string{"exchange-credential"},
                        Env: []v1.ExecEnvVar{
                            {
                                Name:  "PINNIPED_K8S_API_ENDPOINT",
                                Value: v1Cluster.Server,
                            },
                            {
                                Name:  "PINNIPED_CA_BUNDLE",
                                Value: string(v1Cluster.CertificateAuthorityData),
                            },
                            {
                                Name:  "PINNIPED_NAMESPACE",
                                Value: namespace,
                            },
                            {
                                Name:  "PINNIPED_TOKEN",
                                Value: token,
                            },
                            {
                                Name:  "PINNIPED_AUTHENTICATOR_TYPE",
                                Value: authenticatorType,
                            },
                            {
                                Name:  "PINNIPED_AUTHENTICATOR_NAME",
                                Value: authenticatorName,
                            },
                        },
                        APIVersion: clientauthenticationv1beta1.SchemeGroupVersion.String(),
                        InstallHint: "The Pinniped CLI is required to authenticate to the current cluster.\n" +
                            "For more information, please visit https://pinniped.dev",
                    },
                },
            },
        },
        CurrentContext: clusterName,
    }
}
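As a sanity check, the YAML printed by `writeConfigAsYAML` should round-trip through client-go's kubeconfig loader. A minimal sketch (not part of this diff; the literal path, token, and authenticator values are made up for illustration):

```go
package cmd

import (
    "bytes"
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
    v1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

// checkGeneratedKubeconfig builds a kubeconfig the same way the command does,
// then parses it back with clientcmd to confirm it is well formed.
func checkGeneratedKubeconfig(cluster v1.Cluster) error {
    config := newPinnipedKubeconfig(cluster, "/usr/local/bin/pinniped", "my-token", "pinniped", "webhook", "my-webhook")

    var buf bytes.Buffer
    if err := writeConfigAsYAML(&buf, config); err != nil {
        return err
    }

    // clientcmd.Load parses kubeconfig bytes into the internal API type.
    parsed, err := clientcmd.Load(buf.Bytes())
    if err != nil {
        return fmt.Errorf("generated kubeconfig did not parse: %w", err)
    }
    fmt.Println("current-context:", parsed.CurrentContext) // prints "pinniped-cluster"
    return nil
}
```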
@@ -1,401 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "strings"
    "testing"

    "github.com/spf13/cobra"
    "github.com/stretchr/testify/require"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/rest"
    coretesting "k8s.io/client-go/testing"

    authv1alpha "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
    configv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1"
    pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
    pinnipedfake "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned/fake"
    "go.pinniped.dev/internal/here"
)

var (
    knownGoodUsageForGetKubeConfig = here.Doc(`
        Usage:
          get-kubeconfig [flags]

        Flags:
              --authenticator-name string    Authenticator name
              --authenticator-type string    Authenticator type (e.g., 'webhook')
          -h, --help                         help for get-kubeconfig
              --kubeconfig string            Path to the kubeconfig file
              --kubeconfig-context string    Kubeconfig context override
              --pinniped-namespace string    Namespace in which Pinniped was installed (default "pinniped")
              --token string                 Credential to include in the resulting kubeconfig output (Required)

    `)

    knownGoodHelpForGetKubeConfig = here.Doc(`
        Print a kubeconfig for authenticating into a cluster via Pinniped.

        Requires admin-like access to the cluster using the current
        kubeconfig context in order to access Pinniped's metadata.
        The current kubeconfig is found similarly to how kubectl finds it:
        using the value of the --kubeconfig option, or if that is not
        specified then from the value of the KUBECONFIG environment
        variable, or if that is not specified then it defaults to
        .kube/config in your home directory.

        Prints a kubeconfig which is suitable to access the cluster using
        Pinniped as the authentication mechanism. This kubeconfig output
        can be saved to a file and used with future kubectl commands, e.g.:
          pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
          kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods

        Usage:
          get-kubeconfig [flags]

        Flags:
              --authenticator-name string    Authenticator name
              --authenticator-type string    Authenticator type (e.g., 'webhook')
          -h, --help                         help for get-kubeconfig
              --kubeconfig string            Path to the kubeconfig file
              --kubeconfig-context string    Kubeconfig context override
              --pinniped-namespace string    Namespace in which Pinniped was installed (default "pinniped")
              --token string                 Credential to include in the resulting kubeconfig output (Required)
    `)
)

func TestNewGetKubeConfigCmd(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name       string
        args       []string
        wantError  bool
        wantStdout string
        wantStderr string
    }{
        {
            name:       "help flag passed",
            args:       []string{"--help"},
            wantStdout: knownGoodHelpForGetKubeConfig,
        },
        {
            name:       "missing required flag",
            args:       []string{},
            wantError:  true,
            wantStdout: `Error: required flag(s) "token" not set` + "\n" + knownGoodUsageForGetKubeConfig,
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            cmd := newGetKubeConfigCommand().Command()
            require.NotNil(t, cmd)

            var stdout, stderr bytes.Buffer
            cmd.SetOut(&stdout)
            cmd.SetErr(&stderr)
            cmd.SetArgs(tt.args)
            err := cmd.Execute()
            if tt.wantError {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
            require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
            require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
        })
    }
}

type expectedKubeconfigYAML struct {
    clusterCAData     string
    clusterServer     string
    command           string
    token             string
    pinnipedEndpoint  string
    pinnipedCABundle  string
    namespace         string
    authenticatorType string
    authenticatorName string
}

func (e expectedKubeconfigYAML) String() string {
    return here.Docf(`
        apiVersion: v1
        clusters:
        - cluster:
            certificate-authority-data: %s
            server: %s
          name: pinniped-cluster
        contexts:
        - context:
            cluster: pinniped-cluster
            user: pinniped-user
          name: pinniped-cluster
        current-context: pinniped-cluster
        kind: Config
        preferences: {}
        users:
        - name: pinniped-user
          user:
            exec:
              apiVersion: client.authentication.k8s.io/v1beta1
              args:
              - exchange-credential
              command: %s
              env:
              - name: PINNIPED_K8S_API_ENDPOINT
                value: %s
              - name: PINNIPED_CA_BUNDLE
                value: %s
              - name: PINNIPED_NAMESPACE
                value: %s
              - name: PINNIPED_TOKEN
                value: %s
              - name: PINNIPED_AUTHENTICATOR_TYPE
                value: %s
              - name: PINNIPED_AUTHENTICATOR_NAME
                value: %s
              installHint: |-
                The Pinniped CLI is required to authenticate to the current cluster.
                For more information, please visit https://pinniped.dev
    `, e.clusterCAData, e.clusterServer, e.command, e.pinnipedEndpoint, e.pinnipedCABundle, e.namespace, e.token, e.authenticatorType, e.authenticatorName)
}

func newCredentialIssuer(name, namespace, server, certificateAuthorityData string) *configv1alpha1.CredentialIssuer {
    return &configv1alpha1.CredentialIssuer{
        TypeMeta: metav1.TypeMeta{
            Kind:       "CredentialIssuer",
            APIVersion: configv1alpha1.SchemeGroupVersion.String(),
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: namespace,
        },
        Status: configv1alpha1.CredentialIssuerStatus{
            KubeConfigInfo: &configv1alpha1.CredentialIssuerKubeConfigInfo{
                Server:                   server,
                CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(certificateAuthorityData)),
            },
        },
    }
}

func TestRun(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name       string
        mocks      func(*getKubeConfigCommand)
        wantError  string
        wantStdout string
        wantStderr string
    }{
        {
            name: "failure to get path to self",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.getPathToSelf = func() (string, error) {
                    return "", fmt.Errorf("some error getting path to self")
                }
            },
            wantError: "could not find path to self: some error getting path to self",
        },
        {
            name: "kubeconfig does not exist",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.flags.kubeconfig = "./testdata/does-not-exist.yaml"
            },
            wantError: "stat ./testdata/does-not-exist.yaml: no such file or directory",
        },
        {
            name: "fail to get client",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return nil, fmt.Errorf("some error configuring clientset")
                }
            },
            wantError: "some error configuring clientset",
        },
        {
            name: "fail to get authenticators",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.flags.authenticatorName = ""
                cmd.flags.authenticatorType = ""
                clientset := pinnipedfake.NewSimpleClientset()
                clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
                    return true, nil, fmt.Errorf("some error getting authenticators")
                })
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return clientset, nil
                }
            },
            wantError: "some error getting authenticators",
        },
        {
            name: "zero authenticators",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.flags.authenticatorName = ""
                cmd.flags.authenticatorType = ""
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(), nil
                }
            },
            wantError: `no authenticators were found in namespace "test-namespace"`,
        },
        {
            name: "multiple authenticators",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.flags.authenticatorName = ""
                cmd.flags.authenticatorType = ""
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(
                        &authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-one"}},
                        &authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-two"}},
                    ), nil
                }
            },
            wantError: `multiple authenticators were found in namespace "test-namespace", so --authenticator-name/--authenticator-type must be specified`,
        },
        {
            name: "fail to get CredentialIssuers",
            mocks: func(cmd *getKubeConfigCommand) {
                clientset := pinnipedfake.NewSimpleClientset()
                clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
                    return true, nil, fmt.Errorf("some error getting CredentialIssuers")
                })
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return clientset, nil
                }
            },
            wantError: "some error getting CredentialIssuers",
        },
        {
            name: "zero CredentialIssuers found",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(
                        newCredentialIssuer("pinniped-config-1", "not-the-test-namespace", "", ""),
                    ), nil
                }
            },
            wantError: `No CredentialIssuer was found in namespace "test-namespace". Is Pinniped installed on this cluster in namespace "test-namespace"?`,
        },
        {
            name: "multiple CredentialIssuers found",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(
                        newCredentialIssuer("pinniped-config-1", "test-namespace", "", ""),
                        newCredentialIssuer("pinniped-config-2", "test-namespace", "", ""),
                    ), nil
                }
            },
            wantError: `More than one CredentialIssuer was found in namespace "test-namespace"`,
        },
        {
            name: "CredentialIssuer missing KubeConfigInfo",
            mocks: func(cmd *getKubeConfigCommand) {
                ci := newCredentialIssuer("pinniped-config", "test-namespace", "", "")
                ci.Status.KubeConfigInfo = nil
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(ci), nil
                }
            },
            wantError: `CredentialIssuer "pinniped-config" was missing KubeConfigInfo`,
        },
        {
            name: "KubeConfigInfo has invalid base64",
            mocks: func(cmd *getKubeConfigCommand) {
                ci := newCredentialIssuer("pinniped-config", "test-namespace", "https://example.com", "")
                ci.Status.KubeConfigInfo.CertificateAuthorityData = "invalid-base64-test-ca"
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(ci), nil
                }
            },
            wantError: `illegal base64 data at input byte 7`,
        },
        {
            name: "success using remote CA data",
            mocks: func(cmd *getKubeConfigCommand) {
                ci := newCredentialIssuer("pinniped-config", "test-namespace", "https://fake-server-url-value", "fake-certificate-authority-data-value")
                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(ci), nil
                }
            },
            wantStdout: expectedKubeconfigYAML{
                clusterCAData:     "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
                clusterServer:     "https://fake-server-url-value",
                command:           "/path/to/pinniped",
                token:             "test-token",
                pinnipedEndpoint:  "https://fake-server-url-value",
                pinnipedCABundle:  "fake-certificate-authority-data-value",
                namespace:         "test-namespace",
                authenticatorType: "test-authenticator-type",
                authenticatorName: "test-authenticator-name",
            }.String(),
        },
        {
            name: "success using local CA data and discovered authenticator",
            mocks: func(cmd *getKubeConfigCommand) {
                cmd.flags.authenticatorName = ""
                cmd.flags.authenticatorType = ""

                cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
                    return pinnipedfake.NewSimpleClientset(
                        &authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "discovered-authenticator"}},
                        newCredentialIssuer("pinniped-config", "test-namespace", "https://example.com", "test-ca"),
                    ), nil
                }
            },
            wantStderr: `WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuer on the cluster. Using local kubeconfig values.`,
            wantStdout: expectedKubeconfigYAML{
                clusterCAData:     "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
                clusterServer:     "https://fake-server-url-value",
                command:           "/path/to/pinniped",
                token:             "test-token",
                pinnipedEndpoint:  "https://fake-server-url-value",
                pinnipedCABundle:  "fake-certificate-authority-data-value",
                namespace:         "test-namespace",
                authenticatorType: "webhook",
                authenticatorName: "discovered-authenticator",
            }.String(),
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()

            // Start with a default getKubeConfigCommand, set some defaults, then apply any mocks.
            c := newGetKubeConfigCommand()
            c.flags.token = "test-token"
            c.flags.namespace = "test-namespace"
            c.flags.authenticatorName = "test-authenticator-name"
            c.flags.authenticatorType = "test-authenticator-type"
            c.getPathToSelf = func() (string, error) { return "/path/to/pinniped", nil }
            c.flags.kubeconfig = "./testdata/kubeconfig.yaml"
            tt.mocks(c)

            cmd := &cobra.Command{}
            var stdout, stderr bytes.Buffer
            cmd.SetOut(&stdout)
            cmd.SetErr(&stderr)
            cmd.SetArgs([]string{})
            err := c.run(cmd, []string{})
            if tt.wantError != "" {
                require.EqualError(t, err, tt.wantError)
            } else {
                require.NoError(t, err)
            }
            require.Equal(t, strings.TrimSpace(tt.wantStdout), strings.TrimSpace(stdout.String()), "unexpected stdout")
            require.Equal(t, strings.TrimSpace(tt.wantStderr), strings.TrimSpace(stderr.String()), "unexpected stderr")
        })
    }
}
@@ -1,21 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "github.com/spf13/cobra"
)

//nolint: gochecknoglobals
var loginCmd = &cobra.Command{
    Use:          "login",
    Short:        "login",
    Long:         "Login to a Pinniped server",
    SilenceUsage: true, // do not print usage message when commands fail
}

//nolint: gochecknoinits
func init() {
    rootCmd.AddCommand(loginCmd)
}
@@ -1,118 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "encoding/json"
    "os"
    "path/filepath"

    "github.com/spf13/cobra"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
    "k8s.io/klog/v2/klogr"

    "go.pinniped.dev/internal/oidcclient"
    "go.pinniped.dev/internal/oidcclient/filesession"
)

//nolint: gochecknoinits
func init() {
    loginCmd.AddCommand(oidcLoginCommand(oidcclient.Login))
}

func oidcLoginCommand(loginFunc func(issuer string, clientID string, opts ...oidcclient.Option) (*oidcclient.Token, error)) *cobra.Command {
    var (
        cmd = cobra.Command{
            Args:         cobra.NoArgs,
            Use:          "oidc --issuer ISSUER --client-id CLIENT_ID",
            Short:        "Login using an OpenID Connect provider",
            SilenceUsage: true,
        }
        issuer            string
        clientID          string
        listenPort        uint16
        scopes            []string
        skipBrowser       bool
        sessionCachePath  string
        debugSessionCache bool
    )
    cmd.Flags().StringVar(&issuer, "issuer", "", "OpenID Connect issuer URL.")
    cmd.Flags().StringVar(&clientID, "client-id", "", "OpenID Connect client ID.")
    cmd.Flags().Uint16Var(&listenPort, "listen-port", 0, "TCP port for localhost listener (authorization code flow only).")
    cmd.Flags().StringSliceVar(&scopes, "scopes", []string{"offline_access", "openid", "email", "profile"}, "OIDC scopes to request during login.")
    cmd.Flags().BoolVar(&skipBrowser, "skip-browser", false, "Skip opening the browser (just print the URL).")
    cmd.Flags().StringVar(&sessionCachePath, "session-cache", filepath.Join(mustGetConfigDir(), "sessions.yaml"), "Path to session cache file.")
    cmd.Flags().BoolVar(&debugSessionCache, "debug-session-cache", false, "Print debug logs related to the session cache.")
    mustMarkHidden(&cmd, "debug-session-cache")
    mustMarkRequired(&cmd, "issuer", "client-id")

    cmd.RunE = func(cmd *cobra.Command, args []string) error {
        // Initialize the session cache.
        var sessionOptions []filesession.Option

        // If the hidden --debug-session-cache option is passed, log all the errors from the session cache with klog.
        if debugSessionCache {
            logger := klogr.New().WithName("session")
            sessionOptions = append(sessionOptions, filesession.WithErrorReporter(func(err error) {
                logger.Error(err, "error during session cache operation")
            }))
        }
        sessionCache := filesession.New(sessionCachePath, sessionOptions...)

        // Initialize the login handler.
        opts := []oidcclient.Option{
            oidcclient.WithContext(cmd.Context()),
            oidcclient.WithScopes(scopes),
            oidcclient.WithSessionCache(sessionCache),
        }

        if listenPort != 0 {
            opts = append(opts, oidcclient.WithListenPort(listenPort))
        }

        // --skip-browser replaces the default "browser open" function with one that prints to stderr.
        if skipBrowser {
            opts = append(opts, oidcclient.WithBrowserOpen(func(url string) error {
                cmd.PrintErr("Please log in: ", url, "\n")
                return nil
            }))
        }

        tok, err := loginFunc(issuer, clientID, opts...)
        if err != nil {
            return err
        }

        // Convert the token to the Kubernetes ExecCredential JSON format for output.
        return json.NewEncoder(cmd.OutOrStdout()).Encode(&clientauthenticationv1beta1.ExecCredential{
            TypeMeta: metav1.TypeMeta{
                Kind:       "ExecCredential",
                APIVersion: "client.authentication.k8s.io/v1beta1",
            },
            Status: &clientauthenticationv1beta1.ExecCredentialStatus{
                ExpirationTimestamp: &tok.IDToken.Expiry,
                Token:               tok.IDToken.Token,
            },
        })
    }
    return &cmd
}

// mustGetConfigDir returns a directory that follows the XDG base directory convention:
// $XDG_CONFIG_HOME defines the base directory relative to which user-specific configuration files should
// be stored. If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used.
// See https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
func mustGetConfigDir() string {
    const xdgAppName = "pinniped"

    if path := os.Getenv("XDG_CONFIG_HOME"); path != "" {
        return filepath.Join(path, xdgAppName)
    }
    home, err := os.UserHomeDir()
    if err != nil {
        panic(err)
    }
    return filepath.Join(home, ".config", xdgAppName)
}
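A short sketch (not part of this diff) demonstrating the lookup order that `mustGetConfigDir` implements: an explicit `$XDG_CONFIG_HOME` wins, otherwise the path falls back to `$HOME/.config/pinniped`.

```go
package cmd

import (
    "fmt"
    "os"
)

func exampleConfigDir() {
    // With XDG_CONFIG_HOME set, the app directory is nested under it.
    os.Setenv("XDG_CONFIG_HOME", "/tmp/xdg")
    fmt.Println(mustGetConfigDir()) // /tmp/xdg/pinniped

    // Without it, the conventional $HOME/.config fallback is used.
    os.Unsetenv("XDG_CONFIG_HOME")
    fmt.Println(mustGetConfigDir()) // e.g. /home/user/.config/pinniped
}
```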
@@ -1,127 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "bytes"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "go.pinniped.dev/internal/here"
    "go.pinniped.dev/internal/oidcclient"
)

func TestLoginOIDCCommand(t *testing.T) {
    t.Parallel()

    cfgDir := mustGetConfigDir()

    time1 := time.Date(3020, 10, 12, 13, 14, 15, 16, time.UTC)

    tests := []struct {
        name             string
        args             []string
        wantError        bool
        wantStdout       string
        wantStderr       string
        wantIssuer       string
        wantClientID     string
        wantOptionsCount int
    }{
        {
            name: "help flag passed",
            args: []string{"--help"},
            wantStdout: here.Doc(`
                Login using an OpenID Connect provider

                Usage:
                  oidc --issuer ISSUER --client-id CLIENT_ID [flags]

                Flags:
                      --client-id string       OpenID Connect client ID.
                  -h, --help                   help for oidc
                      --issuer string          OpenID Connect issuer URL.
                      --listen-port uint16     TCP port for localhost listener (authorization code flow only).
                      --scopes strings         OIDC scopes to request during login. (default [offline_access,openid,email,profile])
                      --session-cache string   Path to session cache file. (default "` + cfgDir + `/sessions.yaml")
                      --skip-browser           Skip opening the browser (just print the URL).
            `),
        },
        {
            name:      "missing required flags",
            args:      []string{},
            wantError: true,
            wantStdout: here.Doc(`
                Error: required flag(s) "client-id", "issuer" not set
            `),
        },
        {
            name: "success with minimal options",
            args: []string{
                "--client-id", "test-client-id",
                "--issuer", "test-issuer",
            },
            wantIssuer:       "test-issuer",
            wantClientID:     "test-client-id",
            wantOptionsCount: 3,
            wantStdout:       `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
        },
        {
            name: "success with all options",
            args: []string{
                "--client-id", "test-client-id",
                "--issuer", "test-issuer",
                "--skip-browser",
                "--listen-port", "1234",
                "--debug-session-cache",
            },
            wantIssuer:       "test-issuer",
            wantClientID:     "test-client-id",
            wantOptionsCount: 5,
            wantStdout:       `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            var (
                gotIssuer   string
                gotClientID string
                gotOptions  []oidcclient.Option
            )
            cmd := oidcLoginCommand(func(issuer string, clientID string, opts ...oidcclient.Option) (*oidcclient.Token, error) {
                gotIssuer = issuer
                gotClientID = clientID
                gotOptions = opts
                return &oidcclient.Token{
                    IDToken: &oidcclient.IDToken{
                        Token:  "test-id-token",
                        Expiry: metav1.NewTime(time1),
                    },
                }, nil
            })
            require.NotNil(t, cmd)

            var stdout, stderr bytes.Buffer
            cmd.SetOut(&stdout)
            cmd.SetErr(&stderr)
            cmd.SetArgs(tt.args)
            err := cmd.Execute()
            if tt.wantError {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
            require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
            require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
            require.Equal(t, tt.wantIssuer, gotIssuer, "unexpected issuer")
            require.Equal(t, tt.wantClientID, gotClientID, "unexpected client ID")
            require.Len(t, gotOptions, tt.wantOptionsCount)
        })
    }
}
@@ -1,28 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "fmt"
    "os"

    "github.com/spf13/cobra"
)

//nolint: gochecknoglobals
var rootCmd = &cobra.Command{
    Use:          "pinniped",
    Short:        "pinniped",
    Long:         "pinniped is the client-side binary for use with Pinniped-enabled Kubernetes clusters.",
    SilenceUsage: true, // do not print usage message when commands fail
}

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
    if err := rootCmd.Execute(); err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
}
cmd/pinniped/cmd/testdata/kubeconfig.yaml (vendored, 31 lines)
@@ -1,31 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ== # fake-certificate-authority-data-value
    server: https://fake-server-url-value
  name: kind-kind
- cluster:
    certificate-authority-data: c29tZS1vdGhlci1mYWtlLWNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhLXZhbHVl # some-other-fake-certificate-authority-data-value
    server: https://some-other-fake-server-url-value
  name: some-other-cluster
contexts:
- context:
    cluster: kind-kind
    user: kind-kind
  name: kind-kind
- context:
    cluster: some-other-cluster
    user: some-other-user
  name: some-other-context
current-context: kind-kind
kind: Config
preferences: {}
users:
- name: kind-kind
  user:
    client-certificate-data: ZmFrZS1jbGllbnQtY2VydGlmaWNhdGUtZGF0YS12YWx1ZQ== # fake-client-certificate-data-value
    client-key-data: ZmFrZS1jbGllbnQta2V5LWRhdGEtdmFsdWU= # fake-client-key-data-value
- name: some-other-user
  user:
    client-certificate-data: c29tZS1vdGhlci1mYWtlLWNsaWVudC1jZXJ0aWZpY2F0ZS1kYXRhLXZhbHVl # some-other-fake-client-certificate-data-value
    client-key-data: c29tZS1vdGhlci1mYWtlLWNsaWVudC1rZXktZGF0YS12YWx1ZQ== # some-other-fake-client-key-data-value
@@ -1,28 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "fmt"

    "github.com/spf13/cobra"
    "k8s.io/component-base/version"
)

//nolint: gochecknoinits
func init() {
    rootCmd.AddCommand(newVersionCommand())
}

func newVersionCommand() *cobra.Command {
    return &cobra.Command{
        RunE: func(cmd *cobra.Command, _ []string) error {
            fmt.Fprintf(cmd.OutOrStdout(), "%#v\n", version.Get())
            return nil
        },
        Args:  cobra.NoArgs, // do not accept positional arguments for this command
        Use:   "version",
        Short: "Print the version of this Pinniped CLI",
    }
}
@@ -1,85 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
    "bytes"
    "testing"

    "github.com/stretchr/testify/require"

    "go.pinniped.dev/internal/here"
)

var (
    knownGoodUsageRegexpForVersion = here.Doc(`
        Usage:
          version \[flags\]

        Flags:
          -h, --help   help for version

    `)

    knownGoodHelpRegexpForVersion = here.Doc(`
        Print the version of this Pinniped CLI

        Usage:
          version \[flags\]

        Flags:
          -h, --help   help for version
    `)

    emptyVersionRegexp = `version.Info{Major:"", Minor:"", GitVersion:".*", GitCommit:".*", GitTreeState:"", BuildDate:".*", GoVersion:".*", Compiler:".*", Platform:".*/.*"}`
)

func TestNewVersionCmd(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name             string
        args             []string
        wantError        bool
        wantStdoutRegexp string
        wantStderr       string
    }{
        {
            name:             "no flags",
            args:             []string{},
            wantStdoutRegexp: emptyVersionRegexp + "\n",
        },
        {
            name:             "help flag passed",
            args:             []string{"--help"},
            wantStdoutRegexp: knownGoodHelpRegexpForVersion,
        },
        {
            name:             "arg passed",
            args:             []string{"tuna"},
            wantError:        true,
            wantStdoutRegexp: `Error: unknown command "tuna" for "version"` + "\n" + knownGoodUsageRegexpForVersion,
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            cmd := newVersionCommand()
            require.NotNil(t, cmd)

            var stdout, stderr bytes.Buffer
            cmd.SetOut(&stdout)
            cmd.SetErr(&stderr)
            cmd.SetArgs(tt.args)
            err := cmd.Execute()
            if tt.wantError {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
            require.Regexp(t, tt.wantStdoutRegexp, stdout.String(), "unexpected stdout")
            require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
        })
    }
}
@@ -1,10 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import "go.pinniped.dev/cmd/pinniped/cmd"

func main() {
    cmd.Execute()
}
@@ -1,39 +0,0 @@
# Deploying

## Connecting Pinniped to an Identity Provider

If you would like to try Pinniped, but you don't have a compatible identity provider,
you can use Pinniped's test identity provider.
See [deploy/local-user-authenticator/README.md](../../deploy/local-user-authenticator/README.md)
for details.

## Installing the Latest Version with Default Options

```bash
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/$(curl https://api.github.com/repos/vmware-tanzu/pinniped/releases/latest -s | jq .name -r)/install-pinniped-concierge.yaml
```

## Installing an Older Version with Default Options

Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
and use it to replace the version number in the URL below.

```bash
# Replace v0.2.0 with your preferred version in the URL below
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.2.0/install-pinniped-concierge.yaml
```

## Installing with Custom Options

Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
in the `deploy/concierge` directory.
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).

1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
1. The configuration options are in [deploy/concierge/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below (see the sketch after this list). Use the release version tag as the `image_tag` value.
1. In a terminal, `cd` to this `deploy/concierge` directory.
1. To generate the final YAML files, run `ytt --file .`.
1. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app pinniped --diff-changes --file -`
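
As a sketch of overriding a value from the command line (`--data-value` is a standard `ytt` flag; the tag shown is only an example):

```bash
# Hypothetical example: deploy release v0.2.0 by overriding image_tag.
ytt --file . --data-value image_tag=v0.2.0 \
  | kapp deploy --yes --app pinniped --diff-changes --file -
```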
@@ -1,146 +0,0 @@

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.4.0
  creationTimestamp: null
  name: webhookauthenticators.authentication.concierge.pinniped.dev
spec:
  group: authentication.concierge.pinniped.dev
  names:
    categories:
    - all
    - authenticator
    - authenticators
    kind: WebhookAuthenticator
    listKind: WebhookAuthenticatorList
    plural: webhookauthenticators
    singular: webhookauthenticator
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .spec.endpoint
      name: Endpoint
      type: string
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: WebhookAuthenticator describes the configuration of a webhook
          authenticator.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: Spec for configuring the authenticator.
            properties:
              endpoint:
                description: Webhook server endpoint URL.
                minLength: 1
                pattern: ^https://
                type: string
              tls:
                description: TLS configuration.
                properties:
                  certificateAuthorityData:
                    description: X.509 Certificate Authority (base64-encoded PEM bundle).
                      If omitted, a default set of system roots will be trusted.
                    type: string
                type: object
            required:
            - endpoint
            type: object
          status:
            description: Status of the authenticator.
            properties:
              conditions:
                description: Represents the observations of the authenticator's current
                  state.
                items:
                  description: Condition status of a resource (mirrored from the metav1.Condition
                    type added in Kubernetes 1.19). In a future API version we can
                    switch to using the upstream type. See https://github.com/kubernetes/apimachinery/blob/v0.19.0/pkg/apis/meta/v1/types.go#L1353-L1413.
                  properties:
                    lastTransitionTime:
                      description: lastTransitionTime is the last time the condition
                        transitioned from one status to another. This should be when
                        the underlying condition changed. If that is not known, then
                        using the time when the API field changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: message is a human readable message indicating
                        details about the transition. This may be an empty string.
                      maxLength: 32768
                      type: string
                    observedGeneration:
                      description: observedGeneration represents the .metadata.generation
                        that the condition was set based upon. For instance, if .metadata.generation
                        is currently 12, but the .status.conditions[x].observedGeneration
                        is 9, the condition is out of date with respect to the current
                        state of the instance.
                      format: int64
                      minimum: 0
                      type: integer
                    reason:
                      description: reason contains a programmatic identifier indicating
                        the reason for the condition's last transition. Producers
                        of specific condition types may define expected values and
                        meanings for this field, and whether the values are considered
                        a guaranteed API. The value should be a CamelCase string.
                        This field may not be empty.
                      maxLength: 1024
                      minLength: 1
                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                      type: string
                    status:
                      description: status of the condition, one of True, False, Unknown.
                      enum:
                      - "True"
                      - "False"
                      - Unknown
                      type: string
                    type:
                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
                        --- Many .condition.type values are consistent across resources
                        like Available, but because arbitrary conditions can be useful
                        (see .node.status.conditions), the ability to deconflict is
                        important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
                      maxLength: 316
                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                      type: string
                  required:
                  - lastTransitionTime
                  - message
                  - reason
                  - status
                  - type
                  type: object
                type: array
                x-kubernetes-list-map-keys:
                - type
                x-kubernetes-list-type: map
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
    subresources: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
@@ -1,108 +0,0 @@

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.4.0
  creationTimestamp: null
  name: credentialissuers.config.concierge.pinniped.dev
spec:
  group: config.concierge.pinniped.dev
  names:
    kind: CredentialIssuer
    listKind: CredentialIssuerList
    plural: credentialissuers
    singular: credentialissuer
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          status:
            description: Status of the credential issuer.
            properties:
              kubeConfigInfo:
                description: Information needed to form a valid Pinniped-based kubeconfig
                  using this credential issuer.
                properties:
                  certificateAuthorityData:
                    description: The K8s API server CA bundle.
                    minLength: 1
                    type: string
                  server:
                    description: The K8s API server URL.
                    minLength: 1
                    pattern: ^https://|^http://
                    type: string
                required:
                - certificateAuthorityData
                - server
                type: object
              strategies:
                description: List of integration strategies that were attempted by
                  Pinniped.
                items:
                  description: Status of an integration strategy that was attempted
                    by Pinniped.
                  properties:
                    lastUpdateTime:
                      description: When the status was last checked.
                      format: date-time
                      type: string
                    message:
                      description: Human-readable description of the current status.
                      minLength: 1
                      type: string
                    reason:
                      description: Reason for the current status.
                      enum:
                      - FetchedKey
                      - CouldNotFetchKey
                      type: string
                    status:
                      description: Status of the attempted integration strategy.
                      enum:
                      - Success
                      - Error
                      type: string
                    type:
                      description: Type of integration attempted.
                      enum:
                      - KubeClusterSigningCertificate
                      type: string
                  required:
                  - lastUpdateTime
                  - message
                  - reason
                  - status
                  - type
                  type: object
                type: array
            required:
            - strategies
            type: object
        required:
        - status
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
@@ -1,199 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels: #@ labels()
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ defaultResourceNameWithSuffix("config")
  namespace: #@ namespace()
  labels: #@ labels()
data:
  #! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
  #@yaml/text-templated-strings
  pinniped.yaml: |
    discovery:
      url: (@= data.values.discovery_url or "null" @)
    api:
      servingCertificate:
        durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
        renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
    names:
      servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @)
      credentialIssuer: (@= defaultResourceNameWithSuffix("config") @)
      apiService: (@= defaultResourceNameWithSuffix("api") @)
    labels: (@= json.encode(labels()).rstrip() @)
    kubeCertAgent:
      namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
      (@ if data.values.kube_cert_agent_image: @)
      image: (@= data.values.kube_cert_agent_image @)
      (@ else: @)
      (@ if data.values.image_digest: @)
      image: (@= data.values.image_repo + "@" + data.values.image_digest @)
      (@ else: @)
      image: (@= data.values.image_repo + ":" + data.values.image_tag @)
      (@ end @)
      (@ end @)
      (@ if data.values.image_pull_dockerconfigjson: @)
      imagePullSecrets:
        - image-pull-secret
      (@ end @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ namespace()
  labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  replicas: #@ data.values.replicas
  selector:
    matchLabels: #@ defaultLabel()
  template:
    metadata:
      labels: #@ defaultLabel()
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      securityContext:
        runAsUser: 1001
        runAsGroup: 1001
      serviceAccountName: #@ defaultResourceName()
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: #@ defaultResourceName()
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "100m"
              memory: "128Mi"
          args:
            - --config=/etc/config/pinniped.yaml
            - --downward-api-path=/etc/podinfo
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8443
              scheme: HTTPS
            initialDelaySeconds: 2
            timeoutSeconds: 15
            periodSeconds: 10
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8443
              scheme: HTTPS
            initialDelaySeconds: 2
            timeoutSeconds: 3
            periodSeconds: 10
            failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ defaultResourceNameWithSuffix("config")
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master #! Allow running on master nodes too
          effect: NoSchedule
      #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
      #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
      #!priorityClassName: system-cluster-critical
      #! This will help make sure our multiple pods run on different nodes, making
      #! our deployment "more" "HA".
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels: #@ defaultLabel()
                topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
  #! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
  name: #@ defaultResourceNameWithSuffix("api")
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  type: ClusterIP
  selector: #@ defaultLabel()
  ports:
    - protocol: TCP
      port: 443
      targetPort: 8443
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.login.concierge.pinniped.dev
  labels: #@ labels()
spec:
  version: v1alpha1
  group: login.concierge.pinniped.dev
  groupPriorityMinimum: 2500
  versionPriority: 10
  #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
  service:
    name: #@ defaultResourceNameWithSuffix("api")
    namespace: #@ namespace()
    port: 443
@@ -1,30 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")

#@ def defaultResourceName():
#@   return data.values.app_name
#@ end

#@ def defaultResourceNameWithSuffix(suffix):
#@   return data.values.app_name + "-" + suffix
#@ end

#@ def namespace():
#@   if data.values.into_namespace:
#@     return data.values.into_namespace
#@   else:
#@     return data.values.namespace
#@   end
#@ end

#@ def defaultLabel():
app: #@ data.values.app_name
#@ end

#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)
#@ end
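#! For orientation (not part of the original file): with example data values
#! app_name: pinniped-concierge and custom_labels: {team: auth}, labels()
#! would render roughly as:
#!   labels:
#!     app: pinniped-concierge
#!     team: auth
#! while defaultLabel() alone renders only the app: pinniped-concierge pair,
#! which is why it is used for pod selectors (which must stay stable) and
#! labels() is used for everything else.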
@@ -1,200 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

#! Give permission to various cluster-scoped objects
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  labels: #@ labels()
rules:
  - apiGroups: [ "" ]
    resources: [ namespaces ]
    verbs: [ get, list, watch ]
  - apiGroups: [ apiregistration.k8s.io ]
    resources: [ apiservices ]
    verbs: [ create, get, list, patch, update, watch ]
  - apiGroups: [ admissionregistration.k8s.io ]
    resources: [ validatingwebhookconfigurations, mutatingwebhookconfigurations ]
    verbs: [ get, list, watch ]
  - apiGroups: [ policy ]
    resources: [ podsecuritypolicies ]
    verbs: [ use ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: ClusterRole
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  apiGroup: rbac.authorization.k8s.io

#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  namespace: #@ namespace()
  labels: #@ labels()
rules:
  - apiGroups: [ "" ]
    resources: [ services ]
    verbs: [ create, get, list, patch, update, watch ]
  - apiGroups: [ "" ]
    resources: [ secrets ]
    verbs: [ create, get, list, patch, update, watch, delete ]
  #! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
  - apiGroups: [ "" ]
    resources: [ pods ]
    verbs: [ create, get, list, patch, update, watch, delete ]
  #! We need to be able to exec into pods in our namespace so we can grab the API server's private key
  - apiGroups: [ "" ]
    resources: [ pods/exec ]
    verbs: [ create ]
  - apiGroups: [ config.concierge.pinniped.dev, authentication.concierge.pinniped.dev ]
    resources: [ "*" ]
    verbs: [ create, get, list, update, watch ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  namespace: #@ namespace()
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: Role
  name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
  apiGroup: rbac.authorization.k8s.io

#! Give permission to read pods in the kube-system namespace so we can find the API server's private key
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: #@ defaultResourceNameWithSuffix("kube-system-pod-read")
  namespace: kube-system
  labels: #@ labels()
rules:
  - apiGroups: [ "" ]
    resources: [ pods ]
    verbs: [ get, list, watch ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("kube-system-pod-read")
  namespace: kube-system
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: Role
  name: #@ defaultResourceNameWithSuffix("kube-system-pod-read")
  apiGroup: rbac.authorization.k8s.io

#! Allow both authenticated and unauthenticated TokenCredentialRequests (i.e. allow all requests)
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: #@ defaultResourceNameWithSuffix("create-token-credential-requests")
  labels: #@ labels()
rules:
  - apiGroups: [ login.concierge.pinniped.dev ]
    resources: [ tokencredentialrequests ]
    verbs: [ create ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("create-token-credential-requests")
  labels: #@ labels()
subjects:
  - kind: Group
    name: system:authenticated
    apiGroup: rbac.authorization.k8s.io
  - kind: Group
    name: system:unauthenticated
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: #@ defaultResourceNameWithSuffix("create-token-credential-requests")
  apiGroup: rbac.authorization.k8s.io

#! Give permissions for subjectaccessreviews, tokenreview that is needed by aggregated api servers
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceName()
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: ClusterRole
  name: system:auth-delegator
  apiGroup: rbac.authorization.k8s.io

#! Give permissions for a special configmap of CA bundles that is needed by aggregated api servers
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("extension-apiserver-authentication-reader")
  namespace: kube-system
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: Role
  name: extension-apiserver-authentication-reader
  apiGroup: rbac.authorization.k8s.io

#! Give permission to list and watch ConfigMaps in kube-public
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("cluster-info-lister-watcher")
  namespace: kube-public
  labels: #@ labels()
rules:
  - apiGroups: [ "" ]
    resources: [ configmaps ]
    verbs: [ list, watch ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceNameWithSuffix("cluster-info-lister-watcher")
  namespace: kube-public
  labels: #@ labels()
subjects:
  - kind: ServiceAccount
    name: #@ defaultResourceName()
    namespace: #@ namespace()
roleRef:
  kind: Role
  name: #@ defaultResourceNameWithSuffix("cluster-info-lister-watcher")
  apiGroup: rbac.authorization.k8s.io
@@ -1,52 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@data/values
---

app_name: pinniped-concierge

#! Creates a new namespace statically in yaml with the given name and installs the app into that namespace.
namespace: pinniped-concierge
#! If specified, assumes that a namespace of the given name already exists and installs the app into that namespace.
#! If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used.
into_namespace: #! e.g. my-preexisting-namespace

#! All resources created statically by yaml at install-time and all resources created dynamically
#! by controllers at runtime will be labelled with `app: $app_name` and also with the labels
#! specified here. The value of `custom_labels` must be a map of string keys to string values.
#! The app can be uninstalled either by:
#! 1. Deleting the static install-time yaml resources including the static namespace, which will cascade and also delete
#!    resources that were dynamically created by controllers at runtime
#! 2. Or, deleting all resources by label, which does not assume that there was a static install-time yaml namespace.
custom_labels: {} #! e.g. {myCustomLabelName: myCustomLabelValue, otherCustomLabelName: otherCustomLabelValue}

#! Specify how many replicas of the Pinniped server to run.
replicas: 2

#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
image_repo: docker.io/getpinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest

#! Optionally specify a different image for the "kube-cert-agent" pod which is scheduled
#! on the control plane. This image only needs to include `sleep` and `cat` binaries.
#! By default, the same image specified for image_repo/image_digest/image_tag will be re-used.
kube_cert_agent_image:

#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}

#! Pinniped will try to guess the right K8s API URL for sharing that information with potential clients.
#! This setting allows the guess to be overridden.
#! Optional.
discovery_url: #! e.g., https://example.com

#! Specify the duration and renewal interval for the API serving certificate.
#! The defaults are set to expire the cert about every 30 days, and to rotate it
#! about every 25 days.
api_serving_certificate_duration_seconds: 2592000
api_serving_certificate_renew_before_seconds: 2160000
@@ -1,17 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "labels")

#@overlay/match by=overlay.subset({"kind": "CustomResourceDefinition", "metadata":{"name":"credentialissuers.config.concierge.pinniped.dev"}}), expects=1
---
metadata:
  #@overlay/match missing_ok=True
  labels: #@ labels()

#@overlay/match by=overlay.subset({"kind": "CustomResourceDefinition", "metadata":{"name":"webhookauthenticators.authentication.concierge.pinniped.dev"}}), expects=1
---
metadata:
  #@overlay/match missing_ok=True
  labels: #@ labels()
@@ -1,163 +0,0 @@
# Deploying local-user-authenticator

## What is local-user-authenticator?

The local-user-authenticator app is an identity provider used for integration testing and demos.
If you would like to demo Pinniped, but you don't have a compatible identity provider handy,
you can use Pinniped's local-user-authenticator identity provider. Note that this is not recommended for
production use.

The local-user-authenticator is a Kubernetes Deployment which runs a webhook server that implements the Kubernetes
[Webhook Token Authentication interface](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication).

User accounts can be created and edited dynamically using `kubectl` commands (see below).

## Installing the Latest Version with Default Options

```bash
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/latest/download/install-local-user-authenticator.yaml
```

## Installing an Older Version with Default Options

Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
and use it to replace the version number in the URL below.

```bash
# Replace v0.2.0 with your preferred version in the URL below
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.2.0/install-local-user-authenticator.yaml
```

## Installing with Custom Options

Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
in the `deploy/local-user-authenticator` directory.
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).

1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
1. The configuration options are in [deploy/local-user-authenticator/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below. Use the release version tag as the `image_tag` value.
1. In a terminal, `cd` to this `deploy/local-user-authenticator` directory.
1. To generate the final YAML files, run `ytt --file .`.
1. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app local-user-authenticator --diff-changes --file -`

## Configuring After Installing

### Create Users

Use `kubectl` to create, edit, and delete user accounts by creating a `Secret` for each user account in the same
namespace where local-user-authenticator is deployed. The name of the `Secret` resource is the username.
Store the user's group membership and `bcrypt`-hashed password as the contents of the `Secret`.
For example, to create a user named `pinny-the-seal` with the password `password123`
who belongs to the groups `group1` and `group2`, use:

```bash
kubectl create secret generic pinny-the-seal \
  --namespace local-user-authenticator \
  --from-literal=groups=group1,group2 \
  --from-literal=passwordHash=$(htpasswd -nbBC 10 x password123 | sed -e "s/^x://")
```

Note that the above command requires a tool capable of generating a `bcrypt` hash. It uses `htpasswd`,
which is installed on most macOS systems, and can be
installed on some Linux systems via the `apache2-utils` package (e.g., `apt-get install apache2-utils`).

### Get the local-user-authenticator App's Auto-Generated Certificate Authority Bundle

Fetch the auto-generated CA bundle for the local-user-authenticator's HTTPS endpoint.

```bash
kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator \
  -o jsonpath={.data.caCertificate} \
  | base64 -d \
  | tee /tmp/local-user-authenticator-ca
```

### Configuring Pinniped to Use local-user-authenticator as an Identity Provider

When installing Pinniped on the same cluster, configure local-user-authenticator as an Identity Provider for Pinniped
using the webhook URL `https://local-user-authenticator.local-user-authenticator.svc/authenticate`
along with the CA bundle fetched by the above command. See [doc/demo.md](../../doc/demo.md) for an example; a sketch of such a resource is shown below.
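
This sketch is based on the `WebhookAuthenticator` CRD elsewhere in this repo; the `metadata` values are illustrative, and `certificateAuthorityData` must be the base64-encoded CA bundle fetched above:

```yaml
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
kind: WebhookAuthenticator
metadata:
  name: local-user-authenticator  # illustrative name
  namespace: pinniped-concierge   # illustrative; use the namespace where Pinniped was installed
spec:
  endpoint: https://local-user-authenticator.local-user-authenticator.svc/authenticate
  tls:
    # The base64-encoded PEM CA bundle, e.g. the output of: base64 < /tmp/local-user-authenticator-ca
    certificateAuthorityData: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t...
```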

## Optional: Manually Testing the Webhook Endpoint After Installing

The following steps demonstrate the API of the local-user-authenticator app. Typically, a user would not need to
interact with this API directly. Pinniped will automatically integrate with this API if the local-user-authenticator
is configured as an identity provider for Pinniped.

1. Start a pod from which you can curl the endpoint from inside the cluster.

   ```bash
   kubectl run curlpod --image=curlimages/curl --command -- /bin/sh -c "while true; do echo hi; sleep 120; done"
   ```

1. Copy the CA bundle that was fetched above onto the new pod.

   ```bash
   kubectl cp /tmp/local-user-authenticator-ca curlpod:/tmp/local-user-authenticator-ca
   ```

1. Run a `curl` command to try to authenticate as the user created above.

   ```bash
   kubectl -it exec curlpod -- curl https://local-user-authenticator.local-user-authenticator.svc/authenticate \
     --cacert /tmp/local-user-authenticator-ca \
     -H 'Content-Type: application/json' -H 'Accept: application/json' -d '
   {
     "apiVersion": "authentication.k8s.io/v1beta1",
     "kind": "TokenReview",
     "spec": {
       "token": "pinny-the-seal:password123"
     }
   }'
   ```

   When authentication is successful the above command should return some JSON similar to the following.
   Note that the value of `authenticated` is `true` to indicate a successful authentication.

   ```json
   {
     "kind": "TokenReview",
     "apiVersion": "authentication.k8s.io/v1beta1",
     "metadata": {
       "creationTimestamp": null
     },
     "spec": {},
     "status": {
       "authenticated": true,
       "user": {
         "username": "pinny-the-seal",
         "uid": "19c433ec-8f58-44ca-9ef0-2d1081ccb876",
         "groups": [
           "group1",
           "group2"
         ]
       }
     }
   }
   ```

   Trying the above `curl` command again with the wrong username or password in the body of the request
   should result in a JSON response which indicates that the authentication failed.

   ```json
   {
     "kind": "TokenReview",
     "apiVersion": "authentication.k8s.io/v1beta1",
     "metadata": {
       "creationTimestamp": null
     },
     "spec": {},
     "status": {
       "user": {}
     }
   }
   ```

1. Remove the curl pod.

   ```bash
   kubectl delete pod curlpod
   ```
@@ -1,83 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")

---
apiVersion: v1
kind: Namespace
metadata:
  name: local-user-authenticator
  labels:
    name: local-user-authenticator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-user-authenticator
  namespace: local-user-authenticator
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: local-user-authenticator
  labels:
    app: local-user-authenticator
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-user-authenticator
  namespace: local-user-authenticator
  labels:
    app: local-user-authenticator
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-user-authenticator
  template:
    metadata:
      labels:
        app: local-user-authenticator
    spec:
      securityContext:
        runAsUser: 1001
        runAsGroup: 1001
      serviceAccountName: local-user-authenticator
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: local-user-authenticator
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          command: #! override the default entrypoint
            - /usr/local/bin/local-user-authenticator
---
apiVersion: v1
kind: Service
metadata:
  name: local-user-authenticator
  namespace: local-user-authenticator
  labels:
    app: local-user-authenticator
spec:
  type: ClusterIP
  selector:
    app: local-user-authenticator
  ports:
    - protocol: TCP
      port: 443
      targetPort: 8443
@@ -1,30 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")

#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: local-user-authenticator
  namespace: local-user-authenticator
rules:
  - apiGroups: [""]
    resources: [secrets]
    verbs: [create, get, list, patch, update, watch]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: local-user-authenticator
  namespace: local-user-authenticator
subjects:
  - kind: ServiceAccount
    name: local-user-authenticator
    namespace: local-user-authenticator
roleRef:
  kind: Role
  name: local-user-authenticator
  apiGroup: rbac.authorization.k8s.io
@@ -1,16 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@data/values
---

#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
image_repo: docker.io/getpinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest

#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
@@ -1,184 +0,0 @@
# Deploying the Pinniped Supervisor

## What is the Pinniped Supervisor?

The Pinniped Supervisor app is a component of the Pinniped OIDC and Cluster Federation solutions.
It can be deployed when those features are needed.

## Installing the Latest Version with Default Options

```bash
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/latest/download/install-pinniped-supervisor.yaml
```

## Installing an Older Version with Default Options

Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
and use it to replace the version number in the URL below.

```bash
# Replace v0.3.0 with your preferred version in the URL below
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.3.0/install-pinniped-supervisor.yaml
```

## Installing with Custom Options

Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
in the `deploy/supervisor` directory.
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).

1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
1. The configuration options are in [deploy/supervisor/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below. Use the release version tag as the `image_tag` value.
1. In a terminal, `cd` to this `deploy/supervisor` directory.
1. To generate the final YAML files, run `ytt --file .`.
1. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app pinniped-supervisor --diff-changes --file -`

## Configuring After Installing

### Exposing the Supervisor App as a Service

The Supervisor app's endpoints should be exposed as HTTPS endpoints with proper TLS certificates signed by a
Certificate Authority which will be trusted by your users' web browsers. Because there are
many ways to expose TLS services from a Kubernetes cluster, the Supervisor app leaves this up to the user.
The most common ways are:

1. Define an [`Ingress` resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) with TLS certificates.
   In this case, the ingress will terminate TLS. Typically, the ingress will then talk plain HTTP to its backend,
   which would be a NodePort or LoadBalancer Service in front of the HTTP port 8080 of the Supervisor pods.

   The required configuration of the Ingress is specific to your cluster's Ingress Controller, so please refer to the
   documentation from your Kubernetes provider. If you are using a cluster from a cloud provider, then you'll probably
   want to start with that provider's documentation. For example, if your cluster is a Google GKE cluster, refer to
   the [GKE documentation for Ingress](https://cloud.google.com/kubernetes-engine/docs/concepts/ingress).
   Otherwise, the Kubernetes documentation provides a list of popular
   [Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/), including
   [Contour](https://projectcontour.io/) and many others.

1. Or, define a [TCP LoadBalancer Service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer)
   which is a layer 4 load balancer and does not terminate TLS. In this case, the Supervisor app will need to be
   configured with TLS certificates and will terminate the TLS connection itself (see the section about OIDCProvider
   below). The LoadBalancer Service should be configured to use the HTTPS port 443 of the Supervisor pods as its `targetPort`.

   *Warning:* Do not expose the Supervisor's port 8080 to the public. It would not be secure for the OIDC protocol
   to use HTTP, because the user's secret OIDC tokens would be transmitted across the network without encryption.

1. Or, expose the Supervisor app using a Kubernetes service mesh technology, e.g. [Istio](https://istio.io/).
   Please see the documentation for your service mesh. Generally, the setup would be similar to the description
   above for defining an ingress, except the service mesh would probably provide both the ingress with TLS termination
   and the service.

For either of the first two options mentioned above, if you installed using `ytt` then you can use
the related `service_*` options from [deploy/supervisor/values.yaml](values.yaml) to create a Service.
If you installed using `install-pinniped-supervisor.yaml` then you can create
the Service separately after installing the Supervisor app. There is no `Ingress` included in the `ytt` templates,
so if you choose to use an Ingress then you'll need to create that separately after installing the Supervisor app.

#### Example: Using a LoadBalancer Service

This is an example of creating a LoadBalancer Service to expose port 8443 of the Supervisor app outside the cluster.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: pinniped-supervisor-loadbalancer
  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-pinniped-supervisor.yaml.
  namespace: pinniped-supervisor
spec:
  type: LoadBalancer
  selector:
    # Assuming that this is how the supervisor pods are labeled. This is the default in install-pinniped-supervisor.yaml.
    app: pinniped-supervisor
  ports:
    - protocol: TCP
      port: 443
      targetPort: 8443
```

#### Example: Using a NodePort Service

A NodePort Service exposes the app as a port on the nodes of the cluster.

This is convenient for use with kind clusters, because kind can
[expose node ports as localhost ports on the host machine](https://kind.sigs.k8s.io/docs/user/configuration/#extra-port-mappings)
without requiring an Ingress, although
[kind also supports several Ingress Controllers](https://kind.sigs.k8s.io/docs/user/ingress).

A NodePort Service could also be used behind an Ingress which is terminating TLS.

For example:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: pinniped-supervisor-nodeport
  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-pinniped-supervisor.yaml.
  namespace: pinniped-supervisor
spec:
  type: NodePort
  selector:
    # Assuming that this is how the supervisor pods are labeled. This is the default in install-pinniped-supervisor.yaml.
    app: pinniped-supervisor
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080
      nodePort: 31234 # This is the port that you would forward to the kind host. Or omit this key for a random port.
```

### Configuring the Supervisor to Act as an OIDC Provider

The Supervisor can be configured as an OIDC provider by creating `OIDCProvider` resources
in the same namespace where the Supervisor app was installed. For example:

```yaml
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: OIDCProvider
metadata:
  name: my-provider
  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-pinniped-supervisor.yaml.
  namespace: pinniped-supervisor
spec:
  # The hostname would typically match the DNS name of the public ingress or load balancer for the cluster.
  # Any path can be specified, which allows a single hostname to have multiple different issuers. The path is optional.
  issuer: https://my-issuer.example.com/any/path

  # Optionally configure the name of a Secret in the same namespace, of type `kubernetes.io/tls`,
  # which contains the TLS serving certificate for the HTTPS endpoints served by this OIDC Provider.
  tls:
    secretName: my-tls-cert-secret
```

#### Configuring TLS for the Supervisor OIDC Endpoints

If you have terminated TLS outside the app, for example using an Ingress with TLS certificates, then you do not need to
configure TLS certificates on the OIDCProvider.

If you are using a LoadBalancer Service to expose the Supervisor app outside your cluster, then you will
also need to configure the Supervisor app to terminate TLS. There are two places to configure TLS certificates:

1. Each `OIDCProvider` can be configured with TLS certificates, using the `spec.tls.secretName` field.

1. The default TLS certificate for all OIDC providers can be configured by creating a Secret called
   `pinniped-supervisor-default-tls-certificate` in the same namespace in which the Supervisor was installed.

The default TLS certificate will be used for all OIDC providers which did not declare a `spec.tls.secretName`.
Also, the `spec.tls.secretName` will be ignored for incoming requests to the OIDC endpoints
that use an IP address as the host, so those requests will always present the default TLS certificates
to the client. When the request includes the hostname, and that hostname matches the hostname of an `Issuer`,
then the TLS certificate defined by the `spec.tls.secretName` will be used. If that issuer did not
define `spec.tls.secretName` then the default TLS certificate will be used. If neither exists,
then the client will get a TLS error because the server will not present any TLS certificate.

It is recommended that you have a DNS entry for your load balancer or Ingress, and that you configure the
OIDC provider's `Issuer` using that DNS hostname, and that the TLS certificate for that provider also
covers that same hostname.

You can create the certificate Secrets however you like, for example you could use [cert-manager](https://cert-manager.io/)
or `kubectl create secret tls`.
Keep in mind that your users will load some of these endpoints in their web browsers, so the TLS certificates
should be signed by a Certificate Authority that will be trusted by their browsers.
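
For instance, a minimal sketch with `kubectl` (the Secret name matches the `spec.tls.secretName` from the example above; the file names are hypothetical):

```bash
# Hypothetical cert/key files issued for the issuer's DNS hostname.
kubectl create secret tls my-tls-cert-secret \
  --namespace pinniped-supervisor \
  --cert ./my-issuer.example.com.crt \
  --key ./my-issuer.example.com.key
```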
@@ -1,121 +0,0 @@

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.4.0
  creationTimestamp: null
  name: oidcproviders.config.supervisor.pinniped.dev
spec:
  group: config.supervisor.pinniped.dev
  names:
    kind: OIDCProvider
    listKind: OIDCProviderList
    plural: oidcproviders
    singular: oidcprovider
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: OIDCProvider describes the configuration of an OIDC provider.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: Spec of the OIDC provider.
            properties:
              issuer:
                description: "Issuer is the OIDC Provider's issuer, per the OIDC Discovery
                  Metadata document, as well as the identifier that it will use for
                  the iss claim in issued JWTs. This field will also be used as the
                  base URL for any endpoints used by the OIDC Provider (e.g., if your
                  issuer is https://example.com/foo, then your authorization endpoint
                  will look like https://example.com/foo/some/path/to/auth/endpoint).
                  \n See https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.3
                  for more information."
                minLength: 1
                type: string
              tls:
                description: TLS configures how this OIDCProvider is served over Transport
                  Layer Security (TLS).
                properties:
                  secretName:
                    description: "SecretName is an optional name of a Secret in the
                      same namespace, of type `kubernetes.io/tls`, which contains
                      the TLS serving certificate for the HTTPS endpoints served by
                      this OIDCProvider. When provided, the TLS Secret named here
                      must contain keys named `tls.crt` and `tls.key` that contain
                      the certificate and private key to use for TLS. \n Server Name
                      Indication (SNI) is an extension to the Transport Layer Security
                      (TLS) supported by all major browsers. \n SecretName is required
                      if you would like to use different TLS certificates for issuers
                      of different hostnames. SNI requests do not include port numbers,
                      so all issuers with the same DNS hostname must use the same
                      SecretName value even if they have different port numbers. \n
                      SecretName is not required when you would like to use only the
                      HTTP endpoints (e.g. when terminating TLS at an Ingress). It
                      is also not required when you would like all requests to this
                      OIDC Provider's HTTPS endpoints to use the default TLS certificate,
                      which is configured elsewhere. \n When your Issuer URL's host
                      is an IP address, then this field is ignored. SNI does not work
                      for IP addresses."
                    type: string
                type: object
            required:
            - issuer
            type: object
          status:
            description: Status of the OIDC provider.
            properties:
              jwksSecret:
                description: JWKSSecret holds the name of the secret in which this
                  OIDC Provider's signing/verification keys are stored. If it is empty,
                  then the signing/verification keys are either unknown or they don't
                  exist.
                properties:
                  name:
                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                      TODO: Add other useful fields. apiVersion, kind, uid?'
                    type: string
                type: object
              lastUpdateTime:
                description: LastUpdateTime holds the time at which the Status was
                  last updated. It is a pointer to get around some undesirable behavior
                  with respect to the empty metav1.Time value (see https://github.com/kubernetes/kubernetes/issues/86811).
                format: date-time
                type: string
              message:
                description: Message provides human-readable details about the Status.
                type: string
              status:
                description: Status holds an enum that describes the state of this
                  OIDC Provider. Note that this Status can represent success or failure.
                enum:
                - Success
                - Duplicate
                - Invalid
                type: string
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
@@ -1,141 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels: #@ labels()
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ defaultResourceNameWithSuffix("static-config")
  namespace: #@ namespace()
  labels: #@ labels()
data:
  #@yaml/text-templated-strings
  pinniped.yaml: |
    names:
      defaultTLSCertificateSecret: (@= defaultResourceNameWithSuffix("default-tls-certificate") @)
    labels: (@= json.encode(labels()).rstrip() @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ namespace()
  labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  replicas: #@ data.values.replicas
  selector:
    matchLabels: #@ defaultLabel()
  template:
    metadata:
      labels: #@ defaultLabel()
    spec:
      securityContext:
        runAsUser: 1001
        runAsGroup: 1001
      serviceAccountName: #@ defaultResourceName()
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: #@ defaultResourceName()
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          command: #! override the default entrypoint
            - /usr/local/bin/pinniped-supervisor
          args:
            - /etc/podinfo
            - /etc/config/pinniped.yaml
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "100m"
              memory: "128Mi"
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
          ports:
            - containerPort: 8080
              protocol: TCP
            - containerPort: 8443
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 2
            timeoutSeconds: 15
            periodSeconds: 10
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 2
            timeoutSeconds: 3
            periodSeconds: 10
            failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ defaultResourceNameWithSuffix("static-config")
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      #! This will help make sure our multiple pods run on different nodes, making
      #! our deployment "more" "HA".
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels: #@ defaultLabel()
|
||||
topologyKey: kubernetes.io/hostname
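A sketch of rendering and applying this template with the Carvel tools, assuming it sits alongside its helpers and data-values files in a directory (the `deploy/supervisor` path is an assumption about the repo layout):

```bash
# Render the ytt templates to plain Kubernetes YAML, then apply them.
# kapp could be used in place of kubectl for lifecycle management.
ytt --file deploy/supervisor > /tmp/supervisor-install.yaml
kubectl apply -f /tmp/supervisor-install.yaml
```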
@@ -1,30 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")

#@ def defaultResourceName():
#@   return data.values.app_name
#@ end

#@ def defaultResourceNameWithSuffix(suffix):
#@   return data.values.app_name + "-" + suffix
#@ end

#@ def namespace():
#@   if data.values.into_namespace:
#@     return data.values.into_namespace
#@   else:
#@     return data.values.namespace
#@   end
#@ end

#@ def defaultLabel():
app: #@ data.values.app_name
#@ end

#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)
#@ end
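A sketch of what `labels()` expands to, assuming the default `app_name` and one custom label passed at render time (the `team: auth` value is hypothetical):

```bash
# Render with a custom label; every resource's labels block should then contain
# both the default app label and the custom one.
ytt -f . --data-value-yaml 'custom_labels={team: auth}'
# Expected shape of each rendered labels block (values assumed):
#   labels:
#     app: pinniped-supervisor
#     team: auth
```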
@@ -1,36 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
rules:
- apiGroups: [""]
  resources: [secrets]
  verbs: [create, get, list, patch, update, watch, delete]
- apiGroups: [config.supervisor.pinniped.dev]
  resources: [oidcproviders]
  verbs: [update, get, list, watch]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
subjects:
- kind: ServiceAccount
  name: #@ defaultResourceName()
  namespace: #@ namespace()
roleRef:
  kind: Role
  name: #@ defaultResourceName()
  apiGroup: rbac.authorization.k8s.io
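A sketch for double-checking what the bound ServiceAccount can and cannot do after install, assuming the default app name and namespace:

```bash
kubectl auth can-i create secrets \
  --as system:serviceaccount:pinniped-supervisor:pinniped-supervisor \
  --namespace pinniped-supervisor    # expect: yes
kubectl auth can-i delete oidcproviders.config.supervisor.pinniped.dev \
  --as system:serviceaccount:pinniped-supervisor:pinniped-supervisor \
  --namespace pinniped-supervisor    # expect: no (the Role grants only update/get/list/watch)
```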
@@ -1,93 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

#@ if data.values.service_http_nodeport_port or data.values.service_https_nodeport_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ defaultResourceNameWithSuffix("nodeport")
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  type: NodePort
  selector:
    app: #@ data.values.app_name
  ports:
  #@ if data.values.service_http_nodeport_port:
  - name: http
    protocol: TCP
    port: #@ data.values.service_http_nodeport_port
    targetPort: 8080
    #@ if data.values.service_http_nodeport_nodeport:
    nodePort: #@ data.values.service_http_nodeport_nodeport
    #@ end
  #@ end
  #@ if data.values.service_https_nodeport_port:
  - name: https
    protocol: TCP
    port: #@ data.values.service_https_nodeport_port
    targetPort: 8443
    #@ if data.values.service_https_nodeport_nodeport:
    nodePort: #@ data.values.service_https_nodeport_nodeport
    #@ end
  #@ end
#@ end

#@ if data.values.service_http_clusterip_port or data.values.service_https_clusterip_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ defaultResourceNameWithSuffix("clusterip")
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  type: ClusterIP
  selector: #@ defaultLabel()
  ports:
  #@ if data.values.service_http_clusterip_port:
  - name: http
    protocol: TCP
    port: #@ data.values.service_http_clusterip_port
    targetPort: 8080
  #@ end
  #@ if data.values.service_https_clusterip_port:
  - name: https
    protocol: TCP
    port: #@ data.values.service_https_clusterip_port
    targetPort: 8443
  #@ end
#@ end

#@ if data.values.service_http_loadbalancer_port or data.values.service_https_loadbalancer_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ defaultResourceNameWithSuffix("loadbalancer")
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  type: LoadBalancer
  selector: #@ defaultLabel()
  #@ if data.values.service_loadbalancer_ip:
  loadBalancerIP: #@ data.values.service_loadbalancer_ip
  #@ end
  ports:
  #@ if data.values.service_http_loadbalancer_port:
  - name: http
    protocol: TCP
    port: #@ data.values.service_http_loadbalancer_port
    targetPort: 8080
  #@ end
  #@ if data.values.service_https_loadbalancer_port:
  - name: https
    protocol: TCP
    port: #@ data.values.service_https_loadbalancer_port
    targetPort: 8443
  #@ end
#@ end
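A sketch for smoke-testing the app behind whichever Service was enabled, assuming the default app name and namespace; the `/healthz` path comes from the probes in the Deployment above:

```bash
# Forward the pod's HTTP port locally and hit the health endpoint.
kubectl port-forward --namespace pinniped-supervisor deploy/pinniped-supervisor 8080:8080 &
sleep 2
curl -fsS http://127.0.0.1:8080/healthz   # expect an HTTP 200 response
```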
@@ -1,54 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@data/values
---

app_name: pinniped-supervisor

#! Creates a new namespace statically in yaml with the given name and installs the app into that namespace.
namespace: pinniped-supervisor
#! If specified, assumes that a namespace of the given name already exists and installs the app into that namespace.
#! If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used.
into_namespace: #! e.g. my-preexisting-namespace

#! All resources created statically by yaml at install-time and all resources created dynamically
#! by controllers at runtime will be labelled with `app: $app_name` and also with the labels
#! specified here. The value of `custom_labels` must be a map of string keys to string values.
#! The app can be uninstalled either by:
#! 1. Deleting the static install-time yaml resources including the static namespace, which will cascade and also delete
#!    resources that were dynamically created by controllers at runtime
#! 2. Or, deleting all resources by label, which does not assume that there was a static install-time yaml namespace.
custom_labels: {} #! e.g. {myCustomLabelName: myCustomLabelValue, otherCustomLabelName: otherCustomLabelValue}

#! Specify how many replicas of the Pinniped server to run.
replicas: 2

#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
image_repo: docker.io/getpinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest

#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}

#! Specify how to expose the Supervisor app's HTTP and/or HTTPS ports as a Service.
#! Typically you would set a value for only one of the following service types, for either HTTP or HTTPS depending on your needs.
#! An HTTP service should not be exposed outside the cluster. It would not be secure to serve OIDC endpoints to end users via HTTP.
#! Setting any of these values means that a Service of that type will be created.
#! Note that all port numbers should be numbers (not strings), i.e. use ytt's `--data-value-yaml` instead of `--data-value`.
service_http_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 8080 as its `targetPort`; e.g. 31234
service_http_nodeport_nodeport: #! the `nodePort` value of the NodePort Service, optional when `service_http_nodeport_port` is specified; e.g. 31234
service_http_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8080
service_http_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8080
service_https_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 8443 as its `targetPort`; e.g. 31243
service_https_nodeport_nodeport: #! the `nodePort` value of the NodePort Service, optional when `service_https_nodeport_port` is specified; e.g. 31243
service_https_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
service_https_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
#! The `loadBalancerIP` value of the LoadBalancer Service.
#! Ignored unless service_http_loadbalancer_port and/or service_https_loadbalancer_port are provided.
#! Optional.
service_loadbalancer_ip: #! e.g. 1.2.3.4
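A sketch of overriding these values at render time; per the note above, port numbers go through `--data-value-yaml` so they stay numbers. The chosen ports and tag are examples:

```bash
ytt -f . \
  --data-value-yaml service_https_nodeport_port=31243 \
  --data-value-yaml service_https_nodeport_nodeport=31243 \
  --data-value image_tag=v0.2.0 \
  | kubectl apply -f -
```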
@@ -1,11 +0,0 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "labels")

#@overlay/match by=overlay.subset({"kind": "CustomResourceDefinition", "metadata":{"name":"oidcproviders.config.supervisor.pinniped.dev"}}), expects=1
---
metadata:
  #@overlay/match missing_ok=True
  labels: #@ labels()
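This overlay simply stamps the shared labels onto the generated CRD. A sketch for verifying it after install (the label values depend on your data values):

```bash
kubectl get crd oidcproviders.config.supervisor.pinniped.dev \
  -o jsonpath='{.metadata.labels}{"\n"}'
# expected to include at least: {"app":"pinniped-supervisor"}
```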
@@ -1,75 +0,0 @@
# Architecture

The principal purpose of Pinniped is to allow users to access Kubernetes
clusters. Pinniped hopes to enable this access across a wide range of Kubernetes
environments with zero configuration.

This integration is implemented using a credential exchange API which takes as
input a credential from the external IDP and returns a credential which is understood by the host
Kubernetes cluster.

<img src="img/pinniped_architecture.svg" alt="Pinniped Architecture Sketch" width="300px"/>

Pinniped supports various IDP types and implements different integration strategies
for various Kubernetes distributions to make authentication possible.

## Supported Kubernetes Cluster Types

Pinniped supports the following types of Kubernetes clusters:

- Clusters where the Kube Controller Manager pod is accessible from Pinniped's pods.

Support for other types of Kubernetes distributions is coming soon.

## External Identity Provider Integrations

Pinniped will consume identity from one or more external identity providers
(IDPs). Administrators will configure external IDPs via Kubernetes custom
resources, allowing Pinniped to be managed using GitOps and standard Kubernetes tools.

Pinniped supports the following external IDP types.

1. Any webhook which implements the
   [Kubernetes TokenReview API](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication).

   In addition to allowing the integration of any existing IDP which implements this API, webhooks also
   serve as an extension point for Pinniped by allowing for integration of arbitrary custom authenticators.
   While a custom implementation may be in any language or framework, this project provides a
   sample implementation in Golang. See the `ServeHTTP` method of
   [cmd/local-user-authenticator/main.go](../cmd/local-user-authenticator/main.go).
   A sketch of the webhook's request/response wire format is shown below.
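   A minimal sketch of that wire format, reusing the local-user-authenticator endpoint and test token that appear elsewhere in this changeset; the `v1beta1` API version is an assumption about what such a webhook would serve:

   ```bash
   # Hypothetical call; Pinniped normally makes this request, not a human.
   curl -sk https://local-user-authenticator.local-user-authenticator.svc/authenticate \
     -H 'Content-Type: application/json' \
     -d '{"apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview",
          "spec": {"token": "pinny-the-seal:password123"}}'
   # On success the webhook echoes back a TokenReview whose status carries the identity:
   # {"apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview",
   #  "status": {"authenticated": true,
   #             "user": {"username": "pinny-the-seal", "groups": ["group1", "group2"]}}}
   ```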
More IDP types are coming soon.

## Cluster Integration Strategies

Pinniped will issue a cluster credential by leveraging cluster-specific
functionality. In the near term, cluster integrations will happen via different
cluster-specific flows depending on the type of cluster. In the longer term,
Pinniped hopes to contribute and leverage upstream Kubernetes extension points that
cleanly enable this integration.

Pinniped supports the following cluster integration strategies.

1. Pinniped hosts a credential exchange API endpoint via a Kubernetes aggregated API server.
   This API returns a new cluster-specific credential using the cluster's signing keypair to
   issue short-lived cluster certificates. (In the future, when the Kubernetes CSR API
   provides a way to issue short-lived certificates, then the Pinniped credential exchange API
   will use that instead of using the cluster's signing keypair.)

More cluster integration strategies are coming soon, which will allow Pinniped to
support more Kubernetes cluster types.

## `kubectl` Integration

With any of the above IDPs and integration strategies, `kubectl` commands receive the
cluster-specific credential via a
[Kubernetes client-go credential plugin](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins).
Users may use the Pinniped CLI as the credential plugin, or they may use any proprietary CLI
built with the [Pinniped Go client library](../generated).
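The generated kubeconfig wires the plugin in via an `exec` stanza on the user entry. A sketch of inspecting it, assuming a kubeconfig was generated into `/tmp/pinniped-kubeconfig` as in the demo steps elsewhere in this changeset; the stanza's exact fields are whatever `pinniped get-kubeconfig` emitted, and the shape shown in the comments is an assumption:

```bash
# Show the client-go exec credential plugin stanza that the CLI generated.
grep -A 6 'exec:' /tmp/pinniped-kubeconfig
# Expected rough shape (values are illustrative assumptions):
#   exec:
#     apiVersion: client.authentication.k8s.io/v1beta1
#     command: pinniped
#     args: [...]
```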
## Example Cluster Authentication Sequence Diagram

This diagram demonstrates using `kubectl get pods` with the Pinniped CLI configured as the credential plugin,
and with a webhook IDP configured as the identity provider for the Pinniped server.



doc/demo.md
@@ -1,198 +0,0 @@
# Trying Pinniped

## Prerequisites

1. A Kubernetes cluster of a type supported by Pinniped as described in [doc/architecture.md](../doc/architecture.md).

   Don't have a cluster handy? Consider using [kind](https://kind.sigs.k8s.io/) on your local machine.
   See below for an example of using kind.

1. An identity provider of a type supported by Pinniped as described in [doc/architecture.md](../doc/architecture.md).

   Don't have an identity provider of a type supported by Pinniped handy? No problem, there is a demo identity provider
   available. Start by installing local-user-authenticator on the same cluster where you would like to try Pinniped
   by following the directions in [deploy/local-user-authenticator/README.md](../deploy/local-user-authenticator/README.md).
   See below for an example of deploying this on kind.

1. A kubeconfig where the current context points to the cluster and has admin-like
   privileges on that cluster.

## Overview

Installing and trying Pinniped on any cluster will consist of the following general steps. See the next section below
for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case.

1. Install Pinniped. See [deploy/concierge/README.md](../deploy/concierge/README.md).
1. Download the Pinniped CLI from [Pinniped's GitHub Releases page](https://github.com/vmware-tanzu/pinniped/releases/latest).
1. Generate a kubeconfig using the Pinniped CLI. Run `pinniped get-kubeconfig --help` for more information.
1. Run `kubectl` commands using the generated kubeconfig. Pinniped will automatically be used for authentication during those commands.

## Example of Deploying on kind

[kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine
which uses Docker containers as the cluster's "nodes". This is a convenient way to try out Pinniped on a local
non-production cluster.

The following steps will deploy the latest release of Pinniped on kind using the local-user-authenticator component
as the identity provider.

<!-- The following image was uploaded to GitHub's CDN using this awesome trick: https://gist.github.com/vinkla/dca76249ba6b73c5dd66a4e986df4c8d -->
<p align="center" width="100%">
  <img
    src="https://user-images.githubusercontent.com/25013435/95272990-b2ea9780-07f6-11eb-994d-872e3cb68457.gif"
    alt="Pinniped Installation Demo"
    width="80%"
  />
</p>

1. Install the tools required for the following steps.

   - [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/), if not already installed, e.g. `brew install kind` on macOS.

   - kind depends on Docker. If not already installed, [install Docker](https://docs.docker.com/get-docker/), e.g. `brew cask install docker` on macOS.

   - This demo requires `kubectl`, which comes with Docker, or can be [installed separately](https://kubernetes.io/docs/tasks/tools/install-kubectl/).

   - This demo requires a tool capable of generating a `bcrypt` hash in order to interact with
     the webhook. The example below uses `htpasswd`, which is installed on most macOS systems, and can be
     installed on some Linux systems via the `apache2-utils` package (e.g., `apt-get install
     apache2-utils`).

   - One of the steps below optionally uses `jq` to help find the latest release version number. It is not required.
     Install `jq` if you would like, e.g. `brew install jq` on macOS.

1. Create a new Kubernetes cluster using `kind create cluster`. Optionally provide a cluster name using the `--name` flag.
   kind will automatically update your kubeconfig to point to the new cluster as a user with admin-like permissions.

1. Query GitHub's API for the git tag of the latest Pinniped
   [release](https://github.com/vmware-tanzu/pinniped/releases/latest).

   ```bash
   pinniped_version=$(curl https://api.github.com/repos/vmware-tanzu/pinniped/releases/latest -s | jq .name -r)
   ```

   Alternatively, [any release version](https://github.com/vmware-tanzu/pinniped/releases)
   number can be manually selected.

   ```bash
   # Example of manually choosing a release version...
   pinniped_version=v0.2.0
   ```

1. Deploy the local-user-authenticator app. This is a demo identity provider. In production, you would use your
   real identity provider, and therefore would not need to deploy or configure local-user-authenticator.

   ```bash
   kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/$pinniped_version/install-local-user-authenticator.yaml
   ```

   The `install-local-user-authenticator.yaml` file includes the default deployment options.
   If you would prefer to customize the available options, please
   see [deploy/local-user-authenticator/README.md](../deploy/local-user-authenticator/README.md)
   for instructions on how to deploy using `ytt`.

1. Create a test user named `pinny-the-seal` in the local-user-authenticator identity provider.

   ```bash
   kubectl create secret generic pinny-the-seal \
     --namespace local-user-authenticator \
     --from-literal=groups=group1,group2 \
     --from-literal=passwordHash=$(htpasswd -nbBC 10 x password123 | sed -e "s/^x://")
   ```

1. Fetch the auto-generated CA bundle for the local-user-authenticator's HTTPS endpoint.

   ```bash
   kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator \
     -o jsonpath={.data.caCertificate} \
     | tee /tmp/local-user-authenticator-ca-base64-encoded
   ```

1. Deploy Pinniped.

   ```bash
   kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/$pinniped_version/install-pinniped-concierge.yaml
   ```

   The `install-pinniped-concierge.yaml` file includes the default deployment options.
   If you would prefer to customize the available options, please see [deploy/concierge/README.md](../deploy/concierge/README.md)
   for instructions on how to deploy using `ytt`.

1. Create a `WebhookAuthenticator` object to configure Pinniped to authenticate using local-user-authenticator.

   ```bash
   cat <<EOF | kubectl create --namespace pinniped -f -
   apiVersion: authentication.concierge.pinniped.dev/v1alpha1
   kind: WebhookAuthenticator
   metadata:
     name: local-user-authenticator
   spec:
     endpoint: https://local-user-authenticator.local-user-authenticator.svc/authenticate
     tls:
       certificateAuthorityData: $(cat /tmp/local-user-authenticator-ca-base64-encoded)
   EOF
   ```

1. Download the latest version of the Pinniped CLI binary for your platform
   from Pinniped's [latest release](https://github.com/vmware-tanzu/pinniped/releases/latest).

1. Move the Pinniped CLI binary to your preferred filename and directory. Add the executable bit,
   e.g. `chmod +x /usr/local/bin/pinniped`.

1. Generate a kubeconfig for the current cluster. Use `--token` to include a token which should
   allow you to authenticate as the user that you created above.

   ```bash
   pinniped get-kubeconfig --token "pinny-the-seal:password123" --authenticator-type webhook --authenticator-name local-user-authenticator > /tmp/pinniped-kubeconfig
   ```

   If you are using macOS, you may get an error dialog that says
   `“pinniped” cannot be opened because the developer cannot be verified`. Cancel this dialog, open System Preferences,
   click on Security & Privacy, and click the Allow Anyway button next to the Pinniped message.
   Run the above command again and another dialog will appear saying
   `macOS cannot verify the developer of “pinniped”. Are you sure you want to open it?`.
   Click Open to allow the command to proceed.

   Note that the above command will print a warning to the screen. You can ignore this warning.
   Pinniped tries to auto-discover the URL for the Kubernetes API server, but it is not able
   to do so on kind clusters. The warning is just letting you know that the Pinniped CLI decided
   to ignore the auto-discovery URL and instead use the URL from your existing kubeconfig.

1. Try using the generated kubeconfig to issue arbitrary `kubectl` commands as
   the `pinny-the-seal` user.

   ```bash
   kubectl --kubeconfig /tmp/pinniped-kubeconfig get pods -n pinniped
   ```

   Because this user has no RBAC permissions on this cluster, the previous command
   results in the error `Error from server (Forbidden): pods is forbidden: User "pinny-the-seal" cannot list resource "pods" in API group "" in the namespace "pinniped"`.
   However, this does prove that you are authenticated and acting as the `pinny-the-seal` user.

1. As the admin user, create RBAC rules for the test user to give them permissions to perform actions on the cluster.
   For example, grant the test user permission to view all cluster resources.

   ```bash
   kubectl create clusterrolebinding pinny-can-read --clusterrole view --user pinny-the-seal
   ```

1. Use the generated kubeconfig to issue arbitrary `kubectl` commands as the `pinny-the-seal` user.

   ```bash
   kubectl --kubeconfig /tmp/pinniped-kubeconfig get pods -n pinniped
   ```

   The user has permission to list pods, so the command succeeds this time.
   Pinniped has provided authentication into the cluster for your `kubectl` command! 🎉

1. Carry on issuing as many `kubectl` commands as you'd like as the `pinny-the-seal` user.
   Each invocation will use Pinniped for authentication.
   You may find it convenient to set the `KUBECONFIG` environment variable rather than passing `--kubeconfig` to each invocation.

   ```bash
   export KUBECONFIG=/tmp/pinniped-kubeconfig
   kubectl get namespaces
   kubectl get pods -A
   ```

1. Profit! 💰
@@ -1,12 +0,0 @@
# `doc/img` README

## How to Update these Images

- [pinniped.svg](pinniped.svg) was generated using [`plantuml`](https://plantuml.com/).
  To regenerate the image, run `plantuml -tsvg pinniped.txt` from this directory.

- [pinniped_architecture.svg](pinniped_architecture.svg) was created on [draw.io](https://draw.io).
  It can be opened again for editing on that site by choosing "File" -> "Open from" -> "Device".
  Because it includes embedded icons it should be exported using "File" -> "Export as" -> "SVG",
  with the "Transparent Background", "Embed Images", and "Include a copy of my diagram" options
  checked. The icons in this diagram are from their "CAE" shapes set.

(image diff suppressed because its lines are too long; before size: 43 KiB)
@@ -1,61 +0,0 @@
@startuml "pinniped"

!define K8S_BLUE #326CE5
!define K8S_SPRITES_URL https://raw.githubusercontent.com/michiel/plantuml-kubernetes-sprites/master/resource
!include K8S_SPRITES_URL/k8s-sprites-unlabeled-25pct.iuml

participant "User" as USER << ($pod{scale=0.30},K8S_BLUE) >> #LightGreen
participant "Kubectl" as KUBECTL << ($ing{scale=0.30},K8S_BLUE) >> #LightSteelBlue
participant "Proprietary CLI" as CLI << ($svc{scale=0.30},K8S_BLUE) >> #LightPink
participant "Pinniped" as PINNIPED << ($node{scale=0.30},K8S_BLUE) >> #LightGray
participant "TokenReview Webhook" as WEBHOOK << ($pod{scale=0.30},K8S_BLUE) >> #LightPink
participant "Kubernetes API" as API << ($node{scale=0.30},K8S_BLUE) >> #LightSteelBlue

legend
# <back:lightsalmon>Message contains upstream IDP credentials</back>
# <back:lightgreen>Message contains cluster-specific credentials</back>
end legend

USER -> KUBECTL : ""kubectl get pods""
activate KUBECTL

group Acquire cluster-specific credential

KUBECTL -> CLI : Get cluster-specific credential
activate CLI

CLI -> CLI : Retrieve upstream IDP credential in\norganization-specific way

CLI -> PINNIPED : <back:lightsalmon>""POST /apis/pinniped.dev/...""</back>
activate PINNIPED

PINNIPED -> WEBHOOK : <back:lightsalmon>""POST /authenticate""</back>
activate WEBHOOK

WEBHOOK -> PINNIPED : ""200 OK"" with user and group information
deactivate WEBHOOK

PINNIPED -> PINNIPED : Issue short-lived cluster-specific credential\nwith user and group information

PINNIPED -> CLI : <back:lightgreen>""200 OK""</back>
deactivate PINNIPED

CLI -> KUBECTL : Here is a cluster-specific credential

end

group Authenticate to cluster with cluster-specific credential

KUBECTL -> API : <back:lightgreen>""GET /api/v1/pods""</back>
activate API

API -> API : Glean user and group information from\ncluster-specific credential

API -> KUBECTL : ""200 OK"" with pods
deactivate API

deactivate KUBECTL

end

@enduml

(image diff suppressed because its lines are too long; before size: 79 KiB)
@@ -1,68 +0,0 @@
<svg id="artwork" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 486 158"><metadata><?xpacket begin="" id="W5M0MpCehiHzreSzNTczkc9d"?>
<x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="Adobe XMP Core 6.0-c002 79.164352, 2020/01/30-15:50:38 ">
 <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
  <rdf:Description rdf:about=""
    xmlns:lr="http://ns.adobe.com/lightroom/1.0/"
    xmlns:dc="http://purl.org/dc/elements/1.1/"
    xmlns:xmp="http://ns.adobe.com/xap/1.0/"
    xmlns:xmpMM="http://ns.adobe.com/xap/1.0/mm/"
    xmlns:stEvt="http://ns.adobe.com/xap/1.0/sType/ResourceEvent#">
   <lr:hierarchicalSubject>
    <rdf:Bag>
     <rdf:li>open source identity</rdf:li>
     <rdf:li>open source identity|636062</rdf:li>
     <rdf:li>open source identity|Pinniped</rdf:li>
    </rdf:Bag>
   </lr:hierarchicalSubject>
   <dc:subject>
    <rdf:Bag>
     <rdf:li>open source identity</rdf:li>
     <rdf:li>636062</rdf:li>
     <rdf:li>Pinniped</rdf:li>
    </rdf:Bag>
   </dc:subject>
   <xmp:MetadataDate>2020-09-17T16:06:40-07:00</xmp:MetadataDate>
   <xmpMM:InstanceID>xmp.iid:932334bf-97ee-471a-96c9-c4e5ff526fe4</xmpMM:InstanceID>
   <xmpMM:DocumentID>xmp.did:38396587-b56b-42c3-8f3e-f8e9c91f532b</xmpMM:DocumentID>
   <xmpMM:OriginalDocumentID>xmp.did:38396587-b56b-42c3-8f3e-f8e9c91f532b</xmpMM:OriginalDocumentID>
   <xmpMM:History>
    <rdf:Seq>
     <rdf:li>
      <rdf:Description>
       <stEvt:action>saved</stEvt:action>
       <stEvt:instanceID>xmp.iid:38396587-b56b-42c3-8f3e-f8e9c91f532b</stEvt:instanceID>
       <stEvt:when>2020-09-17T16:06:35-07:00</stEvt:when>
       <stEvt:softwareAgent>Adobe Bridge 2020 (Macintosh)</stEvt:softwareAgent>
       <stEvt:changed>/metadata</stEvt:changed>
      </rdf:Description>
     </rdf:li>
     <rdf:li>
      <rdf:Description>
       <stEvt:action>saved</stEvt:action>
       <stEvt:instanceID>xmp.iid:932334bf-97ee-471a-96c9-c4e5ff526fe4</stEvt:instanceID>
       <stEvt:when>2020-09-17T16:06:40-07:00</stEvt:when>
       <stEvt:softwareAgent>Adobe Bridge 2020 (Macintosh)</stEvt:softwareAgent>
       <stEvt:changed>/metadata</stEvt:changed>
      </rdf:Description>
     </rdf:li>
    </rdf:Seq>
   </xmpMM:History>
  </rdf:Description>
 </rdf:RDF>
</x:xmpmeta>

<?xpacket end="w"?></metadata>
<defs><style>.cls-1{fill:#717073;}.cls-2{fill:#727174;}.cls-3{fill:#fff;}.cls-4{fill:#78be43;}.cls-5{fill:#1abfd3;}.cls-6{fill:#79459b;}.cls-7{fill:#1e4488;}.cls-8{fill:#218fcf;}</style></defs><path class="cls-1" d="M170.52,58.21h16.74a17.43,17.43,0,0,1,7.68,1.68,13.45,13.45,0,0,1,5.49,4.71,12.54,12.54,0,0,1,0,13.62,13.45,13.45,0,0,1-5.49,4.71,17.43,17.43,0,0,1-7.68,1.68H175.2V99.43h-4.68Zm15.9,22a13.23,13.23,0,0,0,8.19-2.34,8.21,8.21,0,0,0,0-12.84,13.23,13.23,0,0,0-8.19-2.34H175.2V80.17Z"/><path class="cls-1" d="M209.82,58.21h4.68V99.43h-4.68Z"/><path class="cls-1" d="M225.78,58.21h4.68L256,91.75V58.21h4.68V99.43H256L230.46,65.89V99.43h-4.68Z"/><path class="cls-1" d="M272,58.21h4.68l25.56,33.54V58.21h4.68V99.43h-4.68L276.66,65.89V99.43H272Z"/><path class="cls-1" d="M318.18,58.21h4.68V99.43h-4.68Z"/><path class="cls-1" d="M334.14,58.21h16.74a17.46,17.46,0,0,1,7.68,1.68,13.51,13.51,0,0,1,5.49,4.71,12.54,12.54,0,0,1,0,13.62,13.51,13.51,0,0,1-5.49,4.71,17.46,17.46,0,0,1-7.68,1.68H338.82V99.43h-4.68Zm15.9,22a13.23,13.23,0,0,0,8.19-2.34,8.21,8.21,0,0,0,0-12.84A13.23,13.23,0,0,0,350,62.65H338.82V80.17Z"/><path class="cls-1" d="M378.18,62.65V76.21h22.38v4.44H378.18V95H403v4.44H373.44V58.21H403v4.44Z"/><path class="cls-1" d="M411.12,58.21H425a24.6,24.6,0,0,1,11.54,2.64,19.73,19.73,0,0,1,7.92,7.32,21.2,21.2,0,0,1,0,21.27,19.66,19.66,0,0,1-7.92,7.35A24.6,24.6,0,0,1,425,99.43H411.12Zm13.92,37a19.21,19.21,0,0,0,9.08-2.1,15.61,15.61,0,0,0,6.25-5.82,17,17,0,0,0,0-16.89,15.68,15.68,0,0,0-6.25-5.79,19.21,19.21,0,0,0-9.08-2.1h-9.25v32.7Z"/><path class="cls-2" d="M91.14,25.5A52.5,52.5,0,1,0,143.64,78,52.51,52.51,0,0,0,91.14,25.5Zm0,95.33A42.83,42.83,0,1,1,134,78,42.83,42.83,0,0,1,91.14,120.83Z"/><circle class="cls-3" cx="91.33" cy="77.84" r="45.75"/><circle class="cls-4" cx="91.16" cy="76.71" r="8"/><path class="cls-5" d="M118.92,58.45l1.66,6.89,5.12-.66-3-12.42-.15-.5v0l-11.73-5.08-1.53,5,6.48,2.8L101.26,66.65a14.14,14.14,0,0,1,2.9,4.24Z"/><path class="cls-6" d="M66.46,54.41,73,51.61l-1.53-5L59.68,51.73v0l-.15.5-3,12.42,5.13.66,1.65-6.89L78.13,70.94A14.23,14.23,0,0,1,81,66.68Z"/><path class="cls-7" d="M57.49,82.82,59.21,76l-4.87-1.8L51.23,86.56l0,0,.31.42,8,9.94,3.66-3.66-4.47-5.51,19.82-4.35A14.23,14.23,0,0,1,77,78.54Z"/><path class="cls-7" d="M128,74.17,123.11,76l1.72,6.85-19.54-4.28a14.23,14.23,0,0,1-1.56,4.89l19.81,4.35-4.46,5.51L122.73,97l8-9.94.31-.42,0,0Z"/><path class="cls-6" d="M103.35,109l-7.08-.33-.79,5.11,12.76.58h.56l8.14-9.86-4.34-2.85-4.5,5.45-8.43-19a14.36,14.36,0,0,1-4.58,2.28Z"/><path class="cls-5" d="M74.24,107.08l-4.5-5.45-4.34,2.85,8.14,9.86h.56l12.76-.58-.78-5.11L79,109l8.26-18.57a14.28,14.28,0,0,1-4.59-2.27Z"/><path class="cls-8" d="M93.78,62.7V43.84L100.12,47l2.79-4.35L91.49,37,91,36.74h0l-11.44,5.7,2.8,4.37,6.33-3.15v19a14.59,14.59,0,0,1,2.49-.23A15,15,0,0,1,93.78,62.7Z"/></svg>

doc/scope.md
@@ -1,32 +0,0 @@
# Project Scope

The Pinniped project is guided by the following principles.

* Pinniped lets you plug any external identity provider into
  Kubernetes. These integrations follow enterprise-grade security principles.
* Pinniped is easy to install and use on any Kubernetes cluster via
  distribution-specific integration mechanisms.
* Pinniped uses a declarative configuration via Kubernetes APIs.
* Pinniped provides an optimal user experience when authenticating to many
  clusters at one time.
* Pinniped provides an enterprise-grade security posture via secure defaults and
  revocable or very short-lived credentials.
* Where possible, Pinniped will contribute ideas and code to upstream
  Kubernetes.

When contributing to Pinniped, please consider whether your contribution follows
these guiding principles.

## Out Of Scope

The following items are out of scope for the Pinniped project.

* Authorization.
* Standalone identity provider for general use.
* Machine-to-machine (service) identity.
* Running outside of Kubernetes.

## Roadmap

More details coming soon!

For more details on proposing features and reporting bugs, check out our
[contributing](../CONTRIBUTING.md) doc.
dockerfiles/code-coverage-uploader/Dockerfile (new file)
@@ -0,0 +1,14 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# For uploading code coverage reports to Codecov.
FROM debian:12.11-slim AS builder

RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*

RUN curl -sfLo /tmp/codecov https://uploader.codecov.io/latest/linux/codecov
RUN chmod +x /tmp/codecov

FROM golang:1.24.3
RUN apt-get update -y && apt-get dist-upgrade -y
COPY --from=builder /tmp/codecov /usr/local/bin/codecov

dockerfiles/crane/Dockerfile (new file)
@@ -0,0 +1,10 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM gcr.io/go-containerregistry/crane AS crane
FROM mikefarah/yq:4.45.4 AS yq

FROM golang:1.24.3
COPY --from=yq /usr/bin/yq /usr/local/bin
COPY --from=crane /ko-app/crane /usr/local/bin
ENTRYPOINT ["bash"]

dockerfiles/deployment-yaml-formatter/Dockerfile (new file)
@@ -0,0 +1,16 @@
# Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM mikefarah/yq:4.45.4 AS yq

FROM debian:12.11-slim

# Note: libdigest-sha-perl is to get shasum, which is used when installing Carvel tools below.
RUN apt-get update && apt-get install -y ca-certificates jq curl libdigest-sha-perl && rm -rf /var/lib/apt/lists/*

# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
    ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version

# Install yq.
COPY --from=yq /usr/bin/yq /usr/local/bin/yq

dockerfiles/eks-deployer/Dockerfile (new file)
@@ -0,0 +1,29 @@
# Copyright 2020-2025 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# For deploying an EKS cluster and setting it up to run our tests.

FROM weaveworks/eksctl:v0.208.0 AS eksctl
FROM mikefarah/yq:4.45.4 AS yq
FROM amazon/aws-cli:2.27.24
RUN yum update -y && yum install -y jq && yum install -y perl-Digest-SHA && yum clean all
COPY --from=eksctl eksctl /usr/local/bin/eksctl
COPY --from=yq /usr/bin/yq /usr/local/bin/yq

# Install Carvel tools.
RUN bash -c "set -eo pipefail; curl -fsL https://carvel.dev/install.sh | bash" && \
    ytt version && kapp version && kbld version && kwt version && imgpkg version && vendir version

# Install latest kubectl.
RUN curl -sfL "https://dl.k8s.io/release/$(curl -sfL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
    -o /bin/kubectl && chmod u+x /bin/kubectl

# Install aws-iam-authenticator.
# This gets installed automatically via eksctl, but currently it downloads v0.5.2,
# which will give us a v1alpha1 execcredential rather than a v1beta1 which we want.
# When this has changed, we can delete this:
# https://github.com/weaveworks/eksctl/blob/main/build/docker/Dockerfile#L49
RUN curl -sfL \
    https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.14/aws-iam-authenticator_0.6.14_linux_amd64 \
    -o /usr/local/bin/aws-iam-authenticator \
    && chmod u+x /usr/local/bin/aws-iam-authenticator

Some files were not shown because too many files have changed in this diff.